author    Lars Wirzenius <liw@liw.fi>  2022-04-09 12:00:29 +0300
committer Lars Wirzenius <liw@liw.fi>  2022-04-16 09:06:59 +0300
commit    18c0f4afab29e17c050208234becbfb5e2973746 (patch)
tree      62bb67504c47747f8ce202f4eb4121bb3d051223
parent    82ff782fe85c84c10f1f18c9bd5c2b017bc2f240 (diff)
download  obnam2-18c0f4afab29e17c050208234becbfb5e2973746.tar.gz
feat: use one checksum for all chunks in a backup
When making a backup, use the same checksum for any chunks it re-uses or creates. This is for performance: if we allowed two checksums to be used, we would have to compute the checksum for a chunk twice, and potentially look up both on the server. That is just a lot of work, so instead we use only one.

The trade-off here is that when (not if) the user wants to switch to a new checksum type, they'll have to do a full backup, uploading all their data to the server even when it's already there, just with a different checksum. Hopefully this will be rare.

Full backups always use the built-in, hardcoded default checksum, and incremental backups use whatever the previous backup used. The default is still SHA256, but this commit adds code to support BLAKE2 in case we decide to switch to that as the default. It's also easy to add support for others now. BLAKE2 was added to verify that Obnam can actually handle the checksum changing (manual test; not in the test suite).

I don't think users need to be offered even the option of choosing a checksum algorithm. When one cares about both security and performance, choosing a checksum requires specialist, expert knowledge, so the Obnam developers should choose the default. Giving users a knob they can twiddle just makes Obnam that much harder to configure and use. If the choice the Obnam developers have made turns out to be sub-optimal, it seems better to change the default for everyone, rather than hope that every user changes their configuration to gain the benefit. Experience has shown that people mostly don't change the default configuration, and that they are especially bad at choosing well when security is a concern.

(Obnam is free software. Expert users can choose their checksum by changing the source code. I'm not fundamentally limiting anyone's freedom or choice here.)

Users can switch to a new default algorithm by triggering a full backup with the new "obnam backup --full".

Sponsored-by: author
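As a rough sketch of the selection rule described above (not part of the commit): full backups start from the hardcoded default, and incremental backups inherit the kind recorded in the previous generation's metadata. The helper pick_checksum_kind below is hypothetical; it only assumes the LabelChecksumKind and GenerationMeta APIs added in the diff that follows, and that the library crate is named obnam and exposes those modules publicly.

~~~rust
// Hypothetical helper (not in this commit); it assumes only the
// LabelChecksumKind and GenerationMeta APIs added in this diff.
use obnam::genmeta::GenerationMeta;
use obnam::label::{LabelChecksumKind, LabelError};

// The built-in default used when there is no previous generation to
// inherit from (mirrors DEFAULT_CHECKSUM_KIND in src/backup_run.rs).
const DEFAULT_CHECKSUM_KIND: LabelChecksumKind = LabelChecksumKind::Sha256;

// Decide which checksum kind a backup run should use: an incremental
// backup reuses the previous generation's kind, a full (or first)
// backup falls back to the hardcoded default.
fn pick_checksum_kind(
    previous: Option<&GenerationMeta>,
) -> Result<LabelChecksumKind, LabelError> {
    match previous.and_then(|meta| meta.get("checksum_kind")) {
        Some(value) => LabelChecksumKind::from(value),
        None => Ok(DEFAULT_CHECKSUM_KIND),
    }
}
~~~

In the commit itself this logic lives inline in src/backup_run.rs: BackupRun::initial sets the default, BackupRun::incremental leaves the kind unset, and start() fills it in from the previous generation's "checksum_kind" metadata entry.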
-rw-r--r--  Cargo.lock          10
-rw-r--r--  Cargo.toml           1
-rw-r--r--  obnam.md             4
-rw-r--r--  src/backup_run.rs   22
-rw-r--r--  src/chunker.rs      16
-rw-r--r--  src/cmd/backup.rs   52
-rw-r--r--  src/dbgen.rs        40
-rw-r--r--  src/error.rs         5
-rw-r--r--  src/generation.rs   17
-rw-r--r--  src/genmeta.rs       5
-rw-r--r--  src/label.rs        54
11 files changed, 184 insertions, 42 deletions
diff --git a/Cargo.lock b/Cargo.lock
index 9dafc07..7715bf3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -114,6 +114,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
+name = "blake2"
+version = "0.10.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388"
+dependencies = [
+ "digest 0.10.3",
+]
+
+[[package]]
name = "block-buffer"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -977,6 +986,7 @@ version = "0.7.1"
dependencies = [
"aes-gcm",
"anyhow",
+ "blake2",
"bytesize",
"chrono",
"directories-next",
diff --git a/Cargo.toml b/Cargo.toml
index 616a34a..36f1682 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -13,6 +13,7 @@ rust-version = "1.56.0"
[dependencies]
aes-gcm = "0.9"
anyhow = "1"
+blake2 = "0.10.4"
bytesize = "1"
chrono = "0.4"
directories-next = "2"
diff --git a/obnam.md b/obnam.md
index 381802c..a6f9b4c 100644
--- a/obnam.md
+++ b/obnam.md
@@ -1901,7 +1901,9 @@ then stdout, as JSON, has all the values in file geninfo.json
"major": 0,
"minor": 0
},
- "extras": {}
+ "extras": {
+ "checksum_kind": "sha256"
+ }
}
~~~
diff --git a/src/backup_run.rs b/src/backup_run.rs
index 29e82fc..2418871 100644
--- a/src/backup_run.rs
+++ b/src/backup_run.rs
@@ -15,6 +15,7 @@ use crate::fsiter::{AnnotatedFsEntry, FsIterError, FsIterator};
use crate::generation::{
GenId, LocalGeneration, LocalGenerationError, NascentError, NascentGeneration,
};
+use crate::label::LabelChecksumKind;
use crate::performance::{Clock, Performance};
use crate::policy::BackupPolicy;
use crate::schema::SchemaVersion;
@@ -24,10 +25,12 @@ use chrono::{DateTime, Local};
use log::{debug, error, info, warn};
use std::path::{Path, PathBuf};
+const DEFAULT_CHECKSUM_KIND: LabelChecksumKind = LabelChecksumKind::Sha256;
const SQLITE_CHUNK_SIZE: usize = MIB as usize;
/// A running backup.
pub struct BackupRun<'a> {
+ checksum_kind: Option<LabelChecksumKind>,
client: &'a BackupClient,
policy: BackupPolicy,
buffer_size: usize,
@@ -105,6 +108,7 @@ impl<'a> BackupRun<'a> {
/// Create a new run for an initial backup.
pub fn initial(config: &ClientConfig, client: &'a BackupClient) -> Result<Self, BackupError> {
Ok(Self {
+ checksum_kind: Some(DEFAULT_CHECKSUM_KIND),
client,
policy: BackupPolicy::default(),
buffer_size: config.chunk_size,
@@ -118,6 +122,7 @@ impl<'a> BackupRun<'a> {
client: &'a BackupClient,
) -> Result<Self, BackupError> {
Ok(Self {
+ checksum_kind: None,
client,
policy: BackupPolicy::default(),
buffer_size: config.chunk_size,
@@ -136,7 +141,7 @@ impl<'a> BackupRun<'a> {
None => {
// Create a new, empty generation.
let schema = schema_version(DEFAULT_SCHEMA_MAJOR).unwrap();
- NascentGeneration::create(oldname, schema)?.close()?;
+ NascentGeneration::create(oldname, schema, self.checksum_kind.unwrap())?.close()?;
// Open the newly created empty generation.
Ok(LocalGeneration::open(oldname)?)
@@ -146,6 +151,11 @@ impl<'a> BackupRun<'a> {
let old = self.fetch_previous_generation(genid, oldname).await?;
perf.stop(Clock::GenerationDownload);
+ let meta = old.meta()?;
+ if let Some(v) = meta.get("checksum_kind") {
+ self.checksum_kind = Some(LabelChecksumKind::from(v)?);
+ }
+
let progress = BackupProgress::incremental();
progress.files_in_previous_generation(old.file_count()? as u64);
self.progress = Some(progress);
@@ -155,6 +165,12 @@ impl<'a> BackupRun<'a> {
}
}
+ fn checksum_kind(&self) -> LabelChecksumKind {
+ self.checksum_kind
+ .or(Some(LabelChecksumKind::Sha256))
+ .unwrap()
+ }
+
async fn fetch_previous_generation(
&self,
genid: &GenId,
@@ -185,7 +201,7 @@ impl<'a> BackupRun<'a> {
let mut warnings: Vec<BackupError> = vec![];
let mut new_cachedir_tags = vec![];
let files_count = {
- let mut new = NascentGeneration::create(newpath, schema)?;
+ let mut new = NascentGeneration::create(newpath, schema, self.checksum_kind.unwrap())?;
for root in &config.roots {
match self.backup_one_root(config, old, &mut new, root).await {
Ok(mut o) => {
@@ -378,7 +394,7 @@ impl<'a> BackupRun<'a> {
let mut chunk_ids = vec![];
let file = std::fs::File::open(filename)
.map_err(|err| ClientError::FileOpen(filename.to_path_buf(), err))?;
- let chunker = FileChunks::new(size, file, filename);
+ let chunker = FileChunks::new(size, file, filename, self.checksum_kind());
for item in chunker {
let chunk = item?;
if let Some(chunk_id) = self.client.has_chunk(chunk.meta()).await? {
diff --git a/src/chunker.rs b/src/chunker.rs
index 2394230..29f8a90 100644
--- a/src/chunker.rs
+++ b/src/chunker.rs
@@ -2,13 +2,14 @@
use crate::chunk::DataChunk;
use crate::chunkmeta::ChunkMeta;
-use crate::label::Label;
+use crate::label::{Label, LabelChecksumKind};
use std::io::prelude::*;
use std::path::{Path, PathBuf};
/// Iterator over chunks in a file.
pub struct FileChunks {
chunk_size: usize,
+ kind: LabelChecksumKind,
buf: Vec<u8>,
filename: PathBuf,
handle: std::fs::File,
@@ -24,11 +25,17 @@ pub enum ChunkerError {
impl FileChunks {
/// Create new iterator.
- pub fn new(chunk_size: usize, handle: std::fs::File, filename: &Path) -> Self {
+ pub fn new(
+ chunk_size: usize,
+ handle: std::fs::File,
+ filename: &Path,
+ kind: LabelChecksumKind,
+ ) -> Self {
let mut buf = vec![];
buf.resize(chunk_size, 0);
Self {
chunk_size,
+ kind,
buf,
handle,
filename: filename.to_path_buf(),
@@ -54,7 +61,10 @@ impl FileChunks {
}
let buffer = &self.buf.as_slice()[..used];
- let hash = Label::sha256(buffer);
+ let hash = match self.kind {
+ LabelChecksumKind::Blake2 => Label::blake2(buffer),
+ LabelChecksumKind::Sha256 => Label::sha256(buffer),
+ };
let meta = ChunkMeta::new(&hash);
let chunk = DataChunk::new(buffer.to_vec(), meta);
Ok(Some(chunk))
diff --git a/src/cmd/backup.rs b/src/cmd/backup.rs
index 8a85703..60045cc 100644
--- a/src/cmd/backup.rs
+++ b/src/cmd/backup.rs
@@ -19,7 +19,11 @@ use tokio::runtime::Runtime;
/// Make a backup.
#[derive(Debug, StructOpt)]
pub struct Backup {
- /// Backup schema major version.
+ /// Force a full backup, instead of an incremental one.
+ #[structopt(long)]
+ full: bool,
+
+ /// Backup schema major version to use.
#[structopt(long)]
backup_version: Option<VersionComponent>,
}
@@ -53,29 +57,35 @@ impl Backup {
let oldtemp = temp.path().join("old.db");
let newtemp = temp.path().join("new.db");
- let (is_incremental, outcome) = match genlist.resolve("latest") {
- Err(_) => {
- info!("fresh backup without a previous generation");
- let mut run = BackupRun::initial(config, &client)?;
- let old = run.start(None, &oldtemp, perf).await?;
- (
- false,
- run.backup_roots(config, &old, &newtemp, schema, perf)
- .await?,
- )
- }
- Ok(old_id) => {
- info!("incremental backup based on {}", old_id);
- let mut run = BackupRun::incremental(config, &client)?;
- let old = run.start(Some(&old_id), &oldtemp, perf).await?;
- (
- true,
- run.backup_roots(config, &old, &newtemp, schema, perf)
- .await?,
- )
+ let old_id = if self.full {
+ None
+ } else {
+ match genlist.resolve("latest") {
+ Err(_) => None,
+ Ok(old_id) => Some(old_id),
}
};
+ let (is_incremental, outcome) = if let Some(old_id) = old_id {
+ info!("incremental backup based on {}", old_id);
+ let mut run = BackupRun::incremental(config, &client)?;
+ let old = run.start(Some(&old_id), &oldtemp, perf).await?;
+ (
+ true,
+ run.backup_roots(config, &old, &newtemp, schema, perf)
+ .await?,
+ )
+ } else {
+ info!("fresh backup without a previous generation");
+ let mut run = BackupRun::initial(config, &client)?;
+ let old = run.start(None, &oldtemp, perf).await?;
+ (
+ false,
+ run.backup_roots(config, &old, &newtemp, schema, perf)
+ .await?,
+ )
+ };
+
perf.start(Clock::GenerationUpload);
let mut trust = trust;
trust.append_backup(outcome.gen_id.as_chunk_id());
diff --git a/src/dbgen.rs b/src/dbgen.rs
index 816ea11..8e5ece5 100644
--- a/src/dbgen.rs
+++ b/src/dbgen.rs
@@ -5,6 +5,7 @@ use crate::chunkid::ChunkId;
use crate::db::{Column, Database, DatabaseError, SqlResults, Table, Value};
use crate::fsentry::FilesystemEntry;
use crate::genmeta::{GenerationMeta, GenerationMetaError};
+use crate::label::LabelChecksumKind;
use crate::schema::{SchemaVersion, VersionComponent};
use log::error;
use std::collections::HashMap;
@@ -90,14 +91,15 @@ impl GenerationDb {
pub fn create<P: AsRef<Path>>(
filename: P,
schema: SchemaVersion,
+ checksum_kind: LabelChecksumKind,
) -> Result<Self, GenerationDbError> {
let meta_table = Self::meta_table();
let variant = match schema.version() {
(V0_0::MAJOR, V0_0::MINOR) => {
- GenerationDbVariant::V0_0(V0_0::create(filename, meta_table)?)
+ GenerationDbVariant::V0_0(V0_0::create(filename, meta_table, checksum_kind)?)
}
(V1_0::MAJOR, V1_0::MINOR) => {
- GenerationDbVariant::V1_0(V1_0::create(filename, meta_table)?)
+ GenerationDbVariant::V1_0(V1_0::create(filename, meta_table, checksum_kind)?)
}
(major, minor) => return Err(GenerationDbError::Incompatible(major, minor)),
};
@@ -240,11 +242,15 @@ impl V0_0 {
const MINOR: VersionComponent = 0;
/// Create a new generation database in read/write mode.
- pub fn create<P: AsRef<Path>>(filename: P, meta: Table) -> Result<Self, GenerationDbError> {
+ pub fn create<P: AsRef<Path>>(
+ filename: P,
+ meta: Table,
+ checksum_kind: LabelChecksumKind,
+ ) -> Result<Self, GenerationDbError> {
let db = Database::create(filename.as_ref())?;
let mut moi = Self::new(db, meta);
moi.created = true;
- moi.create_tables()?;
+ moi.create_tables(checksum_kind)?;
Ok(moi)
}
@@ -276,7 +282,7 @@ impl V0_0 {
}
}
- fn create_tables(&mut self) -> Result<(), GenerationDbError> {
+ fn create_tables(&mut self, checksum_kind: LabelChecksumKind) -> Result<(), GenerationDbError> {
self.db.create_table(&self.meta)?;
self.db.create_table(&self.files)?;
self.db.create_table(&self.chunks)?;
@@ -295,6 +301,13 @@ impl V0_0 {
Value::text("value", &format!("{}", Self::MINOR)),
],
)?;
+ self.db.insert(
+ &self.meta,
+ &[
+ Value::text("key", "checksum_kind"),
+ Value::text("value", checksum_kind.serialize()),
+ ],
+ )?;
Ok(())
}
@@ -483,11 +496,15 @@ impl V1_0 {
const MINOR: VersionComponent = 0;
/// Create a new generation database in read/write mode.
- pub fn create<P: AsRef<Path>>(filename: P, meta: Table) -> Result<Self, GenerationDbError> {
+ pub fn create<P: AsRef<Path>>(
+ filename: P,
+ meta: Table,
+ checksum_kind: LabelChecksumKind,
+ ) -> Result<Self, GenerationDbError> {
let db = Database::create(filename.as_ref())?;
let mut moi = Self::new(db, meta);
moi.created = true;
- moi.create_tables()?;
+ moi.create_tables(checksum_kind)?;
Ok(moi)
}
@@ -519,7 +536,7 @@ impl V1_0 {
}
}
- fn create_tables(&mut self) -> Result<(), GenerationDbError> {
+ fn create_tables(&mut self, checksum_kind: LabelChecksumKind) -> Result<(), GenerationDbError> {
self.db.create_table(&self.meta)?;
self.db.create_table(&self.files)?;
self.db.create_table(&self.chunks)?;
@@ -538,6 +555,13 @@ impl V1_0 {
Value::text("value", &format!("{}", Self::MINOR)),
],
)?;
+ self.db.insert(
+ &self.meta,
+ &[
+ Value::text("key", "checksum_kind"),
+ Value::text("value", checksum_kind.serialize()),
+ ],
+ )?;
Ok(())
}
diff --git a/src/error.rs b/src/error.rs
index 9c9b432..928f258 100644
--- a/src/error.rs
+++ b/src/error.rs
@@ -10,6 +10,7 @@ use crate::db::DatabaseError;
use crate::dbgen::GenerationDbError;
use crate::generation::{LocalGenerationError, NascentError};
use crate::genlist::GenerationListError;
+use crate::label::LabelError;
use crate::passwords::PasswordError;
use std::path::PathBuf;
use std::time::SystemTimeError;
@@ -22,6 +23,10 @@ use tempfile::PersistError;
/// convenience.
#[derive(Debug, thiserror::Error)]
pub enum ObnamError {
+ /// Error from chunk labels.
+ #[error(transparent)]
+ Label(#[from] LabelError),
+
/// Error listing generations on server.
#[error(transparent)]
GenerationListError(#[from] GenerationListError),
diff --git a/src/generation.rs b/src/generation.rs
index 715b426..0a0fc77 100644
--- a/src/generation.rs
+++ b/src/generation.rs
@@ -6,6 +6,7 @@ use crate::db::{DatabaseError, SqlResults};
use crate::dbgen::{FileId, GenerationDb, GenerationDbError};
use crate::fsentry::FilesystemEntry;
use crate::genmeta::{GenerationMeta, GenerationMetaError};
+use crate::label::LabelChecksumKind;
use crate::schema::{SchemaVersion, VersionComponent};
use std::fmt;
use std::path::{Path, PathBuf};
@@ -77,11 +78,15 @@ pub enum NascentError {
impl NascentGeneration {
/// Create a new nascent generation.
- pub fn create<P>(filename: P, schema: SchemaVersion) -> Result<Self, NascentError>
+ pub fn create<P>(
+ filename: P,
+ schema: SchemaVersion,
+ checksum_kind: LabelChecksumKind,
+ ) -> Result<Self, NascentError>
where
P: AsRef<Path>,
{
- let db = GenerationDb::create(filename.as_ref(), schema)?;
+ let db = GenerationDb::create(filename.as_ref(), schema, checksum_kind)?;
Ok(Self { db, fileno: 0 })
}
@@ -290,7 +295,7 @@ impl LocalGeneration {
#[cfg(test)]
mod test {
- use super::{LocalGeneration, NascentGeneration, SchemaVersion};
+ use super::{LabelChecksumKind, LocalGeneration, NascentGeneration, SchemaVersion};
use tempfile::NamedTempFile;
#[test]
@@ -298,7 +303,8 @@ mod test {
let filename = NamedTempFile::new().unwrap().path().to_path_buf();
let schema = SchemaVersion::new(0, 0);
{
- let mut _gen = NascentGeneration::create(&filename, schema).unwrap();
+ let mut _gen =
+ NascentGeneration::create(&filename, schema, LabelChecksumKind::Sha256).unwrap();
            // _gen is dropped here; the connection is closed; the file
// should not be removed.
}
@@ -328,7 +334,8 @@ mod test {
let tag_path2 = Path::new("/another_dir/a_tag");
let schema = SchemaVersion::new(0, 0);
- let mut gen = NascentGeneration::create(&dbfile, schema).unwrap();
+ let mut gen =
+ NascentGeneration::create(&dbfile, schema, LabelChecksumKind::Sha256).unwrap();
let mut cache = users::UsersCache::new();
gen.insert(
diff --git a/src/genmeta.rs b/src/genmeta.rs
index 2ce4c4c..d5b14a3 100644
--- a/src/genmeta.rs
+++ b/src/genmeta.rs
@@ -26,6 +26,11 @@ impl GenerationMeta {
pub fn schema_version(&self) -> SchemaVersion {
self.schema_version
}
+
+ /// Get a value corresponding to a key in the meta table.
+ pub fn get(&self, key: &str) -> Option<&String> {
+ self.extras.get(key)
+ }
}
fn metastr(map: &mut HashMap<String, String>, key: &str) -> Result<String, GenerationMetaError> {
diff --git a/src/label.rs b/src/label.rs
index 64be341..19d270a 100644
--- a/src/label.rs
+++ b/src/label.rs
@@ -5,10 +5,12 @@
//! does not aim to make these algorithms configurable, so only a very
//! small number of carefully chosen algorithms are supported here.
+use blake2::Blake2s256;
use sha2::{Digest, Sha256};
const LITERAL: char = '0';
const SHA256: char = '1';
+const BLAKE2: char = '2';
/// A checksum of some data.
#[derive(Debug, Clone)]
@@ -18,6 +20,9 @@ pub enum Label {
/// A SHA256 checksum.
Sha256(String),
+
+ /// A BLAKE2s checksum.
+ Blake2(String),
}
impl Label {
@@ -34,11 +39,20 @@ impl Label {
Self::Sha256(format!("{:x}", hash))
}
+ /// Compute a BLAKE2s checksum for a block of data.
+ pub fn blake2(data: &[u8]) -> Self {
+ let mut hasher = Blake2s256::new();
+ hasher.update(data);
+ let hash = hasher.finalize();
+ Self::Blake2(format!("{:x}", hash))
+ }
+
/// Serialize a label into a string representation.
pub fn serialize(&self) -> String {
match self {
Self::Literal(s) => format!("{}{}", LITERAL, s),
Self::Sha256(hash) => format!("{}{}", SHA256, hash),
+ Self::Blake2(hash) => format!("{}{}", BLAKE2, hash),
}
}
@@ -54,6 +68,37 @@ impl Label {
}
}
+/// Kinds of checksum labels.
+#[derive(Debug, Clone, Copy, Eq, PartialEq)]
+pub enum LabelChecksumKind {
+ /// Use a Blake2 checksum.
+ Blake2,
+
+ /// Use a SHA256 checksum.
+ Sha256,
+}
+
+impl LabelChecksumKind {
+ /// Parse a string into a label checksum kind.
+ pub fn from(s: &str) -> Result<Self, LabelError> {
+ if s == "sha256" {
+ Ok(Self::Sha256)
+ } else if s == "blake2" {
+ Ok(Self::Blake2)
+ } else {
+ Err(LabelError::UnknownType(s.to_string()))
+ }
+ }
+
+ /// Serialize a checksum kind into a string.
+ pub fn serialize(self) -> &'static str {
+ match self {
+ Self::Sha256 => "sha256",
+ Self::Blake2 => "blake2",
+ }
+ }
+}
+
/// Possible errors from dealing with chunk labels.
#[derive(Debug, thiserror::Error)]
pub enum LabelError {
@@ -64,7 +109,7 @@ pub enum LabelError {
#[cfg(test)]
mod test {
- use super::Label;
+ use super::{Label, LabelChecksumKind};
#[test]
fn roundtrip_literal() {
@@ -83,4 +128,11 @@ mod test {
let seri2 = de.serialize();
assert_eq!(serialized, seri2);
}
+
+ #[test]
+ fn roundtrip_checksum_kind() {
+ for kind in [LabelChecksumKind::Sha256, LabelChecksumKind::Blake2] {
+ assert_eq!(LabelChecksumKind::from(kind.serialize()).unwrap(), kind);
+ }
+ }
}
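
For illustration, a minimal usage sketch of the label API above (again, not part of the commit; it assumes the label module is exported from an obnam library crate). It mirrors the per-chunk dispatch that src/chunker.rs does:

~~~rust
// Hypothetical usage sketch; assumes obnam::label is a public module.
use obnam::label::{Label, LabelChecksumKind};

// Compute a label for a buffer, dispatching on the configured checksum
// kind, the same way FileChunks does for each chunk it reads.
fn label_for(data: &[u8], kind: LabelChecksumKind) -> Label {
    match kind {
        LabelChecksumKind::Blake2 => Label::blake2(data),
        LabelChecksumKind::Sha256 => Label::sha256(data),
    }
}

fn main() {
    let label = label_for(b"hello, world", LabelChecksumKind::Blake2);
    // Serialized labels start with a one-character type tag (the LITERAL,
    // SHA256, and BLAKE2 constants above) so the kind can be recognized later.
    println!("{}", label.serialize());
}
~~~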