author     Lars Wirzenius <liw@liw.fi>    2021-06-20 10:37:45 +0300
committer  Lars Wirzenius <liw@liw.fi>    2021-07-05 16:26:14 +0300
commit     e6147a3b7b58b151fb7ad9b1f748e0a666f271de (patch)
tree       c58ee06b5f327a569cc7e464f813eaad9dfc6d5d /src
parent     79c71be291b3cf1fad52e2fbdc28be12d3e11311 (diff)
download   obnam2-e6147a3b7b58b151fb7ad9b1f748e0a666f271de.tar.gz
refactor: reduce repetition in the code that runs backups

This should make it easier to introduce async later.
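In short, the separate InitialBackup and IncrementalBackup types are merged into a single BackupRun, and the initial and incremental code paths in src/cmd/backup.rs now follow the same shape. A condensed sketch of the resulting call flow, taken from the diff below (temp-file setup and error handling elided):

    // Sketch of the unified call flow after this refactor; names are
    // as they appear in the src/cmd/backup.rs hunk further down.
    let (count, warnings) = match genlist.resolve("latest") {
        // No previous generation exists: fresh backup.
        Err(_) => {
            let mut run = BackupRun::initial(config, &client)?;
            let old = run.start(None, oldtemp.path())?;
            run.backup_roots(config, &old, newtemp.path())?
        }
        // A previous generation exists: incremental backup.
        Ok(old_id) => {
            let mut run = BackupRun::incremental(config, &client)?;
            let old = run.start(Some(&old_id), oldtemp.path())?;
            run.backup_roots(config, &old, newtemp.path())?
        }
    };
    let gen_id = upload_nascent_generation(&client, newtemp.path())?;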
Diffstat (limited to 'src')
-rw-r--r--  src/backup_run.rs  126
-rw-r--r--  src/cmd/backup.rs   82
-rw-r--r--  src/generation.rs   10
3 files changed, 86 insertions(+), 132 deletions(-)
diff --git a/src/backup_run.rs b/src/backup_run.rs
index 16d6700..622485d 100644
--- a/src/backup_run.rs
+++ b/src/backup_run.rs
@@ -5,23 +5,17 @@ use crate::client::{BackupClient, ClientError};
use crate::config::ClientConfig;
use crate::error::ObnamError;
use crate::fsentry::FilesystemEntry;
-use crate::fsiter::{FsIterError, FsIterResult};
-use crate::generation::{LocalGeneration, LocalGenerationError};
+use crate::fsiter::{FsIterError, FsIterResult, FsIterator};
+use crate::generation::{LocalGeneration, LocalGenerationError, NascentError, NascentGeneration};
use crate::policy::BackupPolicy;
use log::{info, warn};
use std::path::Path;
-pub struct InitialBackup<'a> {
- client: &'a BackupClient,
- buffer_size: usize,
- progress: BackupProgress,
-}
-
-pub struct IncrementalBackup<'a> {
+pub struct BackupRun<'a> {
client: &'a BackupClient,
policy: BackupPolicy,
buffer_size: usize,
- progress: Option<BackupProgress>,
+ progress: BackupProgress,
}
#[derive(Debug, thiserror::Error)]
@@ -38,85 +32,81 @@ pub enum BackupError {
pub type BackupResult<T> = Result<T, BackupError>;
-impl<'a> InitialBackup<'a> {
- pub fn new(config: &ClientConfig, client: &'a BackupClient) -> BackupResult<Self> {
- let progress = BackupProgress::initial();
+impl<'a> BackupRun<'a> {
+ pub fn initial(config: &ClientConfig, client: &'a BackupClient) -> BackupResult<Self> {
Ok(Self {
client,
+ policy: BackupPolicy::default(),
buffer_size: config.chunk_size,
- progress,
+ progress: BackupProgress::initial(),
})
}
- pub fn drop(&self) {
- self.progress.finish();
- }
-
- pub fn backup(
- &self,
- entry: FsIterResult<FilesystemEntry>,
- ) -> BackupResult<(FilesystemEntry, Vec<ChunkId>, Reason)> {
- match entry {
- Err(err) => {
- warn!("backup: there was a problem: {:?}", err);
- self.progress.found_problem();
- Err(err.into())
- }
- Ok(entry) => {
- let path = &entry.pathbuf();
- info!("backup: {}", path.display());
- self.progress.found_live_file(path);
- Ok(backup_file(
- &self.client,
- &entry,
- &path,
- self.buffer_size,
- Reason::IsNew,
- ))
- }
- }
- }
-}
-
-impl<'a> IncrementalBackup<'a> {
- pub fn new(config: &ClientConfig, client: &'a BackupClient) -> BackupResult<Self> {
- let policy = BackupPolicy::default();
+ pub fn incremental(config: &ClientConfig, client: &'a BackupClient) -> BackupResult<Self> {
Ok(Self {
client,
- policy,
+ policy: BackupPolicy::default(),
buffer_size: config.chunk_size,
- progress: None,
+ progress: BackupProgress::incremental(),
})
}
- pub fn start_backup(&mut self, old: &LocalGeneration) -> Result<(), ObnamError> {
- let progress = BackupProgress::incremental();
- progress.files_in_previous_generation(old.file_count()? as u64);
- self.progress = Some(progress);
- Ok(())
- }
-
- pub fn client(&self) -> &BackupClient {
- self.client
- }
+ pub fn start(
+ &mut self,
+ genid: Option<&str>,
+ oldname: &Path,
+ ) -> Result<LocalGeneration, ObnamError> {
+ match genid {
+ None => {
+ // Create a new, empty generation.
+ NascentGeneration::create(oldname)?;
- pub fn drop(&self) {
- if let Some(progress) = &self.progress {
- progress.finish();
+ // Open the newly created empty generation.
+ Ok(LocalGeneration::open(oldname)?)
+ }
+ Some(genid) => {
+ let old = self.fetch_previous_generation(genid, oldname)?;
+ self.progress
+ .files_in_previous_generation(old.file_count()? as u64);
+ Ok(old)
+ }
}
}
- pub fn fetch_previous_generation(
+ fn fetch_previous_generation(
&self,
genid: &str,
oldname: &Path,
) -> Result<LocalGeneration, ObnamError> {
let progress = BackupProgress::download_generation(genid);
- let old = self.client().fetch_generation(genid, &oldname)?;
+ let old = self.client.fetch_generation(genid, &oldname)?;
progress.finish();
Ok(old)
}
+ pub fn finish(&self) {
+ self.progress.finish();
+ }
+
+ pub fn backup_roots(
+ &self,
+ config: &ClientConfig,
+ old: &LocalGeneration,
+ newpath: &Path,
+ ) -> Result<(i64, Vec<BackupError>), NascentError> {
+ let mut all_warnings = vec![];
+ let count = {
+ let mut new = NascentGeneration::create(newpath)?;
+ for root in &config.roots {
+ let iter = FsIterator::new(root, config.exclude_cache_tag_directories);
+ let mut warnings = new.insert_iter(iter.map(|entry| self.backup(entry, &old)))?;
+ all_warnings.append(&mut warnings);
+ }
+ new.file_count()
+ };
+ self.finish();
+ Ok((count, all_warnings))
+ }
pub fn backup(
&self,
entry: FsIterResult<FilesystemEntry>,
@@ -163,15 +153,11 @@ impl<'a> IncrementalBackup<'a> {
}
fn found_live_file(&self, path: &Path) {
- if let Some(progress) = &self.progress {
- progress.found_live_file(path);
- }
+ self.progress.found_live_file(path);
}
fn found_problem(&self) {
- if let Some(progress) = &self.progress {
- progress.found_problem();
- }
+ self.progress.found_problem();
}
}
diff --git a/src/cmd/backup.rs b/src/cmd/backup.rs
index 22afd6e..d574b96 100644
--- a/src/cmd/backup.rs
+++ b/src/cmd/backup.rs
@@ -1,11 +1,9 @@
use crate::backup_progress::BackupProgress;
-use crate::backup_run::{BackupError, IncrementalBackup, InitialBackup};
+use crate::backup_run::BackupRun;
use crate::chunkid::ChunkId;
use crate::client::BackupClient;
use crate::config::ClientConfig;
use crate::error::ObnamError;
-use crate::fsiter::FsIterator;
-use crate::generation::NascentGeneration;
use bytesize::MIB;
use log::info;
use std::path::Path;
@@ -24,16 +22,32 @@ impl Backup {
let client = BackupClient::new(config)?;
let genlist = client.list_generations()?;
- let (gen_id, file_count, warnings) = match genlist.resolve("latest") {
- Err(_) => initial_backup(&config, &client)?,
- Ok(old_ref) => incremental_backup(&old_ref, &config, &client)?,
+
+ let oldtemp = NamedTempFile::new()?;
+ let newtemp = NamedTempFile::new()?;
+
+ let (count, warnings) = match genlist.resolve("latest") {
+ Err(_) => {
+ info!("fresh backup without a previous generation");
+ let mut run = BackupRun::initial(config, &client)?;
+ let old = run.start(None, oldtemp.path())?;
+ run.backup_roots(config, &old, newtemp.path())?
+ }
+ Ok(old_id) => {
+ info!("incremental backup based on {}", old_id);
+ let mut run = BackupRun::incremental(config, &client)?;
+ let old = run.start(Some(&old_id), oldtemp.path())?;
+ run.backup_roots(config, &old, newtemp.path())?
+ }
};
+ let gen_id = upload_nascent_generation(&client, newtemp.path())?;
+
for w in warnings.iter() {
println!("warning: {}", w);
}
- report_stats(&runtime, file_count, &gen_id, warnings.len())?;
+ report_stats(&runtime, count, &gen_id, warnings.len())?;
Ok(())
}
@@ -53,60 +67,6 @@ fn report_stats(
Ok(())
}
-fn initial_backup(
- config: &ClientConfig,
- client: &BackupClient,
-) -> Result<(ChunkId, i64, Vec<BackupError>), ObnamError> {
- info!("fresh backup without a previous generation");
- let newtemp = NamedTempFile::new()?;
- let run = InitialBackup::new(config, &client)?;
- let mut all_warnings = vec![];
- let count = {
- let mut new = NascentGeneration::create(newtemp.path())?;
- for root in &config.roots {
- let iter = FsIterator::new(root, config.exclude_cache_tag_directories);
- let warnings = new.insert_iter(iter.map(|entry| run.backup(entry)))?;
- for w in warnings {
- all_warnings.push(w);
- }
- }
- new.file_count()
- };
- run.drop();
-
- let gen_id = upload_nascent_generation(client, newtemp.path())?;
- Ok((gen_id, count, all_warnings))
-}
-
-fn incremental_backup(
- old_ref: &str,
- config: &ClientConfig,
- client: &BackupClient,
-) -> Result<(ChunkId, i64, Vec<BackupError>), ObnamError> {
- info!("incremental backup based on {}", old_ref);
- let newtemp = NamedTempFile::new()?;
- let mut run = IncrementalBackup::new(config, &client)?;
- let mut all_warnings = vec![];
- let count = {
- let oldtemp = NamedTempFile::new()?;
- let old = run.fetch_previous_generation(old_ref, oldtemp.path())?;
- run.start_backup(&old)?;
- let mut new = NascentGeneration::create(newtemp.path())?;
- for root in &config.roots {
- let iter = FsIterator::new(root, config.exclude_cache_tag_directories);
- let warnings = new.insert_iter(iter.map(|entry| run.backup(entry, &old)))?;
- for w in warnings {
- all_warnings.push(w);
- }
- }
- new.file_count()
- };
- run.drop();
-
- let gen_id = upload_nascent_generation(client, newtemp.path())?;
- Ok((gen_id, count, all_warnings))
-}
-
fn upload_nascent_generation(
client: &BackupClient,
filename: &Path,
diff --git a/src/generation.rs b/src/generation.rs
index 85af1f5..e48dce2 100644
--- a/src/generation.rs
+++ b/src/generation.rs
@@ -32,6 +32,9 @@ pub enum NascentError {
#[error("SQL commit error: {0}")]
Commit(rusqlite::Error),
+
+ #[error("Failed to create temporary file: {0}")]
+ TempFile(#[from] std::io::Error),
}
pub type NascentResult<T> = Result<T, NascentError>;
@@ -221,6 +224,7 @@ mod sql {
use crate::backup_reason::Reason;
use crate::chunkid::ChunkId;
use crate::fsentry::FilesystemEntry;
+ use log::debug;
use rusqlite::{params, Connection, OpenFlags, Row, Statement, Transaction};
use std::os::unix::ffi::OsStrExt;
use std::path::Path;
@@ -403,12 +407,16 @@ mod sql {
stmt.query_map(params![path_into_blob(filename)], |row| row_to_entry(row))?;
match iter.next() {
None => Ok(None),
- Some(Err(e)) => Err(e.into()),
+ Some(Err(e)) => {
+ debug!("database lookup error: {}", e);
+ Err(e.into())
+ }
Some(Ok((fileno, json, reason))) => {
let entry = serde_json::from_str(&json)?;
if iter.next() == None {
Ok(Some((fileno, entry, reason)))
} else {
+ debug!("too many files in file lookup");
Err(LocalGenerationError::TooManyFiles(filename.to_path_buf()))
}
}