summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLars Wirzenius <liw@liw.fi>2021-07-22 06:29:12 +0000
committerLars Wirzenius <liw@liw.fi>2021-07-22 06:29:12 +0000
commitfb03ec58ee29638cf3492dd2077a7d1f373805e8 (patch)
treef7cc447140c5296c97cf2e517d53fd975146effb
parentdf5d50b2c2fe705de059d9ea983ce21ca49ed132 (diff)
parent2f13fae6d943e4408dad6c1f689e1ecfc369e314 (diff)
downloadobnam2-fb03ec58ee29638cf3492dd2077a7d1f373805e8.tar.gz
Merge branch 'feature/115-no-custom-results' into 'main'
Replace custom result types with standard `Result` Closes #115 See merge request obnam/obnam!166
-rw-r--r--src/backup_run.rs37
-rw-r--r--src/chunk.rs7
-rw-r--r--src/chunker.rs8
-rw-r--r--src/client.rs42
-rw-r--r--src/cmd/restore.rs20
-rw-r--r--src/config.rs4
-rw-r--r--src/fsentry.rs6
-rw-r--r--src/fsiter.rs10
-rw-r--r--src/generation.rs72
-rw-r--r--src/index.rs35
-rw-r--r--src/indexedstore.rs19
-rw-r--r--src/store.rs9
12 files changed, 136 insertions, 133 deletions
diff --git a/src/backup_run.rs b/src/backup_run.rs
index 622485d..3c03ec3 100644
--- a/src/backup_run.rs
+++ b/src/backup_run.rs
@@ -5,7 +5,7 @@ use crate::client::{BackupClient, ClientError};
use crate::config::ClientConfig;
use crate::error::ObnamError;
use crate::fsentry::FilesystemEntry;
-use crate::fsiter::{FsIterError, FsIterResult, FsIterator};
+use crate::fsiter::{FsIterError, FsIterator};
use crate::generation::{LocalGeneration, LocalGenerationError, NascentError, NascentGeneration};
use crate::policy::BackupPolicy;
use log::{info, warn};
@@ -30,10 +30,15 @@ pub enum BackupError {
LocalGenerationError(#[from] LocalGenerationError),
}
-pub type BackupResult<T> = Result<T, BackupError>;
+#[derive(Debug)]
+pub struct FsEntryBackupOutcome {
+ pub entry: FilesystemEntry,
+ pub ids: Vec<ChunkId>,
+ pub reason: Reason,
+}
impl<'a> BackupRun<'a> {
- pub fn initial(config: &ClientConfig, client: &'a BackupClient) -> BackupResult<Self> {
+ pub fn initial(config: &ClientConfig, client: &'a BackupClient) -> Result<Self, BackupError> {
Ok(Self {
client,
policy: BackupPolicy::default(),
@@ -42,7 +47,10 @@ impl<'a> BackupRun<'a> {
})
}
- pub fn incremental(config: &ClientConfig, client: &'a BackupClient) -> BackupResult<Self> {
+ pub fn incremental(
+ config: &ClientConfig,
+ client: &'a BackupClient,
+ ) -> Result<Self, BackupError> {
Ok(Self {
client,
policy: BackupPolicy::default(),
@@ -107,11 +115,12 @@ impl<'a> BackupRun<'a> {
self.finish();
Ok((count, all_warnings))
}
+
pub fn backup(
&self,
- entry: FsIterResult<FilesystemEntry>,
+ entry: Result<FilesystemEntry, FsIterError>,
old: &LocalGeneration,
- ) -> BackupResult<(FilesystemEntry, Vec<ChunkId>, Reason)> {
+ ) -> Result<FsEntryBackupOutcome, BackupError> {
match entry {
Err(err) => {
warn!("backup: {}", err);
@@ -145,7 +154,7 @@ impl<'a> BackupRun<'a> {
} else {
vec![]
};
- Ok((entry, ids, reason))
+ Ok(FsEntryBackupOutcome { entry, ids, reason })
}
}
}
@@ -167,13 +176,21 @@ fn backup_file(
path: &Path,
chunk_size: usize,
reason: Reason,
-) -> (FilesystemEntry, Vec<ChunkId>, Reason) {
+) -> FsEntryBackupOutcome {
let ids = client.upload_filesystem_entry(&entry, chunk_size);
match ids {
Err(err) => {
warn!("error backing up {}, skipping it: {}", path.display(), err);
- (entry.clone(), vec![], Reason::FileError)
+ FsEntryBackupOutcome {
+ entry: entry.clone(),
+ ids: vec![],
+ reason: Reason::FileError,
+ }
}
- Ok(ids) => (entry.clone(), ids, reason),
+ Ok(ids) => FsEntryBackupOutcome {
+ entry: entry.clone(),
+ ids,
+ reason,
+ },
}
}
diff --git a/src/chunk.rs b/src/chunk.rs
index 8631fd9..469185f 100644
--- a/src/chunk.rs
+++ b/src/chunk.rs
@@ -51,15 +51,12 @@ pub enum GenerationChunkError {
JsonGenerate(serde_json::Error),
}
-/// A result from a chunk operation.
-pub type GenerationChunkResult<T> = Result<T, GenerationChunkError>;
-
impl GenerationChunk {
pub fn new(chunk_ids: Vec<ChunkId>) -> Self {
Self { chunk_ids }
}
- pub fn from_data_chunk(chunk: &DataChunk) -> GenerationChunkResult<Self> {
+ pub fn from_data_chunk(chunk: &DataChunk) -> Result<Self, GenerationChunkError> {
let data = chunk.data();
let data = std::str::from_utf8(data)?;
serde_json::from_str(data).map_err(GenerationChunkError::JsonParse)
@@ -77,7 +74,7 @@ impl GenerationChunk {
self.chunk_ids.iter()
}
- pub fn to_data_chunk(&self, ended: &str) -> GenerationChunkResult<DataChunk> {
+ pub fn to_data_chunk(&self, ended: &str) -> Result<DataChunk, GenerationChunkError> {
let json: String =
serde_json::to_string(self).map_err(GenerationChunkError::JsonGenerate)?;
let bytes = json.as_bytes().to_vec();
diff --git a/src/chunker.rs b/src/chunker.rs
index a7a39f1..763c148 100644
--- a/src/chunker.rs
+++ b/src/chunker.rs
@@ -17,8 +17,6 @@ pub enum ChunkerError {
FileRead(PathBuf, std::io::Error),
}
-pub type ChunkerResult<T> = Result<T, ChunkerError>;
-
impl Chunker {
pub fn new(chunk_size: usize, handle: std::fs::File, filename: &Path) -> Self {
let mut buf = vec![];
@@ -31,7 +29,7 @@ impl Chunker {
}
}
- pub fn read_chunk(&mut self) -> ChunkerResult<Option<DataChunk>> {
+ pub fn read_chunk(&mut self) -> Result<Option<DataChunk>, ChunkerError> {
let mut used = 0;
loop {
@@ -58,9 +56,9 @@ impl Chunker {
}
impl Iterator for Chunker {
- type Item = ChunkerResult<DataChunk>;
+ type Item = Result<DataChunk, ChunkerError>;
- fn next(&mut self) -> Option<ChunkerResult<DataChunk>> {
+ fn next(&mut self) -> Option<Result<DataChunk, ChunkerError>> {
match self.read_chunk() {
Ok(None) => None,
Ok(Some(chunk)) => Some(Ok(chunk)),
diff --git a/src/client.rs b/src/client.rs
index b1f9976..9862ad3 100644
--- a/src/client.rs
+++ b/src/client.rs
@@ -81,14 +81,12 @@ pub enum ClientError {
FileWrite(PathBuf, std::io::Error),
}
-pub type ClientResult<T> = Result<T, ClientError>;
-
pub struct BackupClient {
chunk_client: ChunkClient,
}
impl BackupClient {
- pub fn new(config: &ClientConfig) -> ClientResult<Self> {
+ pub fn new(config: &ClientConfig) -> Result<Self, ClientError> {
info!("creating backup client with config: {:#?}", config);
Ok(Self {
chunk_client: ChunkClient::new(config)?,
@@ -99,7 +97,7 @@ impl BackupClient {
&self,
e: &FilesystemEntry,
size: usize,
- ) -> ClientResult<Vec<ChunkId>> {
+ ) -> Result<Vec<ChunkId>, ClientError> {
let path = e.pathbuf();
info!("uploading {:?}", path);
let ids = match e.kind() {
@@ -113,7 +111,7 @@ impl BackupClient {
Ok(ids)
}
- pub fn upload_generation(&self, filename: &Path, size: usize) -> ClientResult<ChunkId> {
+ pub fn upload_generation(&self, filename: &Path, size: usize) -> Result<ChunkId, ClientError> {
info!("upload SQLite {}", filename.display());
let ids = self.read_file(filename, size)?;
let gen = GenerationChunk::new(ids);
@@ -123,7 +121,7 @@ impl BackupClient {
Ok(gen_id)
}
- fn read_file(&self, filename: &Path, size: usize) -> ClientResult<Vec<ChunkId>> {
+ fn read_file(&self, filename: &Path, size: usize) -> Result<Vec<ChunkId>, ClientError> {
info!("upload file {}", filename.display());
let file = std::fs::File::open(filename)
.map_err(|err| ClientError::FileOpen(filename.to_path_buf(), err))?;
@@ -132,15 +130,15 @@ impl BackupClient {
Ok(chunk_ids)
}
- pub fn has_chunk(&self, meta: &ChunkMeta) -> ClientResult<Option<ChunkId>> {
+ pub fn has_chunk(&self, meta: &ChunkMeta) -> Result<Option<ChunkId>, ClientError> {
self.chunk_client.has_chunk(meta)
}
- pub fn upload_chunk(&self, chunk: DataChunk) -> ClientResult<ChunkId> {
+ pub fn upload_chunk(&self, chunk: DataChunk) -> Result<ChunkId, ClientError> {
self.chunk_client.upload_chunk(chunk)
}
- pub fn upload_new_file_chunks(&self, chunker: Chunker) -> ClientResult<Vec<ChunkId>> {
+ pub fn upload_new_file_chunks(&self, chunker: Chunker) -> Result<Vec<ChunkId>, ClientError> {
let mut chunk_ids = vec![];
for item in chunker {
let chunk = item?;
@@ -157,22 +155,26 @@ impl BackupClient {
Ok(chunk_ids)
}
- pub fn list_generations(&self) -> ClientResult<GenerationList> {
+ pub fn list_generations(&self) -> Result<GenerationList, ClientError> {
self.chunk_client.list_generations()
}
- pub fn fetch_chunk(&self, chunk_id: &ChunkId) -> ClientResult<DataChunk> {
+ pub fn fetch_chunk(&self, chunk_id: &ChunkId) -> Result<DataChunk, ClientError> {
self.chunk_client.fetch_chunk(chunk_id)
}
- fn fetch_generation_chunk(&self, gen_id: &str) -> ClientResult<GenerationChunk> {
+ fn fetch_generation_chunk(&self, gen_id: &str) -> Result<GenerationChunk, ClientError> {
let chunk_id = ChunkId::recreate(gen_id);
let chunk = self.fetch_chunk(&chunk_id)?;
let gen = GenerationChunk::from_data_chunk(&chunk)?;
Ok(gen)
}
- pub fn fetch_generation(&self, gen_id: &str, dbname: &Path) -> ClientResult<LocalGeneration> {
+ pub fn fetch_generation(
+ &self,
+ gen_id: &str,
+ dbname: &Path,
+ ) -> Result<LocalGeneration, ClientError> {
let gen = self.fetch_generation_chunk(gen_id)?;
// Fetch the SQLite file, storing it in the named file.
@@ -198,7 +200,7 @@ pub struct ChunkClient {
}
impl ChunkClient {
- pub fn new(config: &ClientConfig) -> ClientResult<Self> {
+ pub fn new(config: &ClientConfig) -> Result<Self, ClientError> {
let pass = config.passwords()?;
let client = Client::builder()
@@ -220,7 +222,7 @@ impl ChunkClient {
format!("{}/chunks", self.base_url())
}
- pub fn has_chunk(&self, meta: &ChunkMeta) -> ClientResult<Option<ChunkId>> {
+ pub fn has_chunk(&self, meta: &ChunkMeta) -> Result<Option<ChunkId>, ClientError> {
let body = match self.get("", &[("sha256", meta.sha256())]) {
Ok((_, body)) => body,
Err(err) => return Err(err),
@@ -238,7 +240,7 @@ impl ChunkClient {
Ok(has)
}
- pub fn upload_chunk(&self, chunk: DataChunk) -> ClientResult<ChunkId> {
+ pub fn upload_chunk(&self, chunk: DataChunk) -> Result<ChunkId, ClientError> {
let enc = self.cipher.encrypt_chunk(&chunk)?;
let res = self
.client
@@ -259,7 +261,7 @@ impl ChunkClient {
Ok(chunk_id)
}
- pub fn list_generations(&self) -> ClientResult<GenerationList> {
+ pub fn list_generations(&self) -> Result<GenerationList, ClientError> {
let (_, body) = self.get("", &[("generation", "true")])?;
let map: HashMap<String, ChunkMeta> =
@@ -272,7 +274,7 @@ impl ChunkClient {
Ok(GenerationList::new(finished))
}
- pub fn fetch_chunk(&self, chunk_id: &ChunkId) -> ClientResult<DataChunk> {
+ pub fn fetch_chunk(&self, chunk_id: &ChunkId) -> Result<DataChunk, ClientError> {
let (headers, body) = self.get(&format!("/{}", chunk_id), &[])?;
let meta = self.get_chunk_meta_header(chunk_id, &headers)?;
@@ -282,7 +284,7 @@ impl ChunkClient {
Ok(chunk)
}
- fn get(&self, path: &str, query: &[(&str, &str)]) -> ClientResult<(HeaderMap, Vec<u8>)> {
+ fn get(&self, path: &str, query: &[(&str, &str)]) -> Result<(HeaderMap, Vec<u8>), ClientError> {
let url = format!("{}{}", &self.chunks_url(), path);
info!("GET {}", url);
@@ -316,7 +318,7 @@ impl ChunkClient {
&self,
chunk_id: &ChunkId,
headers: &HeaderMap,
- ) -> ClientResult<ChunkMeta> {
+ ) -> Result<ChunkMeta, ClientError> {
let meta = headers.get("chunk-meta");
if meta.is_none() {
diff --git a/src/cmd/restore.rs b/src/cmd/restore.rs
index 0e349c2..d794fe4 100644
--- a/src/cmd/restore.rs
+++ b/src/cmd/restore.rs
@@ -118,8 +118,6 @@ pub enum RestoreError {
SetTimestamp(PathBuf, std::io::Error),
}
-pub type RestoreResult<T> = Result<T, RestoreError>;
-
fn restore_generation(
client: &BackupClient,
gen: &LocalGeneration,
@@ -127,7 +125,7 @@ fn restore_generation(
entry: &FilesystemEntry,
to: &Path,
progress: &ProgressBar,
-) -> RestoreResult<()> {
+) -> Result<(), RestoreError> {
info!("restoring {:?}", entry);
progress.set_message(format!("{}", entry.pathbuf().display()));
progress.inc(1);
@@ -143,14 +141,14 @@ fn restore_generation(
Ok(())
}
-fn restore_directory(path: &Path) -> RestoreResult<()> {
+fn restore_directory(path: &Path) -> Result<(), RestoreError> {
debug!("restoring directory {}", path.display());
std::fs::create_dir_all(path)
.map_err(|err| RestoreError::CreateDirs(path.to_path_buf(), err))?;
Ok(())
}
-fn restore_directory_metadata(entry: &FilesystemEntry, to: &Path) -> RestoreResult<()> {
+fn restore_directory_metadata(entry: &FilesystemEntry, to: &Path) -> Result<(), RestoreError> {
let to = restored_path(entry, to)?;
match entry.kind() {
FilesystemKind::Directory => restore_metadata(&to, entry)?,
@@ -162,7 +160,7 @@ fn restore_directory_metadata(entry: &FilesystemEntry, to: &Path) -> RestoreResu
Ok(())
}
-fn restored_path(entry: &FilesystemEntry, to: &Path) -> RestoreResult<PathBuf> {
+fn restored_path(entry: &FilesystemEntry, to: &Path) -> Result<PathBuf, RestoreError> {
let path = &entry.pathbuf();
let path = if path.is_absolute() {
path.strip_prefix("/")?
@@ -178,7 +176,7 @@ fn restore_regular(
path: &Path,
fileid: i64,
entry: &FilesystemEntry,
-) -> RestoreResult<()> {
+) -> Result<(), RestoreError> {
debug!("restoring regular {}", path.display());
let parent = path.parent().unwrap();
debug!(" mkdir {}", parent.display());
@@ -199,7 +197,7 @@ fn restore_regular(
Ok(())
}
-fn restore_symlink(path: &Path, entry: &FilesystemEntry) -> RestoreResult<()> {
+fn restore_symlink(path: &Path, entry: &FilesystemEntry) -> Result<(), RestoreError> {
debug!("restoring symlink {}", path.display());
let parent = path.parent().unwrap();
debug!(" mkdir {}", parent.display());
@@ -214,14 +212,14 @@ fn restore_symlink(path: &Path, entry: &FilesystemEntry) -> RestoreResult<()> {
Ok(())
}
-fn restore_socket(path: &Path, entry: &FilesystemEntry) -> RestoreResult<()> {
+fn restore_socket(path: &Path, entry: &FilesystemEntry) -> Result<(), RestoreError> {
debug!("creating Unix domain socket {:?}", path);
UnixListener::bind(path).map_err(|err| RestoreError::UnixBind(path.to_path_buf(), err))?;
restore_metadata(path, entry)?;
Ok(())
}
-fn restore_fifo(path: &Path, entry: &FilesystemEntry) -> RestoreResult<()> {
+fn restore_fifo(path: &Path, entry: &FilesystemEntry) -> Result<(), RestoreError> {
debug!("creating fifo {:?}", path);
let filename = path_to_cstring(path);
match unsafe { mkfifo(filename.as_ptr(), 0) } {
@@ -233,7 +231,7 @@ fn restore_fifo(path: &Path, entry: &FilesystemEntry) -> RestoreResult<()> {
Ok(())
}
-fn restore_metadata(path: &Path, entry: &FilesystemEntry) -> RestoreResult<()> {
+fn restore_metadata(path: &Path, entry: &FilesystemEntry) -> Result<(), RestoreError> {
debug!("restoring metadata for {}", entry.pathbuf().display());
debug!("restoring metadata for {:?}", path);
diff --git a/src/config.rs b/src/config.rs
index 0d4e9de..8f5d4d8 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -31,7 +31,7 @@ pub struct ClientConfig {
}
impl ClientConfig {
- pub fn read(filename: &Path) -> ClientConfigResult<Self> {
+ pub fn read(filename: &Path) -> Result<Self, ClientConfigError> {
trace!("read_config: filename={:?}", filename);
let config = std::fs::read_to_string(filename)
.map_err(|err| ClientConfigError::Read(filename.to_path_buf(), err))?;
@@ -102,8 +102,6 @@ pub enum ClientConfigError {
YamlParse(PathBuf, serde_yaml::Error),
}
-pub type ClientConfigResult<T> = Result<T, ClientConfigError>;
-
fn expand_tilde(path: &Path) -> PathBuf {
if path.starts_with("~/") {
if let Some(home) = std::env::var_os("HOME") {
diff --git a/src/fsentry.rs b/src/fsentry.rs
index 3f532cc..1a30d41 100644
--- a/src/fsentry.rs
+++ b/src/fsentry.rs
@@ -56,11 +56,9 @@ pub enum FsEntryError {
ReadLink(PathBuf, std::io::Error),
}
-pub type FsEntryResult<T> = Result<T, FsEntryError>;
-
#[allow(clippy::len_without_is_empty)]
impl FilesystemEntry {
- pub fn from_metadata(path: &Path, meta: &Metadata) -> FsEntryResult<Self> {
+ pub fn from_metadata(path: &Path, meta: &Metadata) -> Result<Self, FsEntryError> {
let kind = FilesystemKind::from_file_type(meta.file_type());
let symlink_target = if kind == FilesystemKind::Symlink {
debug!("reading symlink target for {:?}", path);
@@ -184,7 +182,7 @@ impl FilesystemKind {
}
}
- pub fn from_code(code: u8) -> FsEntryResult<Self> {
+ pub fn from_code(code: u8) -> Result<Self, FsEntryError> {
match code {
0 => Ok(FilesystemKind::Regular),
1 => Ok(FilesystemKind::Directory),
diff --git a/src/fsiter.rs b/src/fsiter.rs
index 56630fa..aea9078 100644
--- a/src/fsiter.rs
+++ b/src/fsiter.rs
@@ -20,8 +20,6 @@ pub enum FsIterError {
FsEntryError(#[from] FsEntryError),
}
-pub type FsIterResult<T> = Result<T, FsIterError>;
-
impl FsIterator {
pub fn new(root: &Path, exclude_cache_tag_directories: bool) -> Self {
Self {
@@ -34,7 +32,7 @@ impl FsIterator {
}
impl Iterator for FsIterator {
- type Item = FsIterResult<FilesystemEntry>;
+ type Item = Result<FilesystemEntry, FsIterError>;
fn next(&mut self) -> Option<Self::Item> {
self.iter.next()
}
@@ -47,7 +45,7 @@ struct SkipCachedirs {
exclude_cache_tag_directories: bool,
// This is the last tag we've found. `next()` will yield it before asking `iter` for more
// entries.
- cachedir_tag: Option<FsIterResult<FilesystemEntry>>,
+ cachedir_tag: Option<Result<FilesystemEntry, FsIterError>>,
}
impl SkipCachedirs {
@@ -102,7 +100,7 @@ impl SkipCachedirs {
}
impl Iterator for SkipCachedirs {
- type Item = FsIterResult<FilesystemEntry>;
+ type Item = Result<FilesystemEntry, FsIterError>;
fn next(&mut self) -> Option<Self::Item> {
self.cachedir_tag.take().or_else(|| {
@@ -120,7 +118,7 @@ impl Iterator for SkipCachedirs {
}
}
-fn new_entry(path: &Path) -> FsIterResult<FilesystemEntry> {
+fn new_entry(path: &Path) -> Result<FilesystemEntry, FsIterError> {
let meta = std::fs::symlink_metadata(path);
debug!("metadata for {:?}: {:?}", path, meta);
let meta = match meta {
diff --git a/src/generation.rs b/src/generation.rs
index e48dce2..eeb0e76 100644
--- a/src/generation.rs
+++ b/src/generation.rs
@@ -1,5 +1,5 @@
use crate::backup_reason::Reason;
-use crate::backup_run::{BackupError, BackupResult};
+use crate::backup_run::{BackupError, FsEntryBackupOutcome};
use crate::chunkid::ChunkId;
use crate::fsentry::FilesystemEntry;
use log::debug;
@@ -37,10 +37,8 @@ pub enum NascentError {
TempFile(#[from] std::io::Error),
}
-pub type NascentResult<T> = Result<T, NascentError>;
-
impl NascentGeneration {
- pub fn create<P>(filename: P) -> NascentResult<Self>
+ pub fn create<P>(filename: P) -> Result<Self, NascentError>
where
P: AsRef<Path>,
{
@@ -57,7 +55,7 @@ impl NascentGeneration {
e: FilesystemEntry,
ids: &[ChunkId],
reason: Reason,
- ) -> NascentResult<()> {
+ ) -> Result<(), NascentError> {
let t = self.conn.transaction().map_err(NascentError::Transaction)?;
self.fileno += 1;
sql::insert_one(&t, e, self.fileno, ids, reason)?;
@@ -67,8 +65,8 @@ impl NascentGeneration {
pub fn insert_iter(
&mut self,
- entries: impl Iterator<Item = BackupResult<(FilesystemEntry, Vec<ChunkId>, Reason)>>,
- ) -> NascentResult<Vec<BackupError>> {
+ entries: impl Iterator<Item = Result<FsEntryBackupOutcome, BackupError>>,
+ ) -> Result<Vec<BackupError>, NascentError> {
let t = self.conn.transaction().map_err(NascentError::Transaction)?;
let mut warnings = vec![];
for r in entries {
@@ -77,9 +75,9 @@ impl NascentGeneration {
debug!("ignoring backup error {}", err);
warnings.push(err);
}
- Ok((e, ids, reason)) => {
+ Ok(FsEntryBackupOutcome { entry, ids, reason }) => {
self.fileno += 1;
- sql::insert_one(&t, e, self.fileno, &ids[..], reason)?;
+ sql::insert_one(&t, entry, self.fileno, &ids[..], reason)?;
}
}
}
@@ -155,8 +153,6 @@ pub enum LocalGenerationError {
IoError(#[from] std::io::Error),
}
-pub type LocalGenerationResult<T> = Result<T, LocalGenerationError>;
-
pub struct BackedUpFile {
fileno: FileId,
entry: FilesystemEntry,
@@ -187,7 +183,7 @@ impl BackedUpFile {
}
impl LocalGeneration {
- pub fn open<P>(filename: P) -> LocalGenerationResult<Self>
+ pub fn open<P>(filename: P) -> Result<Self, LocalGenerationError>
where
P: AsRef<Path>,
{
@@ -195,23 +191,29 @@ impl LocalGeneration {
Ok(Self { conn })
}
- pub fn file_count(&self) -> LocalGenerationResult<i64> {
+ pub fn file_count(&self) -> Result<i64, LocalGenerationError> {
sql::file_count(&self.conn)
}
- pub fn files(&self) -> LocalGenerationResult<sql::SqlResults<BackedUpFile>> {
+ pub fn files(&self) -> Result<sql::SqlResults<BackedUpFile>, LocalGenerationError> {
sql::files(&self.conn)
}
- pub fn chunkids(&self, fileno: FileId) -> LocalGenerationResult<sql::SqlResults<ChunkId>> {
+ pub fn chunkids(
+ &self,
+ fileno: FileId,
+ ) -> Result<sql::SqlResults<ChunkId>, LocalGenerationError> {
sql::chunkids(&self.conn, fileno)
}
- pub fn get_file(&self, filename: &Path) -> LocalGenerationResult<Option<FilesystemEntry>> {
+ pub fn get_file(
+ &self,
+ filename: &Path,
+ ) -> Result<Option<FilesystemEntry>, LocalGenerationError> {
sql::get_file(&self.conn, filename)
}
- pub fn get_fileno(&self, filename: &Path) -> LocalGenerationResult<Option<FileId>> {
+ pub fn get_fileno(&self, filename: &Path) -> Result<Option<FileId>, LocalGenerationError> {
sql::get_fileno(&self.conn, filename)
}
}
@@ -220,7 +222,6 @@ mod sql {
use super::BackedUpFile;
use super::FileId;
use super::LocalGenerationError;
- use super::LocalGenerationResult;
use crate::backup_reason::Reason;
use crate::chunkid::ChunkId;
use crate::fsentry::FilesystemEntry;
@@ -229,7 +230,7 @@ mod sql {
use std::os::unix::ffi::OsStrExt;
use std::path::Path;
- pub fn create_db(filename: &Path) -> LocalGenerationResult<Connection> {
+ pub fn create_db(filename: &Path) -> Result<Connection, LocalGenerationError> {
let flags = OpenFlags::SQLITE_OPEN_CREATE | OpenFlags::SQLITE_OPEN_READ_WRITE;
let conn = Connection::open_with_flags(filename, flags)?;
conn.execute(
@@ -246,7 +247,7 @@ mod sql {
Ok(conn)
}
- pub fn open_db(filename: &Path) -> LocalGenerationResult<Connection> {
+ pub fn open_db(filename: &Path) -> Result<Connection, LocalGenerationError> {
let flags = OpenFlags::SQLITE_OPEN_READ_WRITE;
let conn = Connection::open_with_flags(filename, flags)?;
conn.pragma_update(None, "journal_mode", &"WAL")?;
@@ -259,7 +260,7 @@ mod sql {
fileno: FileId,
ids: &[ChunkId],
reason: Reason,
- ) -> LocalGenerationResult<()> {
+ ) -> Result<(), LocalGenerationError> {
let json = serde_json::to_string(&e)?;
t.execute(
"INSERT INTO files (fileno, filename, json, reason) VALUES (?1, ?2, ?3, ?4)",
@@ -285,7 +286,7 @@ mod sql {
Ok((fileno, json, reason))
}
- pub fn file_count(conn: &Connection) -> LocalGenerationResult<FileId> {
+ pub fn file_count(conn: &Connection) -> Result<FileId, LocalGenerationError> {
let mut stmt = conn.prepare("SELECT count(*) FROM files")?;
let mut iter = stmt.query_map(params![], |row| row.get(0))?;
let count = iter.next().expect("SQL count result (1)");
@@ -294,12 +295,13 @@ mod sql {
}
// A pointer to a "fallible iterator" over values of type `T`, which is to say it's an iterator
- // over values of type `LocalGenerationResult<T>`. The iterator is only valid for the lifetime
- // 'stmt.
+ // over values of type `Result<T, LocalGenerationError>`. The iterator is only valid for the
+ // lifetime 'stmt.
//
// The fact that it's a pointer (`Box<dyn ...>`) means we don't care what the actual type of
// the iterator is, and who produces it.
- type SqlResultsIterator<'stmt, T> = Box<dyn Iterator<Item = LocalGenerationResult<T>> + 'stmt>;
+ type SqlResultsIterator<'stmt, T> =
+ Box<dyn Iterator<Item = Result<T, LocalGenerationError>> + 'stmt>;
// A pointer to a function which, when called on a prepared SQLite statement, would create
// a "fallible iterator" over values of type `ItemT`. (See above for an explanation of what
@@ -323,7 +325,8 @@ mod sql {
type CreateIterFn<'conn, ItemT> = Box<
dyn for<'stmt> Fn(
&'stmt mut Statement<'conn>,
- ) -> LocalGenerationResult<SqlResultsIterator<'stmt, ItemT>>,
+ )
+ -> Result<SqlResultsIterator<'stmt, ItemT>, LocalGenerationError>,
>;
pub struct SqlResults<'conn, ItemT> {
@@ -336,17 +339,17 @@ mod sql {
conn: &'conn Connection,
statement: &str,
create_iter: CreateIterFn<'conn, ItemT>,
- ) -> LocalGenerationResult<Self> {
+ ) -> Result<Self, LocalGenerationError> {
let stmt = conn.prepare(statement)?;
Ok(Self { stmt, create_iter })
}
- pub fn iter(&'_ mut self) -> LocalGenerationResult<SqlResultsIterator<'_, ItemT>> {
+ pub fn iter(&'_ mut self) -> Result<SqlResultsIterator<'_, ItemT>, LocalGenerationError> {
(self.create_iter)(&mut self.stmt)
}
}
- pub fn files(conn: &Connection) -> LocalGenerationResult<SqlResults<BackedUpFile>> {
+ pub fn files(conn: &Connection) -> Result<SqlResults<BackedUpFile>, LocalGenerationError> {
SqlResults::new(
conn,
"SELECT * FROM files",
@@ -366,7 +369,7 @@ mod sql {
pub fn chunkids(
conn: &Connection,
fileno: FileId,
- ) -> LocalGenerationResult<SqlResults<ChunkId>> {
+ ) -> Result<SqlResults<ChunkId>, LocalGenerationError> {
SqlResults::new(
conn,
"SELECT chunkid FROM chunks WHERE fileno = ?1",
@@ -384,14 +387,17 @@ mod sql {
pub fn get_file(
conn: &Connection,
filename: &Path,
- ) -> LocalGenerationResult<Option<FilesystemEntry>> {
+ ) -> Result<Option<FilesystemEntry>, LocalGenerationError> {
match get_file_and_fileno(conn, filename)? {
None => Ok(None),
Some((_, e, _)) => Ok(Some(e)),
}
}
- pub fn get_fileno(conn: &Connection, filename: &Path) -> LocalGenerationResult<Option<FileId>> {
+ pub fn get_fileno(
+ conn: &Connection,
+ filename: &Path,
+ ) -> Result<Option<FileId>, LocalGenerationError> {
match get_file_and_fileno(conn, filename)? {
None => Ok(None),
Some((id, _, _)) => Ok(Some(id)),
@@ -401,7 +407,7 @@ mod sql {
fn get_file_and_fileno(
conn: &Connection,
filename: &Path,
- ) -> LocalGenerationResult<Option<(FileId, FilesystemEntry, String)>> {
+ ) -> Result<Option<(FileId, FilesystemEntry, String)>, LocalGenerationError> {
let mut stmt = conn.prepare("SELECT * FROM files WHERE filename = ?1")?;
let mut iter =
stmt.query_map(params![path_into_blob(filename)], |row| row_to_entry(row))?;
diff --git a/src/index.rs b/src/index.rs
index 887238c..e6bbb95 100644
--- a/src/index.rs
+++ b/src/index.rs
@@ -33,11 +33,8 @@ pub enum IndexError {
SqlError(#[from] rusqlite::Error),
}
-/// A result from an `Index` operation.
-pub type IndexResult<T> = Result<T, IndexError>;
-
impl Index {
- pub fn new<P: AsRef<Path>>(dirname: P) -> IndexResult<Self> {
+ pub fn new<P: AsRef<Path>>(dirname: P) -> Result<Self, IndexError> {
let filename = dirname.as_ref().join("meta.db");
let conn = if filename.exists() {
sql::open_db(&filename)?
@@ -53,30 +50,30 @@ impl Index {
})
}
- pub fn insert_meta(&mut self, id: ChunkId, meta: ChunkMeta) -> IndexResult<()> {
+ pub fn insert_meta(&mut self, id: ChunkId, meta: ChunkMeta) -> Result<(), IndexError> {
let t = self.conn.transaction()?;
sql::insert(&t, &id, &meta)?;
t.commit()?;
Ok(())
}
- pub fn get_meta(&self, id: &ChunkId) -> IndexResult<ChunkMeta> {
+ pub fn get_meta(&self, id: &ChunkId) -> Result<ChunkMeta, IndexError> {
sql::lookup(&self.conn, id)
}
- pub fn remove_meta(&mut self, id: &ChunkId) -> IndexResult<()> {
+ pub fn remove_meta(&mut self, id: &ChunkId) -> Result<(), IndexError> {
sql::remove(&self.conn, id)
}
- pub fn find_by_sha256(&self, sha256: &str) -> IndexResult<Vec<ChunkId>> {
+ pub fn find_by_sha256(&self, sha256: &str) -> Result<Vec<ChunkId>, IndexError> {
sql::find_by_256(&self.conn, sha256)
}
- pub fn find_generations(&self) -> IndexResult<Vec<ChunkId>> {
+ pub fn find_generations(&self) -> Result<Vec<ChunkId>, IndexError> {
sql::find_generations(&self.conn)
}
- pub fn all_chunks(&self) -> IndexResult<Vec<ChunkId>> {
+ pub fn all_chunks(&self) -> Result<Vec<ChunkId>, IndexError> {
sql::find_chunk_ids(&self.conn)
}
}
@@ -155,14 +152,14 @@ mod test {
}
mod sql {
- use super::{IndexError, IndexResult};
+ use super::IndexError;
use crate::chunkid::ChunkId;
use crate::chunkmeta::ChunkMeta;
use log::error;
use rusqlite::{params, Connection, OpenFlags, Row, Transaction};
use std::path::Path;
- pub fn create_db(filename: &Path) -> IndexResult<Connection> {
+ pub fn create_db(filename: &Path) -> Result<Connection, IndexError> {
let flags = OpenFlags::SQLITE_OPEN_CREATE | OpenFlags::SQLITE_OPEN_READ_WRITE;
let conn = Connection::open_with_flags(filename, flags)?;
conn.execute(
@@ -178,14 +175,14 @@ mod sql {
Ok(conn)
}
- pub fn open_db(filename: &Path) -> IndexResult<Connection> {
+ pub fn open_db(filename: &Path) -> Result<Connection, IndexError> {
let flags = OpenFlags::SQLITE_OPEN_READ_WRITE;
let conn = Connection::open_with_flags(filename, flags)?;
conn.pragma_update(None, "journal_mode", &"WAL")?;
Ok(conn)
}
- pub fn insert(t: &Transaction, chunkid: &ChunkId, meta: &ChunkMeta) -> IndexResult<()> {
+ pub fn insert(t: &Transaction, chunkid: &ChunkId, meta: &ChunkMeta) -> Result<(), IndexError> {
let chunkid = format!("{}", chunkid);
let sha256 = meta.sha256();
let generation = if meta.is_generation() { 1 } else { 0 };
@@ -197,12 +194,12 @@ mod sql {
Ok(())
}
- pub fn remove(conn: &Connection, chunkid: &ChunkId) -> IndexResult<()> {
+ pub fn remove(conn: &Connection, chunkid: &ChunkId) -> Result<(), IndexError> {
conn.execute("DELETE FROM chunks WHERE id IS ?1", params![chunkid])?;
Ok(())
}
- pub fn lookup(conn: &Connection, id: &ChunkId) -> IndexResult<ChunkMeta> {
+ pub fn lookup(conn: &Connection, id: &ChunkId) -> Result<ChunkMeta, IndexError> {
let mut stmt = conn.prepare("SELECT * FROM chunks WHERE id IS ?1")?;
let iter = stmt.query_map(params![id], |row| row_to_meta(row))?;
let mut metas: Vec<ChunkMeta> = vec![];
@@ -225,7 +222,7 @@ mod sql {
Ok(r)
}
- pub fn find_by_256(conn: &Connection, sha256: &str) -> IndexResult<Vec<ChunkId>> {
+ pub fn find_by_256(conn: &Connection, sha256: &str) -> Result<Vec<ChunkId>, IndexError> {
let mut stmt = conn.prepare("SELECT id FROM chunks WHERE sha256 IS ?1")?;
let iter = stmt.query_map(params![sha256], |row| row_to_id(row))?;
let mut ids = vec![];
@@ -236,7 +233,7 @@ mod sql {
Ok(ids)
}
- pub fn find_generations(conn: &Connection) -> IndexResult<Vec<ChunkId>> {
+ pub fn find_generations(conn: &Connection) -> Result<Vec<ChunkId>, IndexError> {
let mut stmt = conn.prepare("SELECT id FROM chunks WHERE generation IS 1")?;
let iter = stmt.query_map(params![], |row| row_to_id(row))?;
let mut ids = vec![];
@@ -247,7 +244,7 @@ mod sql {
Ok(ids)
}
- pub fn find_chunk_ids(conn: &Connection) -> IndexResult<Vec<ChunkId>> {
+ pub fn find_chunk_ids(conn: &Connection) -> Result<Vec<ChunkId>, IndexError> {
let mut stmt = conn.prepare("SELECT id FROM chunks WHERE generation IS 0")?;
let iter = stmt.query_map(params![], |row| row_to_id(row))?;
let mut ids = vec![];
diff --git a/src/indexedstore.rs b/src/indexedstore.rs
index b05cfba..c77b552 100644
--- a/src/indexedstore.rs
+++ b/src/indexedstore.rs
@@ -29,45 +29,42 @@ pub enum IndexedError {
SqlError(#[from] StoreError),
}
-/// A result from an `Index` operation.
-pub type IndexedResult<T> = Result<T, IndexedError>;
-
impl IndexedStore {
- pub fn new(dirname: &Path) -> IndexedResult<Self> {
+ pub fn new(dirname: &Path) -> Result<Self, IndexedError> {
let store = Store::new(dirname);
let index = Index::new(dirname)?;
Ok(Self { store, index })
}
- pub fn save(&mut self, chunk: &DataChunk) -> IndexedResult<ChunkId> {
+ pub fn save(&mut self, chunk: &DataChunk) -> Result<ChunkId, IndexedError> {
let id = ChunkId::new();
self.store.save(&id, chunk)?;
self.insert_meta(&id, chunk.meta())?;
Ok(id)
}
- fn insert_meta(&mut self, id: &ChunkId, meta: &ChunkMeta) -> IndexedResult<()> {
+ fn insert_meta(&mut self, id: &ChunkId, meta: &ChunkMeta) -> Result<(), IndexedError> {
self.index.insert_meta(id.clone(), meta.clone())?;
Ok(())
}
- pub fn load(&self, id: &ChunkId) -> IndexedResult<(DataChunk, ChunkMeta)> {
+ pub fn load(&self, id: &ChunkId) -> Result<(DataChunk, ChunkMeta), IndexedError> {
Ok((self.store.load(id)?, self.load_meta(id)?))
}
- pub fn load_meta(&self, id: &ChunkId) -> IndexedResult<ChunkMeta> {
+ pub fn load_meta(&self, id: &ChunkId) -> Result<ChunkMeta, IndexedError> {
Ok(self.index.get_meta(id)?)
}
- pub fn find_by_sha256(&self, sha256: &str) -> IndexedResult<Vec<ChunkId>> {
+ pub fn find_by_sha256(&self, sha256: &str) -> Result<Vec<ChunkId>, IndexedError> {
Ok(self.index.find_by_sha256(sha256)?)
}
- pub fn find_generations(&self) -> IndexedResult<Vec<ChunkId>> {
+ pub fn find_generations(&self) -> Result<Vec<ChunkId>, IndexedError> {
Ok(self.index.find_generations()?)
}
- pub fn remove(&mut self, id: &ChunkId) -> IndexedResult<()> {
+ pub fn remove(&mut self, id: &ChunkId) -> Result<(), IndexedError> {
self.index.remove_meta(id)?;
self.store.delete(id)?;
Ok(())
diff --git a/src/store.rs b/src/store.rs
index bccecc7..830074e 100644
--- a/src/store.rs
+++ b/src/store.rs
@@ -14,9 +14,6 @@ pub struct Store {
/// An error from a `Store` operation.
pub type StoreError = std::io::Error;
-/// A result from an `Store` operation.
-pub type StoreResult<T> = Result<T, StoreError>;
-
impl Store {
    /// Create a new Store to represent on-disk storage of chunks.
pub fn new(dir: &Path) -> Self {
@@ -42,7 +39,7 @@ impl Store {
}
/// Save a chunk into a store.
- pub fn save(&self, id: &ChunkId, chunk: &DataChunk) -> StoreResult<()> {
+ pub fn save(&self, id: &ChunkId, chunk: &DataChunk) -> Result<(), StoreError> {
let (dir, metaname, dataname) = &self.filenames(id);
if !dir.exists() {
@@ -55,7 +52,7 @@ impl Store {
}
/// Load a chunk from a store.
- pub fn load(&self, id: &ChunkId) -> StoreResult<DataChunk> {
+ pub fn load(&self, id: &ChunkId) -> Result<DataChunk, StoreError> {
let (_, metaname, dataname) = &self.filenames(id);
let meta = std::fs::read(&metaname)?;
let meta = serde_json::from_slice(&meta)?;
@@ -66,7 +63,7 @@ impl Store {
}
/// Delete a chunk from a store.
- pub fn delete(&self, id: &ChunkId) -> StoreResult<()> {
+ pub fn delete(&self, id: &ChunkId) -> Result<(), StoreError> {
let (_, metaname, dataname) = &self.filenames(id);
std::fs::remove_file(&metaname)?;
std::fs::remove_file(&dataname)?;