author      Lars Wirzenius <liw@liw.fi>  2021-12-31 09:00:21 +0200
committer   Lars Wirzenius <liw@liw.fi>  2021-12-31 12:10:10 +0200
commit      acf1ba3f8f1492b961c9a6eb09eb93e882f5eb3f (patch)
tree        1ce0288f878cac71990bb01f358d6035f1626c92
parent      686e87981db210fa443404c8473dfe7a3f39b241 (diff)
download    obnam2-acf1ba3f8f1492b961c9a6eb09eb93e882f5eb3f.tar.gz
docs: add documentation comments to crate
Also, make it an error for a public symbol to not be documented.

Sponsored-by: author
-rw-r--r--   src/backup_progress.rs    22
-rw-r--r--   src/backup_reason.rs      21
-rw-r--r--   src/backup_run.rs         26
-rw-r--r--   src/benchmark.rs           7
-rw-r--r--   src/checksummer.rs        11
-rw-r--r--   src/chunk.rs              29
-rw-r--r--   src/chunker.rs             9
-rw-r--r--   src/chunkid.rs            13
-rw-r--r--   src/chunkmeta.rs           2
-rw-r--r--   src/cipher.rs             27
-rw-r--r--   src/client.rs             38
-rw-r--r--   src/cmd/backup.rs          4
-rw-r--r--   src/cmd/chunk.rs          12
-rw-r--r--   src/cmd/chunkify.rs       13
-rw-r--r--   src/cmd/gen_info.rs        5
-rw-r--r--   src/cmd/get_chunk.rs       5
-rw-r--r--   src/cmd/init.rs            5
-rw-r--r--   src/cmd/list.rs            4
-rw-r--r--   src/cmd/list_files.rs      5
-rw-r--r--   src/cmd/mod.rs             2
-rw-r--r--   src/cmd/resolve.rs         5
-rw-r--r--   src/cmd/restore.rs        18
-rw-r--r--   src/cmd/show_config.rs     4
-rw-r--r--   src/cmd/show_gen.rs        5
-rw-r--r--   src/config.rs             23
-rw-r--r--   src/engine.rs              2
-rw-r--r--   src/error.rs              19
-rw-r--r--   src/fsentry.rs            26
-rw-r--r--   src/fsiter.rs              8
-rw-r--r--   src/generation.rs         61
-rw-r--r--   src/genlist.rs            12
-rw-r--r--   src/index.rs              19
-rw-r--r--   src/indexedstore.rs       10
-rw-r--r--   src/lib.rs                 7
-rw-r--r--   src/passwords.rs          13
-rw-r--r--   src/policy.rs              5
-rw-r--r--   src/server.rs             24
-rw-r--r--   src/store.rs               2
-rw-r--r--   src/workqueue.rs           2
39 files changed, 508 insertions, 17 deletions
diff --git a/src/backup_progress.rs b/src/backup_progress.rs
index 30b6228..52430e4 100644
--- a/src/backup_progress.rs
+++ b/src/backup_progress.rs
@@ -1,12 +1,19 @@
+//! Progress bars for Obnam.
+
use crate::generation::GenId;
use indicatif::{ProgressBar, ProgressStyle};
use std::path::Path;
+/// A progress bar abstraction specific to backups.
+///
+/// The progress bar is different for initial and incremental backups,
+/// and for different phases of making a backup.
pub struct BackupProgress {
progress: ProgressBar,
}
impl BackupProgress {
+ /// Create a progress bar for an initial backup.
pub fn initial() -> Self {
let progress = if true {
ProgressBar::new(0)
@@ -26,6 +33,7 @@ impl BackupProgress {
Self { progress }
}
+ /// Create a progress bar for an incremental backup.
pub fn incremental() -> Self {
let progress = if true {
ProgressBar::new(0)
@@ -46,6 +54,7 @@ impl BackupProgress {
Self { progress }
}
+ /// Create a progress bar for uploading a new generation's metadata.
pub fn upload_generation() -> Self {
let progress = ProgressBar::new(0);
let parts = vec![
@@ -59,6 +68,8 @@ impl BackupProgress {
Self { progress }
}
+ /// Create a progress bar for downloading an existing generation's
+ /// metadata.
pub fn download_generation(gen_id: &GenId) -> Self {
let progress = ProgressBar::new(0);
let parts = vec!["{msg}", "elapsed: {elapsed}", "{spinner}"];
@@ -72,14 +83,21 @@ impl BackupProgress {
Self { progress }
}
+ /// Set the number of files that were in the previous generation.
+ ///
+ /// The new generation usually has about the same number of files,
+ /// so the progress bar can show progress for incremental backups
+ /// without having to count all the files that actually exist first.
pub fn files_in_previous_generation(&self, count: u64) {
self.progress.set_length(count);
}
+ /// Update progress bar about number of problems found during a backup.
pub fn found_problem(&self) {
self.progress.inc(1);
}
+ /// Update progress bar about number of actual files found.
pub fn found_live_file(&self, filename: &Path) {
self.progress.inc(1);
if self.progress.length() < self.progress.position() {
@@ -88,6 +106,10 @@ impl BackupProgress {
self.progress.set_message(format!("{}", filename.display()));
}
+ /// Tell progress bar it's finished.
+ ///
+ /// This will remove all traces of the progress bar from the
+ /// screen.
pub fn finish(&self) {
self.progress.set_length(self.progress.position());
self.progress.finish_and_clear();
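
The lifecycle documented above (create, seed the expected length, bump per file, finish) can be sketched as a standalone program; this assumes the crate is usable as a library under the name `obnam`:

```rust
use obnam::backup_progress::BackupProgress;
use std::path::Path;

fn main() {
    // Simulate progress reporting for a tiny incremental backup.
    let progress = BackupProgress::incremental();
    // Seed the bar with the previous generation's file count so it
    // can show meaningful progress before all files are counted.
    progress.files_in_previous_generation(3);
    for name in &["a.txt", "b.txt", "c.txt"] {
        progress.found_live_file(Path::new(name));
    }
    // Removes all traces of the bar from the screen.
    progress.finish();
}
```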
diff --git a/src/backup_reason.rs b/src/backup_reason.rs
index 0a51556..590f470 100644
--- a/src/backup_reason.rs
+++ b/src/backup_reason.rs
@@ -1,19 +1,38 @@
+//! Why was a file backed up?
+
use rusqlite::types::ToSqlOutput;
use rusqlite::ToSql;
use std::fmt;
+/// Represent the reason a file is in a backup.
#[derive(Debug, Copy, Clone)]
pub enum Reason {
+ /// File was skipped for some reason, but carried over without changes.
Skipped,
+ /// File is new, compared to previous backup.
IsNew,
+ /// File has been changed, compared to previous backup.
Changed,
+ /// File has not been changed, compared to previous backup.
Unchanged,
+ /// There was an error looking up the file in the previous backup.
+ ///
+ /// File has been carried over without changes.
GenerationLookupError,
+ /// There was an error backing up the file.
+ ///
+ /// File has been carried over without changes.
FileError,
+ /// Reason is unknown.
+ ///
+ /// The previous backup had a reason that the current version of
+ /// Obnam doesn't recognize. The file has been carried over
+ /// without changes.
Unknown,
}
impl Reason {
+ /// Create a Reason from a string representation.
pub fn from(text: &str) -> Reason {
match text {
"skipped" => Reason::Skipped,
@@ -28,6 +47,7 @@ impl Reason {
}
impl ToSql for Reason {
+ /// Represent Reason as text for SQL.
fn to_sql(&self) -> rusqlite::Result<ToSqlOutput> {
Ok(ToSqlOutput::Owned(rusqlite::types::Value::Text(format!(
"{}",
@@ -37,6 +57,7 @@ impl ToSql for Reason {
}
impl fmt::Display for Reason {
+ /// Represent Reason for display.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let reason = match self {
Reason::Skipped => "skipped",
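
A minimal sketch of the string round-trip these docs describe, again assuming the `obnam` library path:

```rust
use obnam::backup_reason::Reason;

fn main() {
    // Round-trip a reason through its string form, as the generation
    // database does via the ToSql and Display implementations.
    let reason = Reason::from("unchanged");
    assert_eq!(reason.to_string(), "unchanged");

    // Unrecognized strings fall back to Reason::Unknown instead of
    // failing, so databases from newer versions stay readable.
    println!("{}", Reason::from("no-such-reason"));
}
```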
diff --git a/src/backup_run.rs b/src/backup_run.rs
index 0853bdf..ade5ee0 100644
--- a/src/backup_run.rs
+++ b/src/backup_run.rs
@@ -1,3 +1,5 @@
+//! Run one backup.
+
use crate::backup_progress::BackupProgress;
use crate::backup_reason::Reason;
use crate::chunk::{GenerationChunk, GenerationChunkError};
@@ -20,6 +22,7 @@ use std::path::{Path, PathBuf};
const SQLITE_CHUNK_SIZE: usize = MIB as usize;
+/// A running backup.
pub struct BackupRun<'a> {
client: &'a AsyncBackupClient,
policy: BackupPolicy,
@@ -27,41 +30,57 @@ pub struct BackupRun<'a> {
progress: Option<BackupProgress>,
}
+/// Possible errors that can occur during a backup.
#[derive(Debug, thiserror::Error)]
pub enum BackupError {
+ /// An error from communicating with the server.
#[error(transparent)]
ClientError(#[from] ClientError),
+ /// An error iterating over a directory tree.
#[error(transparent)]
FsIterError(#[from] FsIterError),
+ /// An error from creating a new backup's metadata.
#[error(transparent)]
NascentError(#[from] NascentError),
+ /// An error using an existing backup's metadata.
#[error(transparent)]
LocalGenerationError(#[from] LocalGenerationError),
+ /// An error splitting data into chunks.
#[error(transparent)]
ChunkerError(#[from] ChunkerError),
+ /// An error splitting backup metadata into chunks.
#[error(transparent)]
GenerationChunkError(#[from] GenerationChunkError),
}
+/// The outcome of backing up a file system entry.
#[derive(Debug)]
pub struct FsEntryBackupOutcome {
+ /// The file system entry.
pub entry: FilesystemEntry,
+ /// The chunk identifiers for the file's content.
pub ids: Vec<ChunkId>,
+ /// Why this entry is added to the new backup.
pub reason: Reason,
+ /// Is this entry a valid CACHEDIR.TAG file?
pub is_cachedir_tag: bool,
}
+/// The outcome of backing up a backup root.
#[derive(Debug)]
struct OneRootBackupOutcome {
+ /// Any warnings (non-fatal errors) from backing up the backup root.
pub warnings: Vec<BackupError>,
+ /// New cache directories in this root.
pub new_cachedir_tags: Vec<PathBuf>,
}
+/// The outcome of a backup run.
#[derive(Debug)]
pub struct RootsBackupOutcome {
/// The number of backed up files.
@@ -75,6 +94,7 @@ pub struct RootsBackupOutcome {
}
impl<'a> BackupRun<'a> {
+ /// Create a new run for an initial backup.
pub fn initial(
config: &ClientConfig,
client: &'a AsyncBackupClient,
@@ -87,6 +107,7 @@ impl<'a> BackupRun<'a> {
})
}
+ /// Create a new run for an incremental backup.
pub fn incremental(
config: &ClientConfig,
client: &'a AsyncBackupClient,
@@ -99,6 +120,7 @@ impl<'a> BackupRun<'a> {
})
}
+ /// Start the backup run.
pub async fn start(
&mut self,
genid: Option<&GenId>,
@@ -135,12 +157,14 @@ impl<'a> BackupRun<'a> {
Ok(old)
}
+ /// Finish this backup run.
pub fn finish(&self) {
if let Some(progress) = &self.progress {
progress.finish();
}
}
+ /// Back up all the roots for this run.
pub async fn backup_roots(
&self,
config: &ClientConfig,
@@ -294,6 +318,7 @@ impl<'a> BackupRun<'a> {
}
}
+ /// Upload any file content for a file system entry.
pub async fn upload_filesystem_entry(
&self,
e: &FilesystemEntry,
@@ -312,6 +337,7 @@ impl<'a> BackupRun<'a> {
Ok(ids)
}
+ /// Upload the metadata for the backup of this run.
pub async fn upload_generation(
&self,
filename: &Path,
diff --git a/src/benchmark.rs b/src/benchmark.rs
index e5057ac..d2d9003 100644
--- a/src/benchmark.rs
+++ b/src/benchmark.rs
@@ -1,15 +1,20 @@
+//! Benchmark chunk generation.
+//!
+//! This is only for development.
+
use crate::checksummer::Checksum;
use crate::chunk::DataChunk;
use crate::chunkid::ChunkId;
use crate::chunkmeta::ChunkMeta;
-// Generate a desired number of empty data chunks with id and metadata.
+/// Generate a desired number of empty data chunks with id and metadata.
pub struct ChunkGenerator {
goal: u32,
next: u32,
}
impl ChunkGenerator {
+ /// Create a new ChunkGenerator.
pub fn new(goal: u32) -> Self {
Self { goal, next: 0 }
}
diff --git a/src/checksummer.rs b/src/checksummer.rs
index 18b8afb..50bce04 100644
--- a/src/checksummer.rs
+++ b/src/checksummer.rs
@@ -1,13 +1,22 @@
+//! Compute checksums of data.
+//!
+//! De-duplication of backed up data in Obnam relies on cryptographic
+//! checksums. They are implemented in this module. Note that Obnam
+//! does not aim to make these algorithms configurable, so only a very
+//! small number of carefully chosen algorithms are supported here.
+
use sha2::{Digest, Sha256};
use std::fmt;
/// A checksum of some data.
#[derive(Debug, Clone)]
pub enum Checksum {
+ /// A SHA256 checksum.
Sha256(String),
}
impl Checksum {
+ /// Compute a SHA256 checksum for a block of data.
pub fn sha256(data: &[u8]) -> Self {
let mut hasher = Sha256::new();
hasher.update(data);
@@ -15,12 +24,14 @@ impl Checksum {
Self::Sha256(format!("{:x}", hash))
}
+ /// Create a `Checksum` from a known, previously computed hash.
pub fn sha256_from_str_unchecked(hash: &str) -> Self {
Self::Sha256(hash.to_string())
}
}
impl fmt::Display for Checksum {
+ /// Format a checksum for display.
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let hash = match self {
Self::Sha256(hash) => hash,
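
A minimal sketch of the checksum API documented above (the `obnam::checksummer` path is an assumption):

```rust
use obnam::checksummer::Checksum;

fn main() {
    // Compute the SHA256 of a block of data, as done for chunks.
    let sum = Checksum::sha256(b"hello, world");
    // Display renders the hex digest; that string form can later be
    // turned back into a Checksum without re-hashing.
    let hex = sum.to_string();
    let again = Checksum::sha256_from_str_unchecked(&hex);
    println!("{} == {}", sum, again);
}
```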
diff --git a/src/chunk.rs b/src/chunk.rs
index 266d1a7..15e3288 100644
--- a/src/chunk.rs
+++ b/src/chunk.rs
@@ -1,15 +1,18 @@
+//! Chunks of data.
+
use crate::checksummer::Checksum;
use crate::chunkid::ChunkId;
use crate::chunkmeta::ChunkMeta;
use serde::{Deserialize, Serialize};
use std::default::Default;
-/// Store an arbitrary chunk of data.
-///
-/// The data is just arbitrary binary data.
+/// An arbitrary chunk of arbitrary binary data.
///
/// A chunk also contains its associated metadata, except its
-/// identifier.
+/// identifier, so that it's easy to keep the data and metadata
+/// together. The identifier is used to find the chunk, and it's
+/// assigned by the server when the chunk is uploaded, so it's not
+/// stored in the chunk itself.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct DataChunk {
data: Vec<u8>,
@@ -17,7 +20,7 @@ pub struct DataChunk {
}
impl DataChunk {
- /// Construct a new chunk.
+ /// Create a new chunk.
pub fn new(data: Vec<u8>, meta: ChunkMeta) -> Self {
Self { data, meta }
}
@@ -33,6 +36,13 @@ impl DataChunk {
}
}
+/// A chunk representing a backup generation.
+///
+/// A generation chunk lists all the data chunks for the SQLite file
+/// with the backup's metadata. It's different from a normal data
+/// chunk so that we can do things that make no sense to a data chunk.
+/// Generation chunks can be converted into or created from data
+/// chunks, for uploading to or downloading from the server.
#[derive(Default, Debug, Serialize, Deserialize)]
pub struct GenerationChunk {
chunk_ids: Vec<ChunkId>,
@@ -41,39 +51,48 @@ pub struct GenerationChunk {
/// All the errors that may be returned for `GenerationChunk` operations.
#[derive(Debug, thiserror::Error)]
pub enum GenerationChunkError {
+ /// Error converting text from UTF8.
#[error(transparent)]
Utf8Error(#[from] std::str::Utf8Error),
+ /// Error parsing JSON as a generation chunk.
#[error("failed to parse JSON: {0}")]
JsonParse(serde_json::Error),
+ /// Error generating JSON from a generation chunk.
#[error("failed to serialize to JSON: {0}")]
JsonGenerate(serde_json::Error),
}
impl GenerationChunk {
+ /// Create a new backup generation chunk from metadata chunk ids.
pub fn new(chunk_ids: Vec<ChunkId>) -> Self {
Self { chunk_ids }
}
+ /// Create a new backup generation chunk from a data chunk.
pub fn from_data_chunk(chunk: &DataChunk) -> Result<Self, GenerationChunkError> {
let data = chunk.data();
let data = std::str::from_utf8(data)?;
serde_json::from_str(data).map_err(GenerationChunkError::JsonParse)
}
+ /// Does the generation chunk contain any metadata chunks?
pub fn is_empty(&self) -> bool {
self.chunk_ids.is_empty()
}
+ /// How many metadata chunks does the generation chunk contain?
pub fn len(&self) -> usize {
self.chunk_ids.len()
}
+ /// Return iterator over the metadata chunk identifiers.
pub fn chunk_ids(&self) -> impl Iterator<Item = &ChunkId> {
self.chunk_ids.iter()
}
+ /// Convert generation chunk to a data chunk.
pub fn to_data_chunk(&self, ended: &str) -> Result<DataChunk, GenerationChunkError> {
let json: String =
serde_json::to_string(self).map_err(GenerationChunkError::JsonGenerate)?;
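
The conversion between generation chunks and data chunks can be sketched like this; the ids and timestamp are hypothetical:

```rust
use obnam::chunk::GenerationChunk;
use obnam::chunkid::ChunkId;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // A generation chunk is just the list of chunk ids that make up
    // the backup's SQLite metadata file.
    let ids: Vec<ChunkId> = vec!["id-1".parse().unwrap(), "id-2".parse().unwrap()];
    let gen = GenerationChunk::new(ids);

    // Serialize to a plain data chunk for upload; the argument is the
    // generation's end timestamp.
    let data = gen.to_data_chunk("2021-12-31T12:10:10+02:00")?;

    // And back again, as done when downloading a generation.
    let gen2 = GenerationChunk::from_data_chunk(&data)?;
    assert_eq!(gen2.len(), 2);
    Ok(())
}
```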
diff --git a/src/chunker.rs b/src/chunker.rs
index f096365..e8e31e1 100644
--- a/src/chunker.rs
+++ b/src/chunker.rs
@@ -1,9 +1,12 @@
+//! Split file data into chunks.
+
use crate::checksummer::Checksum;
use crate::chunk::DataChunk;
use crate::chunkmeta::ChunkMeta;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
+/// Iterator over chunks in a file.
pub struct Chunker {
chunk_size: usize,
buf: Vec<u8>,
@@ -11,13 +14,16 @@ pub struct Chunker {
handle: std::fs::File,
}
+/// Possible errors from data chunking.
#[derive(Debug, thiserror::Error)]
pub enum ChunkerError {
+ /// Error reading from a file.
#[error("failed to read file {0}: {1}")]
FileRead(PathBuf, std::io::Error),
}
impl Chunker {
+ /// Create new iterator.
pub fn new(chunk_size: usize, handle: std::fs::File, filename: &Path) -> Self {
let mut buf = vec![];
buf.resize(chunk_size, 0);
@@ -29,7 +35,7 @@ impl Chunker {
}
}
- pub fn read_chunk(&mut self) -> Result<Option<DataChunk>, ChunkerError> {
+ fn read_chunk(&mut self) -> Result<Option<DataChunk>, ChunkerError> {
let mut used = 0;
loop {
@@ -58,6 +64,7 @@ impl Chunker {
impl Iterator for Chunker {
type Item = Result<DataChunk, ChunkerError>;
+ /// Return the next chunk, if any, or an error.
fn next(&mut self) -> Option<Result<DataChunk, ChunkerError>> {
match self.read_chunk() {
Ok(None) => None,
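
Since `Chunker` implements `Iterator`, splitting a file is a plain loop; a sketch with a hypothetical input path:

```rust
use obnam::chunker::Chunker;
use std::fs::File;
use std::path::Path;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let path = Path::new("/etc/hosts");
    let handle = File::open(path)?;
    // Split the file into one-MiB chunks; the iterator yields
    // Result<DataChunk, ChunkerError>, so read errors surface per chunk.
    for chunk in Chunker::new(1024 * 1024, handle, path) {
        let chunk = chunk?;
        println!("chunk of {} bytes", chunk.data().len());
    }
    Ok(())
}
```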
diff --git a/src/chunkid.rs b/src/chunkid.rs
index 39e3ee1..3534627 100644
--- a/src/chunkid.rs
+++ b/src/chunkid.rs
@@ -1,3 +1,8 @@
+//! The identifier for a chunk.
+//!
+//! Chunk identifiers are chosen by the server. Each chunk has a
+//! unique identifier, which isn't based on the contents of the chunk.
+
use crate::checksummer::Checksum;
use rusqlite::types::ToSqlOutput;
use rusqlite::ToSql;
@@ -37,21 +42,24 @@ impl ChunkId {
}
}
- /// Re-construct an identifier from a previous values.
+ /// Re-construct an identifier from a previous value.
pub fn recreate(s: &str) -> Self {
ChunkId { id: s.to_string() }
}
+ /// Return the identifier as a slice of bytes.
pub fn as_bytes(&self) -> &[u8] {
self.id.as_bytes()
}
+ /// Return the SHA256 checksum of the identifier.
pub fn sha256(&self) -> Checksum {
Checksum::sha256(self.id.as_bytes())
}
}
impl ToSql for ChunkId {
+ /// Format identifier for SQL.
fn to_sql(&self) -> rusqlite::Result<ToSqlOutput> {
Ok(ToSqlOutput::Owned(rusqlite::types::Value::Text(
self.id.clone(),
@@ -69,12 +77,14 @@ impl fmt::Display for ChunkId {
}
impl From<&String> for ChunkId {
+ /// Create a chunk identifier from a string.
fn from(s: &String) -> Self {
ChunkId { id: s.to_string() }
}
}
impl From<&OsStr> for ChunkId {
+ /// Create a chunk identifier from an operating system string.
fn from(s: &OsStr) -> Self {
ChunkId {
id: s.to_string_lossy().to_string(),
@@ -85,6 +95,7 @@ impl From<&OsStr> for ChunkId {
impl FromStr for ChunkId {
type Err = ();
+ /// Create a chunk id from a string.
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(ChunkId::recreate(s))
}
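
A minimal sketch of the identifier conversions documented above; the example id is hypothetical:

```rust
use obnam::chunkid::ChunkId;

fn main() {
    // Recreate an identifier from its stored text form; this cannot
    // fail, since any string is a valid identifier.
    let id: ChunkId = "0d93c39f-f510-4d9d-b526-d1a32b4ee843".parse().unwrap();
    // The SHA256 here is of the identifier itself, not of any chunk
    // data; the server indexes generations by it.
    println!("{} -> {}", id, id.sha256());
}
```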
diff --git a/src/chunkmeta.rs b/src/chunkmeta.rs
index f8a8114..06a187b 100644
--- a/src/chunkmeta.rs
+++ b/src/chunkmeta.rs
@@ -1,3 +1,5 @@
+//! Metadata about a chunk.
+
use crate::checksummer::Checksum;
use serde::{Deserialize, Serialize};
use std::default::Default;
diff --git a/src/cipher.rs b/src/cipher.rs
index 04b2944..ee7fb8f 100644
--- a/src/cipher.rs
+++ b/src/cipher.rs
@@ -1,3 +1,5 @@
+//! Encryption cipher algorithms.
+
use crate::chunk::DataChunk;
use crate::chunkmeta::ChunkMeta;
use crate::passwords::Passwords;
@@ -10,30 +12,43 @@ use std::str::FromStr;
const CHUNK_V1: &[u8] = b"0001";
+/// An encrypted chunk.
+///
+/// This consists of the encrypted ciphertext, plus unencrypted
+/// (cleartext) additional associated data, such as the chunk's
+/// metadata, which can be used, for example, to find chunks.
+///
+/// Encrypted chunks are the only chunks that can be uploaded to the
+/// server.
pub struct EncryptedChunk {
ciphertext: Vec<u8>,
aad: Vec<u8>,
}
impl EncryptedChunk {
+ /// Create an encrypted chunk.
fn new(ciphertext: Vec<u8>, aad: Vec<u8>) -> Self {
Self { ciphertext, aad }
}
+ /// Return the encrypted data.
pub fn ciphertext(&self) -> &[u8] {
&self.ciphertext
}
+ /// Return the cleartext associated additional data.
pub fn aad(&self) -> &[u8] {
&self.aad
}
}
+/// An engine for encrypting and decrypting chunks.
pub struct CipherEngine {
cipher: Aes256Gcm,
}
impl CipherEngine {
+ /// Create a new cipher engine using cleartext passwords.
pub fn new(pass: &Passwords) -> Self {
let key = GenericArray::from_slice(pass.encryption_key());
Self {
@@ -41,6 +56,7 @@ impl CipherEngine {
}
}
+ /// Encrypt a chunk.
pub fn encrypt_chunk(&self, chunk: &DataChunk) -> Result<EncryptedChunk, CipherError> {
// Payload with metadata as associated data, to be encrypted.
//
@@ -70,6 +86,7 @@ impl CipherEngine {
Ok(EncryptedChunk::new(vec, aad))
}
+ /// Decrypt a chunk.
pub fn decrypt_chunk(&self, bytes: &[u8], meta: &[u8]) -> Result<DataChunk, CipherError> {
// Does encrypted chunk start with the right version?
if !bytes.starts_with(CHUNK_V1) {
@@ -109,26 +126,36 @@ fn push_bytes(vec: &mut Vec<u8>, bytes: &[u8]) {
}
}
+/// Possible errors when encrypting or decrypting chunks.
#[derive(Debug, thiserror::Error)]
pub enum CipherError {
+ /// Encryption failed.
#[error("failed to encrypt with AES-GCM: {0}")]
EncryptError(aes_gcm::Error),
+ /// The encrypted chunk has an unsupported version or is
+ /// corrupted.
#[error("encrypted chunk does not start with correct version")]
UnknownChunkVersion,
+ /// The encrypted chunk lacks a complete nonce value, and is
+ /// probably corrupted.
#[error("encrypted chunk does not have a complete nonce")]
NoNonce,
+ /// Decryption failed.
#[error("failed to decrypt with AES-GCM: {0}")]
DecryptError(aes_gcm::Error),
+ /// The decryption succeeded, but the data isn't valid YAML.
#[error("failed to parse decrypted data as a DataChunk: {0}")]
Parse(serde_yaml::Error),
+ /// Error parsing UTF8 data.
#[error(transparent)]
Utf8Error(#[from] std::str::Utf8Error),
+ /// Error parsing JSON data.
#[error("failed to parse JSON: {0}")]
JsonParse(#[from] serde_json::Error),
}
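
The version and nonce checks documented for `decrypt_chunk` imply a simple envelope layout. A standalone sketch of that parsing, assuming the standard 96-bit AES-GCM nonce; the real logic lives in `decrypt_chunk`:

```rust
/// Split an encrypted chunk into (nonce, ciphertext), mirroring the
/// checks decrypt_chunk performs. Returns None where the real code
/// would report UnknownChunkVersion or NoNonce.
fn split_envelope(bytes: &[u8]) -> Option<(&[u8], &[u8])> {
    const CHUNK_V1: &[u8] = b"0001"; // version tag, from this module
    const NONCE_LEN: usize = 12; // 96-bit AES-GCM nonce (assumption)
    let rest = bytes.strip_prefix(CHUNK_V1)?;
    if rest.len() < NONCE_LEN {
        return None;
    }
    Some(rest.split_at(NONCE_LEN))
}

fn main() {
    let envelope = b"0001AAAAAAAAAAAAciphertext...";
    let (nonce, ct) = split_envelope(envelope).unwrap();
    assert_eq!(nonce.len(), 12);
    println!("{} ciphertext bytes", ct.len());
}
```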
diff --git a/src/client.rs b/src/client.rs
index 5451dfb..ed6b86b 100644
--- a/src/client.rs
+++ b/src/client.rs
@@ -1,3 +1,5 @@
+//! Client to the Obnam server HTTP API.
+
use crate::chunk::{DataChunk, GenerationChunk, GenerationChunkError};
use crate::chunkid::ChunkId;
use crate::chunkmeta::ChunkMeta;
@@ -13,89 +15,118 @@ use std::fs::File;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
+/// Possible errors when using the server API.
#[derive(Debug, thiserror::Error)]
pub enum ClientError {
+ /// No chunk id for uploaded chunk.
#[error("Server response claimed it had created a chunk, but lacked chunk id")]
NoCreatedChunkId,
+ /// Server claims to not have an entity.
#[error("Server does not have {0}")]
NotFound(String),
+ /// Server does not have a chunk.
#[error("Server does not have chunk {0}")]
ChunkNotFound(ChunkId),
+ /// Server does not have a generation.
#[error("Server does not have generation {0}")]
GenerationNotFound(ChunkId),
+ /// Server didn't give us a chunk's metadata.
#[error("Server response did not have a 'chunk-meta' header for chunk {0}")]
NoChunkMeta(ChunkId),
+ /// Chunk has wrong checksum and may be corrupted.
#[error("Wrong checksum for chunk {0}, got {1}, expected {2}")]
WrongChecksum(ChunkId, String, String),
+ /// Client configuration is wrong.
#[error(transparent)]
ClientConfigError(#[from] ClientConfigError),
+ /// An error encrypting or decrypting chunks.
#[error(transparent)]
CipherError(#[from] CipherError),
+ /// An error regarding generation chunks.
#[error(transparent)]
GenerationChunkError(#[from] GenerationChunkError),
+ /// An error using a backup's local metadata.
#[error(transparent)]
LocalGenerationError(#[from] LocalGenerationError),
+ /// An error with the `chunk-meta` header.
#[error("couldn't convert response chunk-meta header to string: {0}")]
MetaHeaderToString(reqwest::header::ToStrError),
+ /// An error from the HTTP library.
#[error("error from reqwest library: {0}")]
ReqwestError(reqwest::Error),
+ /// Couldn't look up a chunk via checksum.
#[error("lookup by chunk checksum failed: {0}")]
ChunkExists(reqwest::Error),
+ /// Error parsing JSON.
#[error("failed to parse JSON: {0}")]
JsonParse(serde_json::Error),
+ /// Error generating JSON.
#[error("failed to generate JSON: {0}")]
JsonGenerate(serde_json::Error),
+ /// Error parsing YAML.
#[error("failed to parse YAML: {0}")]
YamlParse(serde_yaml::Error),
+ /// Failed to open a file.
#[error("failed to open file {0}: {1}")]
FileOpen(PathBuf, std::io::Error),
+ /// Failed to create a file.
#[error("failed to create file {0}: {1}")]
FileCreate(PathBuf, std::io::Error),
+ /// Failed to write a file.
#[error("failed to write to file {0}: {1}")]
FileWrite(PathBuf, std::io::Error),
}
+/// Client for the Obnam server HTTP API.
+///
+/// This is the async version.
pub struct AsyncBackupClient {
chunk_client: AsyncChunkClient,
}
impl AsyncBackupClient {
+ /// Create a new backup client.
pub fn new(config: &ClientConfig) -> Result<Self, ClientError> {
info!("creating backup client with config: {:#?}", config);
Ok(Self {
chunk_client: AsyncChunkClient::new(config)?,
})
}
+
+ /// Does the server have a chunk?
pub async fn has_chunk(&self, meta: &ChunkMeta) -> Result<Option<ChunkId>, ClientError> {
self.chunk_client.has_chunk(meta).await
}
+ /// Upload a data chunk to the server.
pub async fn upload_chunk(&self, chunk: DataChunk) -> Result<ChunkId, ClientError> {
self.chunk_client.upload_chunk(chunk).await
}
+ /// List backup generations known by the server.
pub async fn list_generations(&self) -> Result<GenerationList, ClientError> {
self.chunk_client.list_generations().await
}
+ /// Fetch a data chunk from the server, given the chunk identifier.
pub async fn fetch_chunk(&self, chunk_id: &ChunkId) -> Result<DataChunk, ClientError> {
self.chunk_client.fetch_chunk(chunk_id).await
}
@@ -106,6 +137,7 @@ impl AsyncBackupClient {
Ok(gen)
}
+ /// Fetch a backup generation's metadata, given its identifier.
pub async fn fetch_generation(
&self,
gen_id: &GenId,
@@ -129,6 +161,7 @@ impl AsyncBackupClient {
}
}
+/// Client for the chunk part of the Obnam server HTTP API.
pub struct AsyncChunkClient {
client: reqwest::Client,
base_url: String,
@@ -136,6 +169,7 @@ pub struct AsyncChunkClient {
}
impl AsyncChunkClient {
+ /// Create a new chunk client.
pub fn new(config: &ClientConfig) -> Result<Self, ClientError> {
let pass = config.passwords()?;
@@ -158,6 +192,7 @@ impl AsyncChunkClient {
format!("{}/chunks", self.base_url())
}
+ /// Does server have a chunk?
pub async fn has_chunk(&self, meta: &ChunkMeta) -> Result<Option<ChunkId>, ClientError> {
let body = match self.get("", &[("sha256", meta.sha256())]).await {
Ok((_, body)) => body,
@@ -176,6 +211,7 @@ impl AsyncChunkClient {
Ok(has)
}
+ /// Upload a new chunk to the server.
pub async fn upload_chunk(&self, chunk: DataChunk) -> Result<ChunkId, ClientError> {
let enc = self.cipher.encrypt_chunk(&chunk)?;
let res = self
@@ -198,6 +234,7 @@ impl AsyncChunkClient {
Ok(chunk_id)
}
+ /// List all generation chunks on the server.
pub async fn list_generations(&self) -> Result<GenerationList, ClientError> {
let (_, body) = self.get("", &[("generation", "true")]).await?;
@@ -211,6 +248,7 @@ impl AsyncChunkClient {
Ok(GenerationList::new(finished))
}
+ /// Fetch a chunk from the server, given its id.
pub async fn fetch_chunk(&self, chunk_id: &ChunkId) -> Result<DataChunk, ClientError> {
let (headers, body) = self.get(&format!("/{}", chunk_id), &[]).await?;
let meta = self.get_chunk_meta_header(chunk_id, &headers)?;
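
Putting the client API together, a sketch that lists generations; the configuration path is hypothetical and the `obnam` library paths are assumptions:

```rust
use obnam::client::AsyncBackupClient;
use obnam::config::ClientConfig;
use std::path::Path;
use tokio::runtime::Runtime;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let config = ClientConfig::read(Path::new("obnam.yaml"))?;
    let client = AsyncBackupClient::new(&config)?;
    // The API is async, so drive it with a Tokio runtime, as the
    // subcommands do.
    let rt = Runtime::new()?;
    let gens = rt.block_on(client.list_generations())?;
    for g in gens.iter() {
        println!("{} ended {}", g.id().as_chunk_id(), g.ended());
    }
    Ok(())
}
```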
diff --git a/src/cmd/backup.rs b/src/cmd/backup.rs
index 8f3d6d5..6e09d37 100644
--- a/src/cmd/backup.rs
+++ b/src/cmd/backup.rs
@@ -1,3 +1,5 @@
+//! The `backup` subcommand.
+
use crate::backup_run::BackupRun;
use crate::client::AsyncBackupClient;
use crate::config::ClientConfig;
@@ -10,10 +12,12 @@ use structopt::StructOpt;
use tempfile::NamedTempFile;
use tokio::runtime::Runtime;
+/// Make a backup.
#[derive(Debug, StructOpt)]
pub struct Backup {}
impl Backup {
+ /// Run the command.
pub fn run(&self, config: &ClientConfig) -> Result<(), ObnamError> {
let rt = Runtime::new()?;
rt.block_on(self.run_async(config))
diff --git a/src/cmd/chunk.rs b/src/cmd/chunk.rs
index e0e91b1..445d23f 100644
--- a/src/cmd/chunk.rs
+++ b/src/cmd/chunk.rs
@@ -1,3 +1,5 @@
+//! The `encrypt-chunk` and `decrypt-chunk` subcommands.
+
use crate::chunk::DataChunk;
use crate::chunkmeta::ChunkMeta;
use crate::cipher::CipherEngine;
@@ -6,19 +8,24 @@ use crate::error::ObnamError;
use std::path::PathBuf;
use structopt::StructOpt;
+/// Encrypt a chunk.
#[derive(Debug, StructOpt)]
pub struct EncryptChunk {
+ /// The name of the file containing the cleartext chunk.
#[structopt(parse(from_os_str))]
filename: PathBuf,
+ /// Name of file where to write the encrypted chunk.
#[structopt(parse(from_os_str))]
output: PathBuf,
+ /// Chunk metadata as JSON.
#[structopt()]
json: String,
}
impl EncryptChunk {
+ /// Run the command.
pub fn run(&self, config: &ClientConfig) -> Result<(), ObnamError> {
let pass = config.passwords()?;
let cipher = CipherEngine::new(&pass);
@@ -35,19 +42,24 @@ impl EncryptChunk {
}
}
+/// Decrypt a chunk.
#[derive(Debug, StructOpt)]
pub struct DecryptChunk {
+ /// Name of file containing encrypted chunk.
#[structopt(parse(from_os_str))]
filename: PathBuf,
+ /// Name of file where to write the cleartext chunk.
#[structopt(parse(from_os_str))]
output: PathBuf,
+ /// Chunk metadata as JSON.
#[structopt()]
json: String,
}
impl DecryptChunk {
+ /// Run the command.
pub fn run(&self, config: &ClientConfig) -> Result<(), ObnamError> {
let pass = config.passwords()?;
let cipher = CipherEngine::new(&pass);
diff --git a/src/cmd/chunkify.rs b/src/cmd/chunkify.rs
index 79fbdea..e2ce05f 100644
--- a/src/cmd/chunkify.rs
+++ b/src/cmd/chunkify.rs
@@ -1,3 +1,5 @@
+//! The `chunkify` subcommand.
+
use crate::config::ClientConfig;
use crate::engine::Engine;
use crate::error::ObnamError;
@@ -15,18 +17,21 @@ use tokio::sync::mpsc;
// checksums.
const Q: usize = 8;
+/// Split files into chunks and show their metadata.
#[derive(Debug, StructOpt)]
pub struct Chunkify {
+ /// Names of files to split into chunks.
filenames: Vec<PathBuf>,
}
impl Chunkify {
+ /// Run the command.
pub fn run(&self, config: &ClientConfig) -> Result<(), ObnamError> {
let rt = Runtime::new()?;
rt.block_on(self.run_async(config))
}
- pub async fn run_async(&self, config: &ClientConfig) -> Result<(), ObnamError> {
+ async fn run_async(&self, config: &ClientConfig) -> Result<(), ObnamError> {
let mut q = WorkQueue::new(Q);
for filename in self.filenames.iter() {
tokio::spawn(split_file(
@@ -51,21 +56,21 @@ impl Chunkify {
}
#[derive(Debug, Clone)]
-pub struct Chunk {
+struct Chunk {
filename: PathBuf,
offset: u64,
data: Vec<u8>,
}
#[derive(Debug, Clone, Serialize)]
-pub struct Checksum {
+struct Checksum {
filename: PathBuf,
offset: u64,
pub len: u64,
checksum: String,
}
-pub async fn split_file(filename: PathBuf, chunk_size: usize, tx: mpsc::Sender<Chunk>) {
+async fn split_file(filename: PathBuf, chunk_size: usize, tx: mpsc::Sender<Chunk>) {
// println!("split_file {}", filename.display());
let mut file = BufReader::new(File::open(&*filename).await.unwrap());
diff --git a/src/cmd/gen_info.rs b/src/cmd/gen_info.rs
index 6d12bd8..2663d9b 100644
--- a/src/cmd/gen_info.rs
+++ b/src/cmd/gen_info.rs
@@ -1,3 +1,5 @@
+//! The `gen-info` subcommand.
+
use crate::client::AsyncBackupClient;
use crate::config::ClientConfig;
use crate::error::ObnamError;
@@ -6,13 +8,16 @@ use structopt::StructOpt;
use tempfile::NamedTempFile;
use tokio::runtime::Runtime;
+/// Show metadata for a generation.
#[derive(Debug, StructOpt)]
pub struct GenInfo {
+ /// Reference to the generation.
#[structopt()]
gen_ref: String,
}
impl GenInfo {
+ /// Run the command.
pub fn run(&self, config: &ClientConfig) -> Result<(), ObnamError> {
let rt = Runtime::new()?;
rt.block_on(self.run_async(config))
diff --git a/src/cmd/get_chunk.rs b/src/cmd/get_chunk.rs
index f574c99..905e997 100644
--- a/src/cmd/get_chunk.rs
+++ b/src/cmd/get_chunk.rs
@@ -1,3 +1,5 @@
+//! The `get-chunk` subcommand.
+
use crate::chunkid::ChunkId;
use crate::client::AsyncBackupClient;
use crate::config::ClientConfig;
@@ -6,13 +8,16 @@ use std::io::{stdout, Write};
use structopt::StructOpt;
use tokio::runtime::Runtime;
+/// Fetch a chunk from the server.
#[derive(Debug, StructOpt)]
pub struct GetChunk {
+ /// Identifier of chunk to fetch.
#[structopt()]
chunk_id: String,
}
impl GetChunk {
+ /// Run the command.
pub fn run(&self, config: &ClientConfig) -> Result<(), ObnamError> {
let rt = Runtime::new()?;
rt.block_on(self.run_async(config))
diff --git a/src/cmd/init.rs b/src/cmd/init.rs
index 08060f7..8e555ca 100644
--- a/src/cmd/init.rs
+++ b/src/cmd/init.rs
@@ -1,3 +1,5 @@
+//! The `init` subcommand.
+
use crate::config::ClientConfig;
use crate::error::ObnamError;
use crate::passwords::{passwords_filename, Passwords};
@@ -5,13 +7,16 @@ use structopt::StructOpt;
const PROMPT: &str = "Obnam passphrase: ";
+/// Initialize client by setting passwords.
#[derive(Debug, StructOpt)]
pub struct Init {
+ /// Only for testing.
#[structopt(long)]
insecure_passphrase: Option<String>,
}
impl Init {
+ /// Run the command.
pub fn run(&self, config: &ClientConfig) -> Result<(), ObnamError> {
let passphrase = match &self.insecure_passphrase {
Some(x) => x.to_string(),
diff --git a/src/cmd/list.rs b/src/cmd/list.rs
index 691f2bf..6c58e30 100644
--- a/src/cmd/list.rs
+++ b/src/cmd/list.rs
@@ -1,13 +1,17 @@
+//! The `list` subcommand.
+
use crate::client::AsyncBackupClient;
use crate::config::ClientConfig;
use crate::error::ObnamError;
use structopt::StructOpt;
use tokio::runtime::Runtime;
+/// List generations on the server.
#[derive(Debug, StructOpt)]
pub struct List {}
impl List {
+ /// Run the command.
pub fn run(&self, config: &ClientConfig) -> Result<(), ObnamError> {
let rt = Runtime::new()?;
rt.block_on(self.run_async(config))
diff --git a/src/cmd/list_files.rs b/src/cmd/list_files.rs
index bdec55b..888943e 100644
--- a/src/cmd/list_files.rs
+++ b/src/cmd/list_files.rs
@@ -1,3 +1,5 @@
+//! The `list-files` subcommand.
+
use crate::backup_reason::Reason;
use crate::client::AsyncBackupClient;
use crate::config::ClientConfig;
@@ -7,13 +9,16 @@ use structopt::StructOpt;
use tempfile::NamedTempFile;
use tokio::runtime::Runtime;
+/// List files in a backup.
#[derive(Debug, StructOpt)]
pub struct ListFiles {
+ /// Reference to backup to list files in.
#[structopt(default_value = "latest")]
gen_id: String,
}
impl ListFiles {
+ /// Run the command.
pub fn run(&self, config: &ClientConfig) -> Result<(), ObnamError> {
let rt = Runtime::new()?;
rt.block_on(self.run_async(config))
diff --git a/src/cmd/mod.rs b/src/cmd/mod.rs
index ee5efd9..5e5226f 100644
--- a/src/cmd/mod.rs
+++ b/src/cmd/mod.rs
@@ -1,3 +1,5 @@
+//! Subcommand implementations.
+
pub mod backup;
pub mod chunk;
pub mod chunkify;
diff --git a/src/cmd/resolve.rs b/src/cmd/resolve.rs
index 9b36445..cd08908 100644
--- a/src/cmd/resolve.rs
+++ b/src/cmd/resolve.rs
@@ -1,15 +1,20 @@
+//! The `resolve` subcommand.
+
use crate::client::AsyncBackupClient;
use crate::config::ClientConfig;
use crate::error::ObnamError;
use structopt::StructOpt;
use tokio::runtime::Runtime;
+/// Resolve a generation reference into a generation id.
#[derive(Debug, StructOpt)]
pub struct Resolve {
+ /// The generation reference.
generation: String,
}
impl Resolve {
+ /// Run the command.
pub fn run(&self, config: &ClientConfig) -> Result<(), ObnamError> {
let rt = Runtime::new()?;
rt.block_on(self.run_async(config))
diff --git a/src/cmd/restore.rs b/src/cmd/restore.rs
index 9848caf..2a36986 100644
--- a/src/cmd/restore.rs
+++ b/src/cmd/restore.rs
@@ -1,3 +1,5 @@
+//! The `restore` subcommand.
+
use crate::backup_reason::Reason;
use crate::client::{AsyncBackupClient, ClientError};
use crate::config::ClientConfig;
@@ -19,16 +21,20 @@ use structopt::StructOpt;
use tempfile::NamedTempFile;
use tokio::runtime::Runtime;
+/// Restore a backup.
#[derive(Debug, StructOpt)]
pub struct Restore {
+ /// Reference to generation to restore.
#[structopt()]
gen_id: String,
+ /// Path to directory where restored files are written.
#[structopt(parse(from_os_str))]
to: PathBuf,
}
impl Restore {
+ /// Run the command.
pub fn run(&self, config: &ClientConfig) -> Result<(), ObnamError> {
let rt = Runtime::new()?;
rt.block_on(self.run_async(config))
@@ -75,38 +81,50 @@ impl Restore {
}
}
+/// Possible errors from restoring.
#[derive(Debug, thiserror::Error)]
pub enum RestoreError {
+ /// Failed to create a named pipe.
#[error("Could not create named pipe (FIFO) {0}")]
NamedPipeCreationError(PathBuf),
+ /// Error from HTTP client.
#[error(transparent)]
ClientError(#[from] ClientError),
+ /// Error from local generation.
#[error(transparent)]
LocalGenerationError(#[from] LocalGenerationError),
+ /// Error removing a prefix.
#[error(transparent)]
StripPrefixError(#[from] StripPrefixError),
+ /// Error creating a directory.
#[error("failed to create directory {0}: {1}")]
CreateDirs(PathBuf, std::io::Error),
+ /// Error creating a file.
#[error("failed to create file {0}: {1}")]
CreateFile(PathBuf, std::io::Error),
+ /// Error writing a file.
#[error("failed to write file {0}: {1}")]
WriteFile(PathBuf, std::io::Error),
+ /// Error creating a symbolic link.
#[error("failed to create symbolic link {0}: {1}")]
Symlink(PathBuf, std::io::Error),
+ /// Error creating a UNIX domain socket.
#[error("failed to create UNIX domain socket {0}: {1}")]
UnixBind(PathBuf, std::io::Error),
+ /// Error setting permissions.
#[error("failed to set permissions for {0}: {1}")]
Chmod(PathBuf, std::io::Error),
+ /// Error setting timestamp.
#[error("failed to set timestamp for {0}: {1}")]
SetTimestamp(PathBuf, std::io::Error),
}
diff --git a/src/cmd/show_config.rs b/src/cmd/show_config.rs
index 05e83c1..7ac52ec 100644
--- a/src/cmd/show_config.rs
+++ b/src/cmd/show_config.rs
@@ -1,11 +1,15 @@
+//! The `show-config` subcommand.
+
use crate::config::ClientConfig;
use crate::error::ObnamError;
use structopt::StructOpt;
+/// Show actual client configuration.
#[derive(Debug, StructOpt)]
pub struct ShowConfig {}
impl ShowConfig {
+ /// Run the command.
pub fn run(&self, config: &ClientConfig) -> Result<(), ObnamError> {
println!("{}", serde_json::to_string_pretty(config)?);
Ok(())
diff --git a/src/cmd/show_gen.rs b/src/cmd/show_gen.rs
index fb7e1bd..6ec1203 100644
--- a/src/cmd/show_gen.rs
+++ b/src/cmd/show_gen.rs
@@ -1,3 +1,5 @@
+//! The `show-generation` subcommand.
+
use crate::client::AsyncBackupClient;
use crate::config::ClientConfig;
use crate::error::ObnamError;
@@ -7,13 +9,16 @@ use structopt::StructOpt;
use tempfile::NamedTempFile;
use tokio::runtime::Runtime;
+/// Show information about a generation.
#[derive(Debug, StructOpt)]
pub struct ShowGeneration {
+ /// Reference to the generation. Defaults to latest.
#[structopt(default_value = "latest")]
gen_id: String,
}
impl ShowGeneration {
+ /// Run the command.
pub fn run(&self, config: &ClientConfig) -> Result<(), ObnamError> {
let rt = Runtime::new()?;
rt.block_on(self.run_async(config))
diff --git a/src/config.rs b/src/config.rs
index 8f5d4d8..a9be716 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -1,3 +1,5 @@
+//! Client configuration.
+
use crate::passwords::{passwords_filename, PasswordError, Passwords};
use bytesize::MIB;
@@ -19,18 +21,29 @@ struct TentativeClientConfig {
exclude_cache_tag_directories: Option<bool>,
}
+/// Configuration for the Obnam client.
#[derive(Debug, Serialize, Clone)]
pub struct ClientConfig {
+ /// Name of configuration file.
pub filename: PathBuf,
+ /// URL of Obnam server.
pub server_url: String,
+ /// Should the server's TLS certificate be verified using CA
+ /// signatures? Set to false for self-signed certificates.
pub verify_tls_cert: bool,
+ /// Size of chunks when splitting files for backup.
pub chunk_size: usize,
+ /// Backup root directories.
pub roots: Vec<PathBuf>,
+ /// File where logs should be written.
pub log: PathBuf,
+ /// Should cache directories be excluded? Cache directories
+ /// contain a specially formatted CACHEDIR.TAG file.
pub exclude_cache_tag_directories: bool,
}
impl ClientConfig {
+ /// Read a client configuration from a file.
pub fn read(filename: &Path) -> Result<Self, ClientConfigError> {
trace!("read_config: filename={:?}", filename);
let config = std::fs::read_to_string(filename)
@@ -75,29 +88,39 @@ impl ClientConfig {
Ok(())
}
+ /// Read encryption passwords from a file.
+ ///
+ /// The password file is expected to be next to the configuration file.
pub fn passwords(&self) -> Result<Passwords, ClientConfigError> {
Passwords::load(&passwords_filename(&self.filename))
.map_err(ClientConfigError::PasswordsMissing)
}
}
+/// Possible errors from configuration files.
#[derive(Debug, thiserror::Error)]
pub enum ClientConfigError {
+ /// The configuration specifies the server URL as an empty string.
#[error("server_url is empty")]
ServerUrlIsEmpty,
+ /// The configuration does not specify any backup root directories.
#[error("No backup roots in config; at least one is needed")]
NoBackupRoot,
+ /// The server URL is not an https: one.
#[error("server URL doesn't use https: {0}")]
NotHttps(String),
+ /// There are no passwords stored.
#[error("No passwords are set: you may need to run 'obnam init': {0}")]
PasswordsMissing(PasswordError),
+ /// Error reading a configuration file.
#[error("failed to read configuration file {0}: {1}")]
Read(PathBuf, std::io::Error),
+ /// Error parsing configuration file as YAML.
#[error("failed to parse configuration file {0} as YAML: {1}")]
YamlParse(PathBuf, serde_yaml::Error),
}
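
A minimal sketch of reading and inspecting the configuration (hypothetical file name):

```rust
use obnam::config::ClientConfig;
use std::path::Path;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Per the error variants above, problems such as an empty
    // server_url or missing backup roots surface as ClientConfigError.
    let config = ClientConfig::read(Path::new("obnam.yaml"))?;
    println!("server: {}", config.server_url);
    println!("chunk size: {} bytes", config.chunk_size);
    for root in &config.roots {
        println!("root: {}", root.display());
    }
    Ok(())
}
```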
diff --git a/src/engine.rs b/src/engine.rs
index 252d3c9..384f591 100644
--- a/src/engine.rs
+++ b/src/engine.rs
@@ -1,3 +1,5 @@
+//! Engine for doing CPU heavy work in the background.
+
use crate::workqueue::WorkQueue;
use futures::stream::{FuturesOrdered, StreamExt};
use tokio::select;
diff --git a/src/error.rs b/src/error.rs
index 30571ec..e8f5ee8 100644
--- a/src/error.rs
+++ b/src/error.rs
@@ -1,3 +1,5 @@
+//! Errors from Obnam client.
+
use crate::backup_run::BackupError;
use crate::cipher::CipherError;
use crate::client::ClientError;
@@ -12,47 +14,64 @@ use tempfile::PersistError;
/// Define all the kinds of errors that functions corresponding to
/// subcommands of the main program can return.
+///
+/// This collects all kinds of errors the Obnam client may get, for
+/// convenience.
#[derive(Debug, thiserror::Error)]
pub enum ObnamError {
+ /// Error listing generations on server.
#[error(transparent)]
GenerationListError(#[from] GenerationListError),
+ /// Error saving passwords.
#[error("couldn't save passwords to {0}: {1}")]
PasswordSave(PathBuf, PasswordError),
+ /// Error using server HTTP API.
#[error(transparent)]
ClientError(#[from] ClientError),
+ /// Error in client configuration.
#[error(transparent)]
ClientConfigError(#[from] ClientConfigError),
+ /// Error making a backup.
#[error(transparent)]
BackupError(#[from] BackupError),
+ /// Error making a new backup generation.
#[error(transparent)]
NascentError(#[from] NascentError),
+ /// Error encrypting or decrypting.
#[error(transparent)]
CipherError(#[from] CipherError),
+ /// Error using local copy of existing backup generation.
#[error(transparent)]
LocalGenerationError(#[from] LocalGenerationError),
+ /// Error restoring a backup.
#[error(transparent)]
RestoreError(#[from] RestoreError),
+ /// Error making temporary file persistent.
#[error(transparent)]
PersistError(#[from] PersistError),
+ /// Error doing I/O.
#[error(transparent)]
IoError(#[from] std::io::Error),
+ /// Error reading system clock.
#[error(transparent)]
SystemTimeError(#[from] SystemTimeError),
+ /// Error regarding JSON.
#[error(transparent)]
SerdeJsonError(#[from] serde_json::Error),
+ /// Unexpected cache directories found.
#[error(
"found CACHEDIR.TAG files that aren't present in the previous backup, might be an attack"
)]
diff --git a/src/fsentry.rs b/src/fsentry.rs
index 8338cc2..6b29c4e 100644
--- a/src/fsentry.rs
+++ b/src/fsentry.rs
@@ -1,3 +1,5 @@
+//! An entry in the file system.
+
use log::{debug, error};
use serde::{Deserialize, Serialize};
use std::ffi::OsString;
@@ -52,17 +54,21 @@ pub struct FilesystemEntry {
group: String,
}
+/// Possible errors related to file system entries.
#[derive(Debug, thiserror::Error)]
pub enum FsEntryError {
+ /// File kind numeric representation is unknown.
#[error("Unknown file kind {0}")]
UnknownFileKindCode(u8),
+ /// Failed to read a symbolic link's target.
#[error("failed to read symbolic link target {0}: {1}")]
ReadLink(PathBuf, std::io::Error),
}
#[allow(clippy::len_without_is_empty)]
impl FilesystemEntry {
+ /// Create a `FilesystemEntry` from a file's metadata.
pub fn from_metadata(path: &Path, meta: &Metadata) -> Result<Self, FsEntryError> {
let kind = FilesystemKind::from_file_type(meta.file_type());
let symlink_target = if kind == FilesystemKind::Symlink {
@@ -94,43 +100,53 @@ impl FilesystemEntry {
})
}
+ /// Return the kind of file the entry refers to.
pub fn kind(&self) -> FilesystemKind {
self.kind
}
+ /// Return full path to the entry.
pub fn pathbuf(&self) -> PathBuf {
let path = self.path.clone();
PathBuf::from(OsString::from_vec(path))
}
+ /// Return number of bytes for the entity represented by the entry.
pub fn len(&self) -> u64 {
self.len
}
+ /// Return the entry's mode bits.
pub fn mode(&self) -> u32 {
self.mode
}
+ /// Return the entry's access time, whole seconds.
pub fn atime(&self) -> i64 {
self.atime
}
+ /// Return the entry's access time, nanoseconds since the last full second.
pub fn atime_ns(&self) -> i64 {
self.atime_ns
}
+ /// Return the entry's modification time, whole seconds.
pub fn mtime(&self) -> i64 {
self.mtime
}
+ /// Return the entry's modification time, nanoseconds since the last full second.
pub fn mtime_ns(&self) -> i64 {
self.mtime_ns
}
+ /// Does the entry represent a directory?
pub fn is_dir(&self) -> bool {
self.kind() == FilesystemKind::Directory
}
+ /// Return target of the symlink the entry represents.
pub fn symlink_target(&self) -> Option<PathBuf> {
self.symlink_target.clone()
}
@@ -153,14 +169,20 @@ fn get_groupname(gid: u32) -> String {
/// Different types of file system entries.
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
pub enum FilesystemKind {
+ /// Regular file, including a hard link to one.
Regular,
+ /// A directory.
Directory,
+ /// A symbolic link.
Symlink,
+ /// A UNIX domain socket.
Socket,
+ /// A UNIX named pipe.
Fifo,
}
impl FilesystemKind {
+ /// Create a kind from a file type.
pub fn from_file_type(file_type: FileType) -> Self {
if file_type.is_file() {
FilesystemKind::Regular
@@ -177,6 +199,7 @@ impl FilesystemKind {
}
}
+ /// Represent a kind as a numeric code.
pub fn as_code(&self) -> u8 {
match self {
FilesystemKind::Regular => 0,
@@ -187,6 +210,7 @@ impl FilesystemKind {
}
}
+ /// Create a kind from a numeric code.
pub fn from_code(code: u8) -> Result<Self, FsEntryError> {
match code {
0 => Ok(FilesystemKind::Regular),
@@ -199,8 +223,10 @@ impl FilesystemKind {
}
}
+/// Possible errors from `FilesystemKind` conversions.
#[derive(Debug, thiserror::Error)]
pub enum Error {
+ /// The code was unknown.
#[error("unknown file kind code {0}")]
UnknownFileKindCode(u8),
}
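
A sketch exercising the entry and kind APIs documented above:

```rust
use obnam::fsentry::{FilesystemEntry, FilesystemKind};
use std::path::Path;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let path = Path::new("/etc/hosts");
    // symlink_metadata, so symlinks are seen as symlinks, not targets.
    let meta = std::fs::symlink_metadata(path)?;
    let entry = FilesystemEntry::from_metadata(path, &meta)?;

    // Kinds round-trip through the numeric codes used for storage.
    let code = entry.kind().as_code();
    assert_eq!(FilesystemKind::from_code(code)?, entry.kind());
    println!("{}: {} bytes", entry.pathbuf().display(), entry.len());
    Ok(())
}
```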
diff --git a/src/fsiter.rs b/src/fsiter.rs
index 2325793..2747cce 100644
--- a/src/fsiter.rs
+++ b/src/fsiter.rs
@@ -1,3 +1,5 @@
+//! Iterate over directory tree.
+
use crate::fsentry::{FilesystemEntry, FsEntryError};
use log::{debug, warn};
use std::path::{Path, PathBuf};
@@ -5,6 +7,7 @@ use walkdir::{DirEntry, IntoIter, WalkDir};
/// Filesystem entry along with additional info about it.
pub struct AnnotatedFsEntry {
+ /// The file system entry being annotated.
pub inner: FilesystemEntry,
/// Is `entry` a valid CACHEDIR.TAG?
pub is_cachedir_tag: bool,
@@ -15,19 +18,24 @@ pub struct FsIterator {
iter: SkipCachedirs,
}
+/// Possible errors from iterating over a directory tree.
#[derive(Debug, thiserror::Error)]
pub enum FsIterError {
+ /// Error from the walkdir crate.
#[error("walkdir failed: {0}")]
WalkDir(walkdir::Error),
+ /// Error reading a file's metadata.
#[error("failed to get file system metadata for {0}: {1}")]
Metadata(PathBuf, std::io::Error),
+ /// Error related to file system entries.
#[error(transparent)]
FsEntryError(#[from] FsEntryError),
}
impl FsIterator {
+ /// Create a new iterator.
pub fn new(root: &Path, exclude_cache_tag_directories: bool) -> Self {
Self {
iter: SkipCachedirs::new(
diff --git a/src/generation.rs b/src/generation.rs
index 71821d8..5560eaf 100644
--- a/src/generation.rs
+++ b/src/generation.rs
@@ -1,3 +1,5 @@
+//! Backup generations of various kinds.
+
use crate::backup_reason::Reason;
use crate::chunkid::ChunkId;
use crate::fsentry::FilesystemEntry;
@@ -23,10 +25,12 @@ pub struct GenId {
}
impl GenId {
+ /// Create a generation identifier from a chunk identifier.
pub fn from_chunk_id(id: ChunkId) -> Self {
Self { id }
}
+ /// Convert a generation identifier into a chunk identifier.
pub fn as_chunk_id(&self) -> &ChunkId {
&self.id
}
@@ -51,25 +55,32 @@ pub struct NascentGeneration {
fileno: FileId,
}
+/// Possible errors from nascent backup generations.
#[derive(Debug, thiserror::Error)]
pub enum NascentError {
+ /// Error backing up a backup root.
#[error("Could not back up a backup root directory: {0}: {1}")]
BackupRootFailed(PathBuf, crate::fsiter::FsIterError),
+ /// Error using a local generation.
#[error(transparent)]
LocalGenerationError(#[from] LocalGenerationError),
+ /// Error from an SQL transaction.
#[error("SQL transaction error: {0}")]
Transaction(rusqlite::Error),
+ /// Error from committing an SQL transaction.
#[error("SQL commit error: {0}")]
Commit(rusqlite::Error),
+ /// Error creating a temporary file.
#[error("Failed to create temporary file: {0}")]
TempFile(#[from] std::io::Error),
}
impl NascentGeneration {
+ /// Create a new nascent generation.
pub fn create<P>(filename: P) -> Result<Self, NascentError>
where
P: AsRef<Path>,
@@ -78,10 +89,12 @@ impl NascentGeneration {
Ok(Self { conn, fileno: 0 })
}
+ /// How many files are there now in the nascent generation?
pub fn file_count(&self) -> FileId {
self.fileno
}
+ /// Insert a new file system entry into a nascent generation.
pub fn insert(
&mut self,
e: FilesystemEntry,
@@ -97,9 +110,10 @@ impl NascentGeneration {
}
}
-/// A finished generation.
+/// A finished generation on the server.
///
-/// A generation is finished when it's on the server. It can be restored.
+/// A generation is finished when it's on the server. It can be
+/// fetched so it can be used as a [`LocalGeneration`].
#[derive(Debug, Clone)]
pub struct FinishedGeneration {
id: GenId,
@@ -107,6 +121,7 @@ pub struct FinishedGeneration {
}
impl FinishedGeneration {
+ /// Create a new finished generation.
pub fn new(id: &str, ended: &str) -> Self {
let id = GenId::from_chunk_id(id.parse().unwrap()); // this never fails
Self {
@@ -115,10 +130,12 @@ impl FinishedGeneration {
}
}
+ /// Get the generation's identifier.
pub fn id(&self) -> &GenId {
&self.id
}
+ /// When was generation finished?
pub fn ended(&self) -> &str {
&self.ended
}
@@ -132,30 +149,39 @@ pub struct LocalGeneration {
conn: Connection,
}
+/// Possible errors from using local generations.
#[derive(Debug, thiserror::Error)]
pub enum LocalGenerationError {
+ /// Duplicate file names.
#[error("Generation has more than one file with the name {0}")]
TooManyFiles(PathBuf),
+ /// No 'meta' table in generation.
#[error("Generation does not have a 'meta' table")]
NoMeta,
+ /// Missing row in the 'meta' table.
#[error("Generation 'meta' table does not have a row {0}")]
NoMetaKey(String),
+ /// Bad data in 'meta' table.
#[error("Generation 'meta' row {0} has badly formed integer: {1}")]
BadMetaInteger(String, std::num::ParseIntError),
+ /// Error from SQL.
#[error(transparent)]
RusqliteError(#[from] rusqlite::Error),
+ /// Error from JSON.
#[error(transparent)]
SerdeJsonError(#[from] serde_json::Error),
+ /// Error from I/O.
#[error(transparent)]
IoError(#[from] std::io::Error),
}
+/// A backed up file in a local generation.
pub struct BackedUpFile {
fileno: FileId,
entry: FilesystemEntry,
@@ -163,6 +189,7 @@ pub struct BackedUpFile {
}
impl BackedUpFile {
+ /// Create a new `BackedUpFile`.
pub fn new(fileno: FileId, entry: FilesystemEntry, reason: &str) -> Self {
let reason = Reason::from(reason);
Self {
@@ -172,20 +199,24 @@ impl BackedUpFile {
}
}
+ /// Return id for file in its local generation.
pub fn fileno(&self) -> FileId {
self.fileno
}
+ /// Return file system entry for file.
pub fn entry(&self) -> &FilesystemEntry {
&self.entry
}
+ /// Return reason why file is in its local generation.
pub fn reason(&self) -> Reason {
self.reason
}
}
impl LocalGeneration {
+ /// Open a local file as a local generation.
pub fn open<P>(filename: P) -> Result<Self, LocalGenerationError>
where
P: AsRef<Path>,
@@ -194,19 +225,23 @@ impl LocalGeneration {
Ok(Self { conn })
}
+ /// Return generation metadata for local generation.
pub fn meta(&self) -> Result<GenMeta, LocalGenerationError> {
let map = sql::meta(&self.conn)?;
GenMeta::from(map)
}
+ /// How many files are there in the local generation?
pub fn file_count(&self) -> Result<i64, LocalGenerationError> {
sql::file_count(&self.conn)
}
+ /// Return all files in the local generation.
pub fn files(&self) -> Result<sql::SqlResults<BackedUpFile>, LocalGenerationError> {
sql::files(&self.conn)
}
+ /// Return ids for all chunks in local generation.
pub fn chunkids(
&self,
fileno: FileId,
@@ -214,6 +249,7 @@ impl LocalGeneration {
sql::chunkids(&self.conn, fileno)
}
+ /// Return entry for a file, given its pathname.
pub fn get_file(
&self,
filename: &Path,
@@ -221,16 +257,18 @@ impl LocalGeneration {
sql::get_file(&self.conn, filename)
}
+ /// Get the id in the local generation of a file, given its pathname.
pub fn get_fileno(&self, filename: &Path) -> Result<Option<FileId>, LocalGenerationError> {
sql::get_fileno(&self.conn, filename)
}
+ /// Does a pathname refer to a CACHEDIR.TAG file?
pub fn is_cachedir_tag(&self, filename: &Path) -> Result<bool, LocalGenerationError> {
sql::is_cachedir_tag(&self.conn, filename)
}
}
-/// Metadata about the generation.
+/// Metadata about the local generation.
#[derive(Debug, Serialize)]
pub struct GenMeta {
schema_version: SchemaVersion,
@@ -238,6 +276,7 @@ pub struct GenMeta {
}
impl GenMeta {
+ /// Create from a hash map.
fn from(mut map: HashMap<String, String>) -> Result<Self, LocalGenerationError> {
let major: u32 = metaint(&mut map, "schema_version_major")?;
let minor: u32 = metaint(&mut map, "schema_version_minor")?;
@@ -247,6 +286,7 @@ impl GenMeta {
})
}
+ /// Return schema version of local generation.
pub fn schema_version(&self) -> SchemaVersion {
self.schema_version
}
@@ -279,7 +319,9 @@ fn metaint(map: &mut HashMap<String, String>, key: &str) -> Result<u32, LocalGen
/// at all.
#[derive(Debug, Clone, Copy, Serialize)]
pub struct SchemaVersion {
+ /// Major version.
pub major: u32,
+ /// Minor version.
pub minor: u32,
}
@@ -304,6 +346,7 @@ mod sql {
use std::os::unix::ffi::OsStrExt;
use std::path::Path;
+ /// Create a new database in a file.
pub fn create_db(filename: &Path) -> Result<Connection, LocalGenerationError> {
let flags = OpenFlags::SQLITE_OPEN_CREATE | OpenFlags::SQLITE_OPEN_READ_WRITE;
let conn = Connection::open_with_flags(filename, flags)?;
@@ -335,6 +378,7 @@ mod sql {
Ok(())
}
+ /// Open an existing database in a file.
pub fn open_db(filename: &Path) -> Result<Connection, LocalGenerationError> {
let flags = OpenFlags::SQLITE_OPEN_READ_WRITE;
let conn = Connection::open_with_flags(filename, flags)?;
@@ -342,6 +386,7 @@ mod sql {
Ok(conn)
}
+ /// Return generation metadata from a database.
pub fn meta(conn: &Connection) -> Result<HashMap<String, String>, LocalGenerationError> {
let mut stmt = conn.prepare("SELECT key, value FROM meta")?;
let iter = stmt.query_map(params![], row_to_key_value)?;
@@ -359,6 +404,7 @@ mod sql {
Ok((key, value))
}
+ /// Insert one file system entry into the database.
pub fn insert_one(
t: &Transaction,
e: FilesystemEntry,
@@ -385,6 +431,7 @@ mod sql {
path.as_os_str().as_bytes().to_vec()
}
+ /// Parse an SQL query result row.
pub fn row_to_entry(row: &Row) -> rusqlite::Result<(FileId, String, String)> {
let fileno: FileId = row.get("fileno")?;
let json: String = row.get("json")?;
@@ -392,6 +439,7 @@ mod sql {
Ok((fileno, json, reason))
}
+ /// Count number of file system entries.
pub fn file_count(conn: &Connection) -> Result<FileId, LocalGenerationError> {
let mut stmt = conn.prepare("SELECT count(*) FROM files")?;
let mut iter = stmt.query_map(params![], |row| row.get(0))?;
@@ -435,6 +483,7 @@ mod sql {
-> Result<SqlResultsIterator<'stmt, ItemT>, LocalGenerationError>,
>;
+ /// Iterator of SQL results.
pub struct SqlResults<'conn, ItemT> {
stmt: Statement<'conn>,
create_iter: CreateIterFn<'conn, ItemT>,
@@ -450,11 +499,13 @@ mod sql {
Ok(Self { stmt, create_iter })
}
+ /// Create an iterator over results.
pub fn iter(&'_ mut self) -> Result<SqlResultsIterator<'_, ItemT>, LocalGenerationError> {
(self.create_iter)(&mut self.stmt)
}
}
+ /// Return all file system entries in database.
pub fn files(conn: &Connection) -> Result<SqlResults<BackedUpFile>, LocalGenerationError> {
SqlResults::new(
conn,
@@ -472,6 +523,7 @@ mod sql {
)
}
+ /// Return all chunk ids in database.
pub fn chunkids(
conn: &Connection,
fileno: FileId,
@@ -490,6 +542,7 @@ mod sql {
)
}
+ /// Get a file's information given its path.
pub fn get_file(
conn: &Connection,
filename: &Path,
@@ -500,6 +553,7 @@ mod sql {
}
}
+ /// Get a file's id in the database, given its pathname.
pub fn get_fileno(
conn: &Connection,
filename: &Path,
@@ -534,6 +588,7 @@ mod sql {
}
}
+ /// Does a path refer to a CACHEDIR.TAG file?
pub fn is_cachedir_tag(
conn: &Connection,
filename: &Path,
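
A sketch tying the local-generation API together; the database path is hypothetical, and it is assumed the `SqlResults` iterator yields `Result` items:

```rust
use obnam::generation::LocalGeneration;
use std::path::Path;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Open a generation database previously fetched from the server.
    let gen = LocalGeneration::open(Path::new("gen.db"))?;
    println!("schema {:?}, {} files", gen.meta()?.schema_version(), gen.file_count()?);

    // Iterate over every backed-up file and say why it's there.
    let mut files = gen.files()?;
    for file in files.iter()? {
        let file = file?;
        println!("{} ({})", file.entry().pathbuf().display(), file.reason());
    }
    Ok(())
}
```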
diff --git a/src/genlist.rs b/src/genlist.rs
index a81a997..3a0d81a 100644
--- a/src/genlist.rs
+++ b/src/genlist.rs
@@ -1,27 +1,39 @@
+//! A list of generations on the server.
+
use crate::chunkid::ChunkId;
use crate::generation::{FinishedGeneration, GenId};
+/// A list of generations on the server.
pub struct GenerationList {
list: Vec<FinishedGeneration>,
}
+/// Possible errors from listing generations.
#[derive(Debug, thiserror::Error)]
pub enum GenerationListError {
+ /// Server doesn't know about a generation.
#[error("Unknown generation: {0}")]
UnknownGeneration(ChunkId),
}
impl GenerationList {
+ /// Create a new list of generations.
pub fn new(gens: Vec<FinishedGeneration>) -> Self {
let mut list = gens;
list.sort_by_cached_key(|gen| gen.ended().to_string());
Self { list }
}
+ /// Return an iterator over the generations.
pub fn iter(&self) -> impl Iterator<Item = &FinishedGeneration> {
self.list.iter()
}
+ /// Resolve a symbolic name of a generation into its identifier.
+ ///
+ /// For example, "latest" refers to the most recent backup, but
+ /// needs to be resolved into an actual, immutable id before it
+ /// can be restored.
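+ ///
+ /// # Example
+ ///
+ /// A minimal sketch, assuming the crate is built as `obnam`:
+ /// resolving against an empty list fails, as there is no latest
+ /// generation yet.
+ ///
+ /// ```
+ /// use obnam::genlist::GenerationList;
+ ///
+ /// let gens = GenerationList::new(vec![]);
+ /// assert!(gens.resolve("latest").is_err());
+ /// ```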
pub fn resolve(&self, genref: &str) -> Result<GenId, GenerationListError> {
let gen = if self.list.is_empty() {
None
diff --git a/src/index.rs b/src/index.rs
index d76e4a3..b9d29a2 100644
--- a/src/index.rs
+++ b/src/index.rs
@@ -1,10 +1,12 @@
+//! An on-disk index of chunks for the server.
+
use crate::checksummer::Checksum;
use crate::chunkid::ChunkId;
use crate::chunkmeta::ChunkMeta;
use rusqlite::Connection;
use std::path::Path;
-/// A chunk index.
+/// A chunk index stored on disk.
///
/// A chunk index lets the server quickly find chunks based on a
/// string key/value pair, or whether they are generations.
@@ -30,6 +32,7 @@ pub enum IndexError {
}
impl Index {
+ /// Create a new index.
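+ ///
+ /// # Example
+ ///
+ /// A sketch of opening (or creating) an index; the directory name
+ /// is made up:
+ ///
+ /// ```no_run
+ /// use obnam::index::Index;
+ ///
+ /// let index = Index::new("/srv/obnam/chunks").unwrap(); // made-up path
+ /// let gens = index.find_generations().unwrap();
+ /// ```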
pub fn new<P: AsRef<Path>>(dirname: P) -> Result<Self, IndexError> {
let filename = dirname.as_ref().join("meta.db");
let conn = if filename.exists() {
@@ -40,6 +43,7 @@ impl Index {
Ok(Self { conn })
}
+ /// Insert metadata for a new chunk into the index.
pub fn insert_meta(&mut self, id: ChunkId, meta: ChunkMeta) -> Result<(), IndexError> {
let t = self.conn.transaction()?;
sql::insert(&t, &id, &meta)?;
@@ -47,22 +51,27 @@ impl Index {
Ok(())
}
+ /// Look up metadata for a chunk, given its id.
pub fn get_meta(&self, id: &ChunkId) -> Result<ChunkMeta, IndexError> {
sql::lookup(&self.conn, id)
}
+ /// Remove a chunk's metadata.
pub fn remove_meta(&mut self, id: &ChunkId) -> Result<(), IndexError> {
sql::remove(&self.conn, id)
}
+ /// Find chunks with a given checksum.
pub fn find_by_sha256(&self, sha256: &str) -> Result<Vec<ChunkId>, IndexError> {
sql::find_by_256(&self.conn, sha256)
}
+ /// Find all backup generations.
pub fn find_generations(&self) -> Result<Vec<ChunkId>, IndexError> {
sql::find_generations(&self.conn)
}
+ /// Find all chunks.
pub fn all_chunks(&self) -> Result<Vec<ChunkId>, IndexError> {
sql::find_chunk_ids(&self.conn)
}
@@ -156,6 +165,7 @@ mod sql {
use rusqlite::{params, Connection, OpenFlags, Row, Transaction};
use std::path::Path;
+ /// Create a database in a file.
pub fn create_db(filename: &Path) -> Result<Connection, IndexError> {
let flags = OpenFlags::SQLITE_OPEN_CREATE | OpenFlags::SQLITE_OPEN_READ_WRITE;
let conn = Connection::open_with_flags(filename, flags)?;
@@ -172,6 +182,7 @@ mod sql {
Ok(conn)
}
+ /// Open an existing database in a file.
pub fn open_db(filename: &Path) -> Result<Connection, IndexError> {
let flags = OpenFlags::SQLITE_OPEN_READ_WRITE;
let conn = Connection::open_with_flags(filename, flags)?;
@@ -179,6 +190,7 @@ mod sql {
Ok(conn)
}
+ /// Insert a new chunk's metadata into the database.
pub fn insert(t: &Transaction, chunkid: &ChunkId, meta: &ChunkMeta) -> Result<(), IndexError> {
let chunkid = format!("{}", chunkid);
let sha256 = meta.sha256();
@@ -191,11 +203,13 @@ mod sql {
Ok(())
}
+ /// Remove a chunk's metadata from the database.
pub fn remove(conn: &Connection, chunkid: &ChunkId) -> Result<(), IndexError> {
conn.execute("DELETE FROM chunks WHERE id IS ?1", params![chunkid])?;
Ok(())
}
+ /// Look up a chunk using its id.
pub fn lookup(conn: &Connection, id: &ChunkId) -> Result<ChunkMeta, IndexError> {
let mut stmt = conn.prepare("SELECT * FROM chunks WHERE id IS ?1")?;
let iter = stmt.query_map(params![id], row_to_meta)?;
@@ -217,6 +231,7 @@ mod sql {
Ok(r)
}
+ /// Find chunks with a given checksum.
pub fn find_by_256(conn: &Connection, sha256: &str) -> Result<Vec<ChunkId>, IndexError> {
let mut stmt = conn.prepare("SELECT id FROM chunks WHERE sha256 IS ?1")?;
let iter = stmt.query_map(params![sha256], row_to_id)?;
@@ -228,6 +243,7 @@ mod sql {
Ok(ids)
}
+ /// Find all generations.
pub fn find_generations(conn: &Connection) -> Result<Vec<ChunkId>, IndexError> {
let mut stmt = conn.prepare("SELECT id FROM chunks WHERE generation IS 1")?;
let iter = stmt.query_map(params![], row_to_id)?;
@@ -239,6 +255,7 @@ mod sql {
Ok(ids)
}
+ /// Find ids of all chunks that are not generations.
pub fn find_chunk_ids(conn: &Connection) -> Result<Vec<ChunkId>, IndexError> {
let mut stmt = conn.prepare("SELECT id FROM chunks WHERE generation IS 0")?;
let iter = stmt.query_map(params![], row_to_id)?;
diff --git a/src/indexedstore.rs b/src/indexedstore.rs
index c77b552..49953ee 100644
--- a/src/indexedstore.rs
+++ b/src/indexedstore.rs
@@ -1,3 +1,5 @@
+//! An indexed, on-disk store for chunks on the server.
+
use crate::chunk::{DataChunk, GenerationChunkError};
use crate::chunkid::ChunkId;
use crate::chunkmeta::ChunkMeta;
@@ -21,6 +23,7 @@ pub enum IndexedError {
#[error(transparent)]
IndexError(#[from] IndexError),
+ /// Error regarding generation chunks.
#[error(transparent)]
GenerationChunkError(#[from] GenerationChunkError),
@@ -30,12 +33,14 @@ pub enum IndexedError {
}
impl IndexedStore {
+ /// Create a new indexed store.
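+ ///
+ /// # Example
+ ///
+ /// A sketch; the directory and checksum are made up:
+ ///
+ /// ```no_run
+ /// use obnam::indexedstore::IndexedStore;
+ /// use std::path::Path;
+ ///
+ /// let store = IndexedStore::new(Path::new("/srv/obnam/chunks")).unwrap();
+ /// let ids = store.find_by_sha256("abc123").unwrap(); // made-up checksum
+ /// ```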
pub fn new(dirname: &Path) -> Result<Self, IndexedError> {
let store = Store::new(dirname);
let index = Index::new(dirname)?;
Ok(Self { store, index })
}
+ /// Save a chunk in the store.
pub fn save(&mut self, chunk: &DataChunk) -> Result<ChunkId, IndexedError> {
let id = ChunkId::new();
self.store.save(&id, chunk)?;
@@ -48,22 +53,27 @@ impl IndexedStore {
Ok(())
}
+ /// Get a chunk from the store, given its id.
pub fn load(&self, id: &ChunkId) -> Result<(DataChunk, ChunkMeta), IndexedError> {
Ok((self.store.load(id)?, self.load_meta(id)?))
}
+ /// Get a chunk's metadata from the store, given its id.
pub fn load_meta(&self, id: &ChunkId) -> Result<ChunkMeta, IndexedError> {
Ok(self.index.get_meta(id)?)
}
+ /// Find chunks with a given checksum.
pub fn find_by_sha256(&self, sha256: &str) -> Result<Vec<ChunkId>, IndexedError> {
Ok(self.index.find_by_sha256(sha256)?)
}
+ /// Find all generations.
pub fn find_generations(&self) -> Result<Vec<ChunkId>, IndexedError> {
Ok(self.index.find_generations()?)
}
+ /// Remove a chunk from the store.
pub fn remove(&mut self, id: &ChunkId) -> Result<(), IndexedError> {
self.index.remove_meta(id)?;
self.store.delete(id)?;
diff --git a/src/lib.rs b/src/lib.rs
index 3e378f6..957ec13 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,3 +1,10 @@
+//! Encrypted backups.
+//!
+//! Obnam is a backup program that encrypts its backups. This crate
+//! provides access to all of Obnam's functionality as a library.
+
+#![deny(missing_docs)]
+
pub mod backup_progress;
pub mod backup_reason;
pub mod backup_run;
diff --git a/src/passwords.rs b/src/passwords.rs
index bc1a1d7..c448087 100644
--- a/src/passwords.rs
+++ b/src/passwords.rs
@@ -1,3 +1,5 @@
+//! Passwords for encryption.
+
use pbkdf2::{
password_hash::{PasswordHasher, SaltString},
Pbkdf2,
@@ -10,12 +12,14 @@ use std::path::{Path, PathBuf};
const KEY_LEN: usize = 32; // Only size accepted by aead crate?
+/// Encryption password.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Passwords {
encryption: String,
}
impl Passwords {
+ /// Create a new encryption password from a user-supplied passphrase.
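+ ///
+ /// # Example
+ ///
+ /// A sketch; the passphrase is, of course, made up:
+ ///
+ /// ```
+ /// use obnam::passwords::Passwords;
+ ///
+ /// let pw = Passwords::new("correct horse battery staple");
+ /// let key = pw.encryption_key(); // a KEY_LEN (32) byte key
+ /// ```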
pub fn new(passphrase: &str) -> Self {
let mut key = derive_password(passphrase);
let _ = key.split_off(KEY_LEN);
@@ -23,10 +27,12 @@ impl Passwords {
Self { encryption: key }
}
+ /// Get encryption key.
pub fn encryption_key(&self) -> &[u8] {
self.encryption.as_bytes()
}
+ /// Load passwords from file.
pub fn load(filename: &Path) -> Result<Self, PasswordError> {
let data = std::fs::read(filename)
.map_err(|err| PasswordError::Read(filename.to_path_buf(), err))?;
@@ -34,6 +40,7 @@ impl Passwords {
.map_err(|err| PasswordError::Parse(filename.to_path_buf(), err))
}
+ /// Save passwords to file.
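+ ///
+ /// # Example
+ ///
+ /// A sketch of a save/load round trip; the file name is
+ /// illustrative:
+ ///
+ /// ```no_run
+ /// use obnam::passwords::Passwords;
+ /// use std::path::Path;
+ ///
+ /// let pw = Passwords::new("hunter2");
+ /// pw.save(Path::new("passwords.yaml")).unwrap();
+ /// let pw = Passwords::load(Path::new("passwords.yaml")).unwrap();
+ /// ```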
pub fn save(&self, filename: &Path) -> Result<(), PasswordError> {
eprintln!("saving passwords to {:?}", filename);
@@ -60,6 +67,7 @@ impl Passwords {
}
}
+/// Return the name of the password file, relative to the configuration file.
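+///
+/// # Example
+///
+/// The configuration path here is illustrative:
+///
+/// ```
+/// use obnam::passwords::passwords_filename;
+/// use std::path::{Path, PathBuf};
+///
+/// let config = Path::new("/home/user/.config/obnam/obnam.yaml");
+/// assert_eq!(
+///     passwords_filename(config),
+///     PathBuf::from("/home/user/.config/obnam/passwords.yaml")
+/// );
+/// ```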
pub fn passwords_filename(config_filename: &Path) -> PathBuf {
let mut filename = config_filename.to_path_buf();
filename.set_file_name("passwords.yaml");
@@ -75,17 +83,22 @@ fn derive_password(passphrase: &str) -> String {
.to_string()
}
+/// Possible errors from passwords.
#[derive(Debug, thiserror::Error)]
pub enum PasswordError {
+ /// Failed to serialize passwords to YAML when saving.
#[error("failed to serialize passwords for saving: {0}")]
Serialize(serde_yaml::Error),
+ /// Failed to save to file.
#[error("failed to save passwords to {0}: {1}")]
Write(PathBuf, std::io::Error),
+ /// Failed to read passwords file.
#[error("failed to read passwords from {0}: {1}")]
Read(PathBuf, std::io::Error),
+ /// Failed to parse passwords file.
#[error("failed to parse saved passwords from {0}: {1}")]
Parse(PathBuf, serde_yaml::Error),
}
diff --git a/src/policy.rs b/src/policy.rs
index 39c73fc..9b66c1d 100644
--- a/src/policy.rs
+++ b/src/policy.rs
@@ -1,14 +1,18 @@
+//! Policy for what gets backed up.
+
use crate::backup_reason::Reason;
use crate::fsentry::FilesystemEntry;
use crate::generation::LocalGeneration;
use log::{debug, warn};
+/// Policy for what gets backed up.
pub struct BackupPolicy {
new: bool,
old_if_changed: bool,
}
impl BackupPolicy {
+ /// Create a default policy.
pub fn default() -> Self {
Self {
new: true,
@@ -16,6 +20,7 @@ impl BackupPolicy {
}
}
+ /// Does a given file need to be backed up?
pub fn needs_backup(&self, old: &LocalGeneration, new_entry: &FilesystemEntry) -> Reason {
let new_name = new_entry.pathbuf();
let reason = match old.get_file(&new_name) {
diff --git a/src/server.rs b/src/server.rs
index 26f67bd..31a03fc 100644
--- a/src/server.rs
+++ b/src/server.rs
@@ -1,3 +1,5 @@
+//! The Obnam chunk server.
+
use crate::chunk::DataChunk;
use crate::chunkid::ChunkId;
use crate::chunkmeta::ChunkMeta;
@@ -6,37 +8,50 @@ use std::collections::HashMap;
use std::default::Default;
use std::path::{Path, PathBuf};
+/// Server configuration.
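+///
+/// An example configuration file in YAML (all values are
+/// illustrative):
+///
+/// ```yaml
+/// chunks: /srv/obnam/chunks
+/// address: 0.0.0.0:8888
+/// tls_key: /etc/obnam/tls.key
+/// tls_cert: /etc/obnam/tls.pem
+/// ```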
#[derive(Debug, Deserialize, Clone)]
#[serde(deny_unknown_fields)]
pub struct ServerConfig {
+ /// Path to directory where chunks are stored.
pub chunks: PathBuf,
+ /// Address for the server to listen on.
pub address: String,
+ /// Path to TLS key.
pub tls_key: PathBuf,
+ /// Path to TLS certificate.
pub tls_cert: PathBuf,
}
+/// Possible errors with the server configuration.
#[derive(Debug, thiserror::Error)]
pub enum ServerConfigError {
+ /// The chunks directory doesn't exist.
#[error("Directory for chunks {0} does not exist")]
ChunksDirNotFound(PathBuf),
+ /// The TLS certificate doesn't exist.
#[error("TLS certificate {0} does not exist")]
TlsCertNotFound(PathBuf),
+ /// The TLS key doesn't exist.
#[error("TLS key {0} does not exist")]
TlsKeyNotFound(PathBuf),
+ /// The server address can't be resolved.
#[error("server address can't be resolved")]
BadServerAddress,
+ /// Failed to read configuration file.
#[error("failed to read configuration file {0}: {1}")]
Read(PathBuf, std::io::Error),
+ /// Failed to parse configuration file as YAML.
#[error("failed to parse configuration file as YAML: {0}")]
YamlParse(serde_yaml::Error),
}
impl ServerConfig {
+ /// Read, parse, and check the server configuration file.
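+ ///
+ /// # Example
+ ///
+ /// A sketch; the file name is made up:
+ ///
+ /// ```no_run
+ /// use obnam::server::ServerConfig;
+ /// use std::path::Path;
+ ///
+ /// let config = ServerConfig::read_config(Path::new("/etc/obnam/server.yaml")).unwrap();
+ /// println!("listening on {}", config.address);
+ /// ```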
pub fn read_config(filename: &Path) -> Result<Self, ServerConfigError> {
let config = match std::fs::read_to_string(filename) {
Ok(config) => config,
@@ -47,6 +62,7 @@ impl ServerConfig {
Ok(config)
}
+ /// Check the configuration.
pub fn check(&self) -> Result<(), ServerConfigError> {
if !self.chunks.exists() {
return Err(ServerConfigError::ChunksDirNotFound(self.chunks.clone()));
@@ -68,17 +84,18 @@ pub struct Created {
}
impl Created {
+ /// Wrap the id of a newly created chunk.
pub fn new(id: ChunkId) -> Self {
Created { id }
}
+ /// Convert to JSON.
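+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use obnam::chunkid::ChunkId;
+ /// use obnam::server::Created;
+ ///
+ /// let created = Created::new(ChunkId::new());
+ /// println!("{}", created.to_json()); // e.g. {"id":"..."}
+ /// ```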
pub fn to_json(&self) -> String {
serde_json::to_string(&self).unwrap()
}
}
/// Result of retrieving a chunk.
-
#[derive(Debug, Serialize)]
pub struct Fetched {
id: ChunkId,
@@ -86,10 +103,12 @@ pub struct Fetched {
}
impl Fetched {
+ /// Create a new fetched chunk from its id and contents.
pub fn new(id: ChunkId, chunk: DataChunk) -> Self {
Fetched { id, chunk }
}
+ /// Convert to JSON.
pub fn to_json(&self) -> String {
serde_json::to_string(&self).unwrap()
}
@@ -102,15 +121,18 @@ pub struct SearchHits {
}
impl SearchHits {
+ /// Insert a new chunk id into the search results.
pub fn insert(&mut self, id: ChunkId, meta: ChunkMeta) {
self.map.insert(id.to_string(), meta);
}
+ /// Convert from JSON.
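+ ///
+ /// # Example
+ ///
+ /// A sketch with an empty result set:
+ ///
+ /// ```
+ /// use obnam::server::SearchHits;
+ ///
+ /// let hits = SearchHits::from_json("{}").unwrap();
+ /// assert_eq!(hits.to_json(), "{}");
+ /// ```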
pub fn from_json(s: &str) -> Result<Self, serde_json::Error> {
let map = serde_json::from_str(s)?;
Ok(SearchHits { map })
}
+ /// Convert to JSON.
pub fn to_json(&self) -> String {
serde_json::to_string(&self.map).unwrap()
}
diff --git a/src/store.rs b/src/store.rs
index 830074e..4e85ba1 100644
--- a/src/store.rs
+++ b/src/store.rs
@@ -1,3 +1,5 @@
+//! On-disk storage of chunks on the server.
+
use crate::chunk::DataChunk;
use crate::chunkid::ChunkId;
use std::path::{Path, PathBuf};
diff --git a/src/workqueue.rs b/src/workqueue.rs
index 44ba5e4..6b3ce80 100644
--- a/src/workqueue.rs
+++ b/src/workqueue.rs
@@ -1,3 +1,5 @@
+//! A queue of work for [`crate::engine::Engine`].
+
use tokio::sync::mpsc;
/// A queue of work items.