Diffstat (limited to 'src/generation.rs')
-rw-r--r--  src/generation.rs  476
1 file changed, 294 insertions(+), 182 deletions(-)
diff --git a/src/generation.rs b/src/generation.rs
index 8a15363..477edc0 100644
--- a/src/generation.rs
+++ b/src/generation.rs
@@ -1,11 +1,43 @@
+//! Backup generations of various kinds.
+
use crate::backup_reason::Reason;
use crate::chunkid::ChunkId;
+use crate::db::{DatabaseError, SqlResults};
+use crate::dbgen::{FileId, GenerationDb, GenerationDbError};
use crate::fsentry::FilesystemEntry;
-use rusqlite::Connection;
-use std::path::Path;
+use crate::genmeta::{GenerationMeta, GenerationMetaError};
+use crate::label::LabelChecksumKind;
+use crate::schema::{SchemaVersion, VersionComponent};
+use serde::Serialize;
+use std::fmt;
+use std::path::{Path, PathBuf};
+
+/// An identifier for a generation.
+#[derive(Debug, Clone, Serialize)]
+pub struct GenId {
+ id: ChunkId,
+}
+
+impl GenId {
+ /// Create a generation identifier from a chunk identifier.
+ pub fn from_chunk_id(id: ChunkId) -> Self {
+ Self { id }
+ }
-/// An identifier for a file in a generation.
-type FileId = i64;
+ /// Convert a generation identifier into a chunk identifier.
+ pub fn as_chunk_id(&self) -> &ChunkId {
+ &self.id
+ }
+}
+
+impl fmt::Display for GenId {
+ /// Format an identifier for display.
+ ///
+ /// The output can be parsed to re-create an identical identifier.
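+ ///
+ /// # Example
+ ///
+ /// A sketch of the round trip (assuming `ChunkId` implements
+ /// `FromStr`, as `FinishedGeneration::new` below relies on):
+ ///
+ /// ```ignore
+ /// let chunk: ChunkId = "abc123".parse().unwrap();
+ /// let gen = GenId::from_chunk_id(chunk.clone());
+ /// assert_eq!(gen.to_string(), chunk.to_string());
+ /// ```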
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "{}", self.id)
+ }
+}
/// A nascent backup generation.
///
@@ -13,90 +45,103 @@ type FileId = i64;
/// finished yet, and it's not actually on the server until the upload
/// of its generation chunk.
pub struct NascentGeneration {
- conn: Connection,
+ db: GenerationDb,
fileno: FileId,
}
+/// Possible errors from nascent backup generations.
+#[derive(Debug, thiserror::Error)]
+pub enum NascentError {
+ /// Error backing up a backup root.
+ #[error("Could not back up a backup root directory: {0}: {1}")]
+ BackupRootFailed(PathBuf, crate::fsiter::FsIterError),
+
+ /// Error using a local generation.
+ #[error(transparent)]
+ LocalGenerationError(#[from] LocalGenerationError),
+
+ /// Error from a GenerationDb.
+ #[error(transparent)]
+ GenerationDb(#[from] GenerationDbError),
+
+ /// Error from an SQL transaction.
+ #[error("SQL transaction error: {0}")]
+ Transaction(rusqlite::Error),
+
+ /// Error from committing an SQL transaction.
+ #[error("SQL commit error: {0}")]
+ Commit(rusqlite::Error),
+
+ /// Error creating a temporary file.
+ #[error("Failed to create temporary file: {0}")]
+ TempFile(#[from] std::io::Error),
+}
+
impl NascentGeneration {
- pub fn create<P>(filename: P) -> anyhow::Result<Self>
+ /// Create a new nascent generation.
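+ ///
+ /// # Example
+ ///
+ /// A minimal sketch; the path, schema version, and checksum kind
+ /// here are illustrative:
+ ///
+ /// ```ignore
+ /// let schema = SchemaVersion::new(0, 0);
+ /// let mut gen = NascentGeneration::create("gen.db", schema, LabelChecksumKind::Sha256)?;
+ /// assert_eq!(gen.file_count(), 0);
+ /// gen.close()?;
+ /// ```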
+ pub fn create<P>(
+ filename: P,
+ schema: SchemaVersion,
+ checksum_kind: LabelChecksumKind,
+ ) -> Result<Self, NascentError>
where
P: AsRef<Path>,
{
- let conn = sql::create_db(filename.as_ref())?;
- Ok(Self { conn, fileno: 0 })
+ let db = GenerationDb::create(filename.as_ref(), schema, checksum_kind)?;
+ Ok(Self { db, fileno: 0 })
+ }
+
+ /// Commit any changes, and close the database.
+ pub fn close(self) -> Result<(), NascentError> {
+ self.db.close().map_err(NascentError::GenerationDb)
}
+ /// How many files are there now in the nascent generation?
pub fn file_count(&self) -> FileId {
self.fileno
}
+ /// Insert a new file system entry into a nascent generation.
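+ ///
+ /// # Example
+ ///
+ /// A sketch using the `EntryBuilder` helper from `crate::fsentry`,
+ /// as the tests below do; `gen` is a `NascentGeneration`:
+ ///
+ /// ```ignore
+ /// let e = EntryBuilder::new(FilesystemKind::Directory)
+ ///     .path(PathBuf::from("/"))
+ ///     .build();
+ /// gen.insert(e, &[], Reason::IsNew, false)?;
+ /// ```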
pub fn insert(
&mut self,
e: FilesystemEntry,
ids: &[ChunkId],
reason: Reason,
- ) -> anyhow::Result<()> {
- let t = self.conn.transaction()?;
+ is_cachedir_tag: bool,
+ ) -> Result<(), NascentError> {
self.fileno += 1;
- sql::insert_one(&t, e, self.fileno, ids, reason)?;
- t.commit()?;
+ self.db
+ .insert(e, self.fileno, ids, reason, is_cachedir_tag)?;
Ok(())
}
-
- pub fn insert_iter<'a>(
- &mut self,
- entries: impl Iterator<Item = anyhow::Result<(FilesystemEntry, Vec<ChunkId>, Reason)>>,
- ) -> anyhow::Result<()> {
- let t = self.conn.transaction()?;
- for r in entries {
- let (e, ids, reason) = r?;
- self.fileno += 1;
- sql::insert_one(&t, e, self.fileno, &ids[..], reason)?;
- }
- t.commit()?;
- Ok(())
- }
-}
-
-#[cfg(test)]
-mod test {
- use super::NascentGeneration;
- use tempfile::NamedTempFile;
-
- #[test]
- fn empty() {
- let filename = NamedTempFile::new().unwrap().path().to_path_buf();
- {
- let mut _gen = NascentGeneration::create(&filename).unwrap();
- // _gen is dropped here; the connection is closed; the file
- // should not be removed.
- }
- assert!(filename.exists());
- }
}
-/// A finished generation.
+/// A finished generation on the server.
///
-/// A generation is finished when it's on the server. It can be restored.
+/// A generation is finished when it's on the server. It can be
+/// fetched so it can be used as a [`LocalGeneration`].
#[derive(Debug, Clone)]
pub struct FinishedGeneration {
- id: ChunkId,
+ id: GenId,
ended: String,
}
impl FinishedGeneration {
+ /// Create a new finished generation.
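+ ///
+ /// # Example
+ ///
+ /// A sketch; the identifier and timestamp strings are illustrative:
+ ///
+ /// ```ignore
+ /// let gen = FinishedGeneration::new("abc123", "2021-01-01T00:00:00Z");
+ /// assert_eq!(gen.ended(), "2021-01-01T00:00:00Z");
+ /// ```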
pub fn new(id: &str, ended: &str) -> Self {
- let id = id.parse().unwrap(); // this never fails
+ let id = GenId::from_chunk_id(id.parse().unwrap()); // this never fails
Self {
id,
ended: ended.to_string(),
}
}
- pub fn id(&self) -> ChunkId {
- self.id.clone()
+ /// Get the generation's identifier.
+ pub fn id(&self) -> &GenId {
+ &self.id
}
+ /// When was the generation finished?
pub fn ended(&self) -> &str {
&self.ended
}
@@ -107,9 +152,51 @@ impl FinishedGeneration {
/// This is for querying an existing generation, and other read-only
/// operations.
pub struct LocalGeneration {
- conn: Connection,
+ db: GenerationDb,
+}
+
+/// Possible errors from using local generations.
+#[derive(Debug, thiserror::Error)]
+pub enum LocalGenerationError {
+ /// Duplicate file names.
+ #[error("Generation has more than one file with the name {0}")]
+ TooManyFiles(PathBuf),
+
+ /// No 'meta' table in generation.
+ #[error("Generation does not have a 'meta' table")]
+ NoMeta,
+
+ /// Local generation uses a schema version that this version of
+ /// Obnam isn't compatible with.
+ #[error("Backup is not compatible with this version of Obnam: {0}.{1}")]
+ Incompatible(VersionComponent, VersionComponent),
+
+ /// Error from generation metadata.
+ #[error(transparent)]
+ GenerationMeta(#[from] GenerationMetaError),
+
+ /// Error from SQL.
+ #[error(transparent)]
+ RusqliteError(#[from] rusqlite::Error),
+
+ /// Error from a GenerationDb.
+ #[error(transparent)]
+ GenerationDb(#[from] GenerationDbError),
+
+ /// Error from a Database.
+ #[error(transparent)]
+ Database(#[from] DatabaseError),
+
+ /// Error from JSON.
+ #[error(transparent)]
+ SerdeJsonError(#[from] serde_json::Error),
+
+ /// Error from I/O.
+ #[error(transparent)]
+ IoError(#[from] std::io::Error),
}
+/// A backed up file in a local generation.
pub struct BackedUpFile {
fileno: FileId,
entry: FilesystemEntry,
@@ -117,8 +204,8 @@ pub struct BackedUpFile {
}
impl BackedUpFile {
- pub fn new(fileno: FileId, entry: FilesystemEntry, reason: &str) -> Self {
- let reason = Reason::from_str(reason);
+ /// Create a new `BackedUpFile`.
+ pub fn new(fileno: FileId, entry: FilesystemEntry, reason: Reason) -> Self {
Self {
fileno,
entry,
@@ -126,179 +213,204 @@ impl BackedUpFile {
}
}
+ /// Return id for file in its local generation.
pub fn fileno(&self) -> FileId {
self.fileno
}
+ /// Return file system entry for file.
pub fn entry(&self) -> &FilesystemEntry {
&self.entry
}
+ /// Return reason why file is in its local generation.
pub fn reason(&self) -> Reason {
self.reason
}
}
impl LocalGeneration {
- pub fn open<P>(filename: P) -> anyhow::Result<Self>
+ fn new(db: GenerationDb) -> Self {
+ Self { db }
+ }
+
+ /// Open a local file as a local generation.
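+ ///
+ /// # Example
+ ///
+ /// A sketch, assuming `gen.db` was written earlier by a
+ /// `NascentGeneration`:
+ ///
+ /// ```ignore
+ /// let gen = LocalGeneration::open("gen.db")?;
+ /// println!("backed up files: {}", gen.file_count()?);
+ /// ```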
+ pub fn open<P>(filename: P) -> Result<Self, LocalGenerationError>
where
P: AsRef<Path>,
{
- let conn = sql::open_db(filename.as_ref())?;
- Ok(Self { conn })
+ let db = GenerationDb::open(filename.as_ref())?;
+ let gen = Self::new(db);
+ Ok(gen)
}
- pub fn file_count(&self) -> anyhow::Result<i64> {
- Ok(sql::file_count(&self.conn)?)
+ /// Return generation metadata for local generation.
+ pub fn meta(&self) -> Result<GenerationMeta, LocalGenerationError> {
+ let map = self.db.meta()?;
+ GenerationMeta::from(map).map_err(LocalGenerationError::GenerationMeta)
}
- pub fn files(&self) -> anyhow::Result<Vec<BackedUpFile>> {
- Ok(sql::files(&self.conn)?)
+ /// How many files are there in the local generation?
+ pub fn file_count(&self) -> Result<FileId, LocalGenerationError> {
+ Ok(self.db.file_count()?)
}
- pub fn chunkids(&self, fileno: FileId) -> anyhow::Result<Vec<ChunkId>> {
- Ok(sql::chunkids(&self.conn, fileno)?)
+ /// Return all files in the local generation.
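+ ///
+ /// # Example
+ ///
+ /// A sketch of iterating over the result; it assumes `SqlResults`
+ /// exposes an `iter` method yielding `Result` rows, as defined in
+ /// `crate::db`:
+ ///
+ /// ```ignore
+ /// let mut files = gen.files()?;
+ /// for row in files.iter()? {
+ ///     let (fileno, entry, _reason, _is_cachedir_tag) = row?;
+ ///     println!("{}: {}", fileno, entry.pathbuf().display());
+ /// }
+ /// ```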
+ pub fn files(
+ &self,
+ ) -> Result<SqlResults<(FileId, FilesystemEntry, Reason, bool)>, LocalGenerationError> {
+ self.db.files().map_err(LocalGenerationError::GenerationDb)
}
- pub fn get_file(&self, filename: &Path) -> anyhow::Result<Option<FilesystemEntry>> {
- Ok(sql::get_file(&self.conn, filename)?)
+ /// Return ids for all chunks of a given file in the local generation.
+ pub fn chunkids(&self, fileid: FileId) -> Result<SqlResults<ChunkId>, LocalGenerationError> {
+ self.db
+ .chunkids(fileid)
+ .map_err(LocalGenerationError::GenerationDb)
}
- pub fn get_fileno(&self, filename: &Path) -> anyhow::Result<Option<FileId>> {
- Ok(sql::get_fileno(&self.conn, filename)?)
+ /// Return entry for a file, given its pathname.
+ pub fn get_file(
+ &self,
+ filename: &Path,
+ ) -> Result<Option<FilesystemEntry>, LocalGenerationError> {
+ self.db
+ .get_file(filename)
+ .map_err(LocalGenerationError::GenerationDb)
}
-}
-mod sql {
- use super::BackedUpFile;
- use super::FileId;
- use crate::backup_reason::Reason;
- use crate::chunkid::ChunkId;
- use crate::error::ObnamError;
- use crate::fsentry::FilesystemEntry;
- use rusqlite::{params, Connection, OpenFlags, Row, Transaction};
- use std::os::unix::ffi::OsStrExt;
- use std::path::Path;
-
- pub fn create_db(filename: &Path) -> anyhow::Result<Connection> {
- let flags = OpenFlags::SQLITE_OPEN_CREATE | OpenFlags::SQLITE_OPEN_READ_WRITE;
- let conn = Connection::open_with_flags(filename, flags)?;
- conn.execute(
- "CREATE TABLE files (fileno INTEGER PRIMARY KEY, filename BLOB, json TEXT, reason TEXT)",
- params![],
- )?;
- conn.execute(
- "CREATE TABLE chunks (fileno INTEGER, chunkid TEXT)",
- params![],
- )?;
- conn.execute("CREATE INDEX filenames ON files (filename)", params![])?;
- conn.execute("CREATE INDEX filenos ON chunks (fileno)", params![])?;
- conn.pragma_update(None, "journal_mode", &"WAL")?;
- Ok(conn)
+ /// Get the id in the local generation of a file, given its pathname.
+ pub fn get_fileno(&self, filename: &Path) -> Result<Option<FileId>, LocalGenerationError> {
+ self.db
+ .get_fileno(filename)
+ .map_err(LocalGenerationError::GenerationDb)
}
- pub fn open_db(filename: &Path) -> anyhow::Result<Connection> {
- let flags = OpenFlags::SQLITE_OPEN_READ_WRITE;
- let conn = Connection::open_with_flags(filename, flags)?;
- conn.pragma_update(None, "journal_mode", &"WAL")?;
- Ok(conn)
- }
-
- pub fn insert_one(
- t: &Transaction,
- e: FilesystemEntry,
- fileno: FileId,
- ids: &[ChunkId],
- reason: Reason,
- ) -> anyhow::Result<()> {
- let json = serde_json::to_string(&e)?;
- t.execute(
- "INSERT INTO files (fileno, filename, json, reason) VALUES (?1, ?2, ?3, ?4)",
- params![fileno, path_into_blob(&e.pathbuf()), &json, reason,],
- )?;
- for id in ids {
- t.execute(
- "INSERT INTO chunks (fileno, chunkid) VALUES (?1, ?2)",
- params![fileno, id],
- )?;
- }
- Ok(())
+ /// Does a pathname refer to a cache directory?
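+ ///
+ /// # Example
+ ///
+ /// A sketch; per the tests below, paths not in the generation
+ /// simply return `false`:
+ ///
+ /// ```ignore
+ /// if gen.is_cachedir_tag(Path::new("/a_tag"))? {
+ ///     // Skip restoring this directory's contents.
+ /// }
+ /// ```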
+ pub fn is_cachedir_tag(&self, filename: &Path) -> Result<bool, LocalGenerationError> {
+ self.db
+ .is_cachedir_tag(filename)
+ .map_err(LocalGenerationError::GenerationDb)
}
+}
- fn path_into_blob(path: &Path) -> Vec<u8> {
- path.as_os_str().as_bytes().to_vec()
- }
+#[cfg(test)]
+mod test {
+ use super::{LabelChecksumKind, LocalGeneration, NascentGeneration, Reason, SchemaVersion};
+ use crate::fsentry::EntryBuilder;
+ use crate::fsentry::FilesystemKind;
+ use std::path::PathBuf;
+ use tempfile::{tempdir, NamedTempFile};
- pub fn row_to_entry(row: &Row) -> rusqlite::Result<(FileId, String, String)> {
- let fileno: FileId = row.get(row.column_index("fileno")?)?;
- let json: String = row.get(row.column_index("json")?)?;
- let reason: String = row.get(row.column_index("reason")?)?;
- Ok((fileno, json, reason))
- }
+ #[test]
+ fn round_trips_u64_max() {
+ let tmp = tempdir().unwrap();
+ let filename = tmp.path().join("test.db");
+ let path = PathBuf::from("/");
+ let schema = SchemaVersion::new(0, 0);
+ {
+ let e = EntryBuilder::new(FilesystemKind::Directory)
+ .path(path.clone())
+ .len(u64::MAX)
+ .build();
+ let mut gen =
+ NascentGeneration::create(&filename, schema, LabelChecksumKind::Sha256).unwrap();
+ gen.insert(e, &[], Reason::IsNew, false).unwrap();
+ gen.close().unwrap();
+ }
- pub fn file_count(conn: &Connection) -> anyhow::Result<FileId> {
- let mut stmt = conn.prepare("SELECT count(*) FROM files")?;
- let mut iter = stmt.query_map(params![], |row| row.get(0))?;
- let count = iter.next().expect("SQL count result (1)");
- let count = count?;
- Ok(count)
+ let db = LocalGeneration::open(&filename).unwrap();
+ let e = db.get_file(&path).unwrap().unwrap();
+ assert_eq!(e.len(), u64::MAX);
}
- pub fn files(conn: &Connection) -> anyhow::Result<Vec<BackedUpFile>> {
- let mut stmt = conn.prepare("SELECT * FROM files")?;
- let iter = stmt.query_map(params![], |row| row_to_entry(row))?;
- let mut files = vec![];
- for x in iter {
- let (fileno, json, reason) = x?;
- let entry = serde_json::from_str(&json)?;
- files.push(BackedUpFile::new(fileno, entry, &reason));
+ #[test]
+ fn empty() {
+ let filename = NamedTempFile::new().unwrap().path().to_path_buf();
+ let schema = SchemaVersion::new(0, 0);
+ {
+ let mut _gen =
+ NascentGeneration::create(&filename, schema, LabelChecksumKind::Sha256).unwrap();
+ // _gen is dropped here; the connection is closed; the file
+ // should not be removed.
}
- Ok(files)
+ assert!(filename.exists());
}
- pub fn chunkids(conn: &Connection, fileno: FileId) -> anyhow::Result<Vec<ChunkId>> {
- let mut stmt = conn.prepare("SELECT chunkid FROM chunks WHERE fileno = ?1")?;
- let iter = stmt.query_map(params![fileno], |row| Ok(row.get(0)?))?;
- let mut ids: Vec<ChunkId> = vec![];
- for x in iter {
- let fileno: String = x?;
- ids.push(ChunkId::from(&fileno));
+ // FIXME: This is way too complicated a test function. It should
+ // be simplified, possibly by re-thinking the abstractions of the
+ // code it calls.
+ #[test]
+ fn remembers_cachedir_tags() {
+ use crate::{
+ backup_reason::Reason, backup_run::FsEntryBackupOutcome, fsentry::FilesystemEntry,
+ };
+ use std::{fs::metadata, path::Path};
+
+ // Create a `Metadata` structure to pass to other functions (we don't care about the
+ // contents)
+ let src_file = NamedTempFile::new().unwrap();
+ let metadata = metadata(src_file.path()).unwrap();
+
+ let dbfile = NamedTempFile::new().unwrap().path().to_path_buf();
+
+ let nontag_path1 = Path::new("/nontag1");
+ let nontag_path2 = Path::new("/dir/nontag2");
+ let tag_path1 = Path::new("/a_tag");
+ let tag_path2 = Path::new("/another_dir/a_tag");
+
+ let schema = SchemaVersion::new(0, 0);
+ let mut gen =
+ NascentGeneration::create(&dbfile, schema, LabelChecksumKind::Sha256).unwrap();
+ let mut cache = users::UsersCache::new();
+
+ gen.insert(
+ FilesystemEntry::from_metadata(nontag_path1, &metadata, &mut cache).unwrap(),
+ &[],
+ Reason::IsNew,
+ false,
+ )
+ .unwrap();
+ gen.insert(
+ FilesystemEntry::from_metadata(tag_path1, &metadata, &mut cache).unwrap(),
+ &[],
+ Reason::IsNew,
+ true,
+ )
+ .unwrap();
+
+ let entries = vec![
+ FsEntryBackupOutcome {
+ entry: FilesystemEntry::from_metadata(nontag_path2, &metadata, &mut cache).unwrap(),
+ ids: vec![],
+ reason: Reason::IsNew,
+ is_cachedir_tag: false,
+ },
+ FsEntryBackupOutcome {
+ entry: FilesystemEntry::from_metadata(tag_path2, &metadata, &mut cache).unwrap(),
+ ids: vec![],
+ reason: Reason::IsNew,
+ is_cachedir_tag: true,
+ },
+ ];
+
+ for o in entries {
+ gen.insert(o.entry, &o.ids, o.reason, o.is_cachedir_tag)
+ .unwrap();
}
- Ok(ids)
- }
- pub fn get_file(conn: &Connection, filename: &Path) -> anyhow::Result<Option<FilesystemEntry>> {
- match get_file_and_fileno(conn, filename)? {
- None => Ok(None),
- Some((_, e, _)) => Ok(Some(e)),
- }
- }
+ gen.close().unwrap();
- pub fn get_fileno(conn: &Connection, filename: &Path) -> anyhow::Result<Option<FileId>> {
- match get_file_and_fileno(conn, filename)? {
- None => Ok(None),
- Some((id, _, _)) => Ok(Some(id)),
- }
- }
+ let gen = LocalGeneration::open(dbfile).unwrap();
+ assert!(!gen.is_cachedir_tag(nontag_path1).unwrap());
+ assert!(!gen.is_cachedir_tag(nontag_path2).unwrap());
+ assert!(gen.is_cachedir_tag(tag_path1).unwrap());
+ assert!(gen.is_cachedir_tag(tag_path2).unwrap());
- fn get_file_and_fileno(
- conn: &Connection,
- filename: &Path,
- ) -> anyhow::Result<Option<(FileId, FilesystemEntry, String)>> {
- let mut stmt = conn.prepare("SELECT * FROM files WHERE filename = ?1")?;
- let mut iter =
- stmt.query_map(params![path_into_blob(filename)], |row| row_to_entry(row))?;
- match iter.next() {
- None => Ok(None),
- Some(Err(e)) => Err(e.into()),
- Some(Ok((fileno, json, reason))) => {
- let entry = serde_json::from_str(&json)?;
- if iter.next() == None {
- Ok(Some((fileno, entry, reason)))
- } else {
- Err(ObnamError::TooManyFiles(filename.to_path_buf()).into())
- }
- }
- }
+ // Nonexistent files are not cachedir tags
+ assert!(!gen.is_cachedir_tag(Path::new("/hello/world")).unwrap());
+ assert!(!gen
+ .is_cachedir_tag(Path::new("/different path/to/another file.txt"))
+ .unwrap());
}
}