 src/backup_run.rs |   9
 src/cmd/backup.rs |  23
 src/dbgen.rs      | 507
 src/error.rs      |   5
 src/generation.rs | 151
 5 files changed, 509 insertions(+), 186 deletions(-)
diff --git a/src/backup_run.rs b/src/backup_run.rs
index 9454625..b03a7ee 100644
--- a/src/backup_run.rs
+++ b/src/backup_run.rs
@@ -8,7 +8,7 @@ use crate::chunkid::ChunkId;
use crate::client::{BackupClient, ClientError};
use crate::config::ClientConfig;
use crate::db::DatabaseError;
-use crate::dbgen::FileId;
+use crate::dbgen::{schema_version, FileId, DEFAULT_SCHEMA_MAJOR};
use crate::error::ObnamError;
use crate::fsentry::{FilesystemEntry, FilesystemKind};
use crate::fsiter::{AnnotatedFsEntry, FsIterError, FsIterator};
@@ -16,6 +16,7 @@ use crate::generation::{
GenId, LocalGeneration, LocalGenerationError, NascentError, NascentGeneration,
};
use crate::policy::BackupPolicy;
+use crate::schema::SchemaVersion;
use bytesize::MIB;
use chrono::{DateTime, Local};
@@ -132,7 +133,8 @@ impl<'a> BackupRun<'a> {
match genid {
None => {
// Create a new, empty generation.
- NascentGeneration::create(oldname)?.close()?;
+ let schema = schema_version(DEFAULT_SCHEMA_MAJOR).unwrap();
+ NascentGeneration::create(oldname, schema)?.close()?;
// Open the newly created empty generation.
Ok(LocalGeneration::open(oldname)?)
@@ -173,11 +175,12 @@ impl<'a> BackupRun<'a> {
config: &ClientConfig,
old: &LocalGeneration,
newpath: &Path,
+ schema: SchemaVersion,
) -> Result<RootsBackupOutcome, ObnamError> {
let mut warnings: Vec<BackupError> = vec![];
let mut new_cachedir_tags = vec![];
let files_count = {
- let mut new = NascentGeneration::create(newpath)?;
+ let mut new = NascentGeneration::create(newpath, schema)?;
for root in &config.roots {
match self.backup_one_root(config, old, &mut new, root).await {
Ok(mut o) => {
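The backup_run.rs changes mean every caller of NascentGeneration::create now chooses a schema version up front instead of relying on a crate-wide constant. A minimal sketch of the new calling convention, using the schema_version helper and DEFAULT_SCHEMA_MAJOR constant introduced in this commit, and assuming ObnamError's existing #[from] conversions (the function name is illustrative):

    use crate::dbgen::{schema_version, DEFAULT_SCHEMA_MAJOR};
    use crate::error::ObnamError;
    use crate::generation::NascentGeneration;
    use std::path::Path;

    /// Create and immediately close an empty generation database,
    /// pinned to the default schema major version.
    fn create_empty_generation(path: &Path) -> Result<(), ObnamError> {
        let schema = schema_version(DEFAULT_SCHEMA_MAJOR)?;
        NascentGeneration::create(path, schema)?.close()?;
        Ok(())
    }

Both the GenerationDbError from schema_version and the NascentError from create convert into ObnamError, so the ? operator suffices throughout.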
diff --git a/src/cmd/backup.rs b/src/cmd/backup.rs
index dae9811..51d0329 100644
--- a/src/cmd/backup.rs
+++ b/src/cmd/backup.rs
@@ -3,9 +3,10 @@
use crate::backup_run::BackupRun;
use crate::client::BackupClient;
use crate::config::ClientConfig;
-use crate::dbgen::FileId;
+use crate::dbgen::{schema_version, FileId, DEFAULT_SCHEMA_MAJOR};
use crate::error::ObnamError;
use crate::generation::GenId;
+use crate::schema::VersionComponent;
use log::info;
use std::time::SystemTime;
@@ -15,7 +16,11 @@ use tokio::runtime::Runtime;
/// Make a backup.
#[derive(Debug, StructOpt)]
-pub struct Backup {}
+pub struct Backup {
+ /// Backup schema major version.
+ #[structopt(long)]
+ backup_version: Option<VersionComponent>,
+}
impl Backup {
/// Run the command.
@@ -27,6 +32,10 @@ impl Backup {
async fn run_async(&self, config: &ClientConfig) -> Result<(), ObnamError> {
let runtime = SystemTime::now();
+        let major = self.backup_version.unwrap_or(DEFAULT_SCHEMA_MAJOR);
+ let schema = schema_version(major)?;
+ eprintln!("backup: schema: {schema}");
+
let client = BackupClient::new(config)?;
let genlist = client.list_generations().await?;
@@ -39,13 +48,19 @@ impl Backup {
info!("fresh backup without a previous generation");
let mut run = BackupRun::initial(config, &client)?;
let old = run.start(None, &oldtemp).await?;
- (false, run.backup_roots(config, &old, &newtemp).await?)
+ (
+ false,
+ run.backup_roots(config, &old, &newtemp, schema).await?,
+ )
}
Ok(old_id) => {
info!("incremental backup based on {}", old_id);
let mut run = BackupRun::incremental(config, &client)?;
let old = run.start(Some(&old_id), &oldtemp).await?;
- (true, run.backup_roots(config, &old, &newtemp).await?)
+ (
+ true,
+ run.backup_roots(config, &old, &newtemp, schema).await?,
+ )
}
};
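Because structopt derives long option names from field names, the new backup_version field surfaces on the command line as --backup-version, and the client falls back to DEFAULT_SCHEMA_MAJOR when the flag is omitted. A hypothetical invocation pinning the new schema major version:

    $ obnam backup --backup-version 1

VersionComponent only needs to implement FromStr for structopt to parse the flag's argument, so a plain integer type alias works as-is.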
diff --git a/src/dbgen.rs b/src/dbgen.rs
index 7e54d7d..816ea11 100644
--- a/src/dbgen.rs
+++ b/src/dbgen.rs
@@ -4,16 +4,28 @@ use crate::backup_reason::Reason;
use crate::chunkid::ChunkId;
use crate::db::{Column, Database, DatabaseError, SqlResults, Table, Value};
use crate::fsentry::FilesystemEntry;
+use crate::genmeta::{GenerationMeta, GenerationMetaError};
+use crate::schema::{SchemaVersion, VersionComponent};
use log::error;
use std::collections::HashMap;
use std::os::unix::ffi::OsStrExt;
use std::path::{Path, PathBuf};
-/// Current generation database schema major version.
-pub const SCHEMA_MAJOR: u32 = 0;
+/// Return latest supported schema version for a supported major
+/// version.
+pub fn schema_version(major: VersionComponent) -> Result<SchemaVersion, GenerationDbError> {
+ match major {
+ 0 => Ok(SchemaVersion::new(0, 0)),
+ 1 => Ok(SchemaVersion::new(1, 0)),
+ _ => Err(GenerationDbError::Unsupported(major)),
+ }
+}
+
+/// Default database schema major version.
+pub const DEFAULT_SCHEMA_MAJOR: VersionComponent = V0_0::MAJOR;
-/// Current generation database schema minor version.
-pub const SCHEMA_MINOR: u32 = 0;
+/// Major schema versions supported by this version of Obnam.
+pub const SCHEMA_MAJORS: &[VersionComponent] = &[0, 1];
/// An identifier for a file in a generation.
pub type FileId = u64;
@@ -37,15 +49,23 @@ pub enum GenerationDbError {
#[error("Generation 'meta' row {0} has badly formed integer: {1}")]
BadMetaInteger(String, std::num::ParseIntError),
+ /// A major schema version is unsupported.
+ #[error("Unsupported backup schema major version: {0}")]
+ Unsupported(VersionComponent),
+
/// Local generation uses a schema version that this version of
/// Obnam isn't compatible with.
#[error("Backup is not compatible with this version of Obnam: {0}.{1}")]
- Incompatible(u32, u32),
+ Incompatible(VersionComponent, VersionComponent),
/// Error from a database
#[error(transparent)]
Database(#[from] DatabaseError),
+ /// Error from generation metadata.
+ #[error(transparent)]
+ GenerationMeta(#[from] GenerationMetaError),
+
/// Error from JSON.
#[error(transparent)]
SerdeJsonError(#[from] serde_json::Error),
@@ -57,6 +77,157 @@ pub enum GenerationDbError {
/// A database representing a backup generation.
pub struct GenerationDb {
+ variant: GenerationDbVariant,
+}
+
+enum GenerationDbVariant {
+ V0_0(V0_0),
+ V1_0(V1_0),
+}
+
+impl GenerationDb {
+ /// Create a new generation database in read/write mode.
+ pub fn create<P: AsRef<Path>>(
+ filename: P,
+ schema: SchemaVersion,
+ ) -> Result<Self, GenerationDbError> {
+ let meta_table = Self::meta_table();
+ let variant = match schema.version() {
+ (V0_0::MAJOR, V0_0::MINOR) => {
+ GenerationDbVariant::V0_0(V0_0::create(filename, meta_table)?)
+ }
+ (V1_0::MAJOR, V1_0::MINOR) => {
+ GenerationDbVariant::V1_0(V1_0::create(filename, meta_table)?)
+ }
+ (major, minor) => return Err(GenerationDbError::Incompatible(major, minor)),
+ };
+ Ok(Self { variant })
+ }
+
+ /// Open an existing generation database in read-only mode.
+ pub fn open<P: AsRef<Path>>(filename: P) -> Result<Self, GenerationDbError> {
+ let filename = filename.as_ref();
+ let meta_table = Self::meta_table();
+ let schema = {
+ let plain_db = Database::open(filename)?;
+ let rows = Self::meta_rows(&plain_db, &meta_table)?;
+ GenerationMeta::from(rows)?.schema_version()
+ };
+ let variant = match schema.version() {
+ (V0_0::MAJOR, V0_0::MINOR) => {
+ GenerationDbVariant::V0_0(V0_0::open(filename, meta_table)?)
+ }
+ (V1_0::MAJOR, V1_0::MINOR) => {
+ GenerationDbVariant::V1_0(V1_0::open(filename, meta_table)?)
+ }
+ (major, minor) => return Err(GenerationDbError::Incompatible(major, minor)),
+ };
+ Ok(Self { variant })
+ }
+
+ fn meta_table() -> Table {
+ Table::new("meta")
+ .column(Column::text("key"))
+ .column(Column::text("value"))
+ .build()
+ }
+
+ fn meta_rows(
+ db: &Database,
+ table: &Table,
+ ) -> Result<HashMap<String, String>, GenerationDbError> {
+ let mut map = HashMap::new();
+ let mut iter = db.all_rows(table, &row_to_kv)?;
+ for kv in iter.iter()? {
+ let (key, value) = kv?;
+ map.insert(key, value);
+ }
+ Ok(map)
+ }
+
+ /// Close a database, commit any changes.
+ pub fn close(self) -> Result<(), GenerationDbError> {
+ match self.variant {
+ GenerationDbVariant::V0_0(v) => v.close(),
+ GenerationDbVariant::V1_0(v) => v.close(),
+ }
+ }
+
+ /// Return contents of "meta" table as a HashMap.
+ pub fn meta(&self) -> Result<HashMap<String, String>, GenerationDbError> {
+ match &self.variant {
+ GenerationDbVariant::V0_0(v) => v.meta(),
+ GenerationDbVariant::V1_0(v) => v.meta(),
+ }
+ }
+
+ /// Insert a file system entry into the database.
+ pub fn insert(
+ &mut self,
+ e: FilesystemEntry,
+ fileid: FileId,
+ ids: &[ChunkId],
+ reason: Reason,
+ is_cachedir_tag: bool,
+ ) -> Result<(), GenerationDbError> {
+ match &mut self.variant {
+ GenerationDbVariant::V0_0(v) => v.insert(e, fileid, ids, reason, is_cachedir_tag),
+ GenerationDbVariant::V1_0(v) => v.insert(e, fileid, ids, reason, is_cachedir_tag),
+ }
+ }
+
+ /// Count number of file system entries.
+ pub fn file_count(&self) -> Result<FileId, GenerationDbError> {
+ match &self.variant {
+ GenerationDbVariant::V0_0(v) => v.file_count(),
+ GenerationDbVariant::V1_0(v) => v.file_count(),
+ }
+ }
+
+ /// Does a path refer to a cache directory?
+ pub fn is_cachedir_tag(&self, filename: &Path) -> Result<bool, GenerationDbError> {
+ match &self.variant {
+ GenerationDbVariant::V0_0(v) => v.is_cachedir_tag(filename),
+ GenerationDbVariant::V1_0(v) => v.is_cachedir_tag(filename),
+ }
+ }
+
+ /// Return all chunk ids in database.
+ pub fn chunkids(&self, fileid: FileId) -> Result<SqlResults<ChunkId>, GenerationDbError> {
+ match &self.variant {
+ GenerationDbVariant::V0_0(v) => v.chunkids(fileid),
+ GenerationDbVariant::V1_0(v) => v.chunkids(fileid),
+ }
+ }
+
+ /// Return all file descriptions in database.
+ pub fn files(
+ &self,
+ ) -> Result<SqlResults<(FileId, FilesystemEntry, Reason, bool)>, GenerationDbError> {
+ match &self.variant {
+ GenerationDbVariant::V0_0(v) => v.files(),
+ GenerationDbVariant::V1_0(v) => v.files(),
+ }
+ }
+
+ /// Get a file's information given its path.
+ pub fn get_file(&self, filename: &Path) -> Result<Option<FilesystemEntry>, GenerationDbError> {
+ match &self.variant {
+ GenerationDbVariant::V0_0(v) => v.get_file(filename),
+ GenerationDbVariant::V1_0(v) => v.get_file(filename),
+ }
+ }
+
+    /// Get a file's id in the database, given its path.
+ pub fn get_fileno(&self, filename: &Path) -> Result<Option<FileId>, GenerationDbError> {
+ match &self.variant {
+ GenerationDbVariant::V0_0(v) => v.get_fileno(filename),
+ GenerationDbVariant::V1_0(v) => v.get_fileno(filename),
+ }
+ }
+}
+
+struct V0_0 {
created: bool,
db: Database,
meta: Table,
@@ -64,36 +235,35 @@ pub struct GenerationDb {
chunks: Table,
}
-impl GenerationDb {
+impl V0_0 {
+ const MAJOR: VersionComponent = 0;
+ const MINOR: VersionComponent = 0;
+
/// Create a new generation database in read/write mode.
- pub fn create<P: AsRef<Path>>(filename: P) -> Result<Self, GenerationDbError> {
+ pub fn create<P: AsRef<Path>>(filename: P, meta: Table) -> Result<Self, GenerationDbError> {
let db = Database::create(filename.as_ref())?;
- let mut moi = Self::new(db);
+ let mut moi = Self::new(db, meta);
moi.created = true;
moi.create_tables()?;
Ok(moi)
}
/// Open an existing generation database in read-only mode.
- pub fn open<P: AsRef<Path>>(filename: P) -> Result<Self, GenerationDbError> {
+ pub fn open<P: AsRef<Path>>(filename: P, meta: Table) -> Result<Self, GenerationDbError> {
let db = Database::open(filename.as_ref())?;
- Ok(Self::new(db))
+ Ok(Self::new(db, meta))
}
- fn new(db: Database) -> Self {
- let meta = Table::new("meta")
- .column(Column::text("key"))
- .column(Column::text("value"))
- .build();
+ fn new(db: Database, meta: Table) -> Self {
let files = Table::new("files")
- .column(Column::primary_key("fileno")) // FIXME: rename to fileid
+ .column(Column::primary_key("fileno"))
.column(Column::blob("filename"))
.column(Column::text("json"))
.column(Column::text("reason"))
.column(Column::bool("is_cachedir_tag"))
.build();
let chunks = Table::new("chunks")
- .column(Column::int("fileno")) // FIXME: rename to fileid
+ .column(Column::int("fileno"))
.column(Column::text("chunkid"))
.build();
@@ -115,14 +285,14 @@ impl GenerationDb {
&self.meta,
&[
Value::text("key", "schema_version_major"),
- Value::text("value", &format!("{}", SCHEMA_MAJOR)),
+ Value::text("value", &format!("{}", Self::MAJOR)),
],
)?;
self.db.insert(
&self.meta,
&[
Value::text("key", "schema_version_minor"),
- Value::text("value", &format!("{}", SCHEMA_MINOR)),
+ Value::text("value", &format!("{}", Self::MINOR)),
],
)?;
@@ -187,7 +357,7 @@ impl GenerationDb {
            // FIXME: this needs to be done using "SELECT count(*) FROM
// files", but the Database abstraction doesn't support that
// yet.
- let mut iter = self.db.all_rows(&self.files, &row_to_entry)?;
+ let mut iter = self.db.all_rows(&self.files, &Self::row_to_entry)?;
let mut count = 0;
for _ in iter.iter()? {
count += 1;
@@ -199,7 +369,9 @@ impl GenerationDb {
pub fn is_cachedir_tag(&self, filename: &Path) -> Result<bool, GenerationDbError> {
let filename_vec = path_into_blob(filename);
let value = Value::blob("filename", &filename_vec);
- let mut rows = self.db.some_rows(&self.files, &value, &row_to_entry)?;
+ let mut rows = self
+ .db
+ .some_rows(&self.files, &value, &Self::row_to_entry)?;
let mut iter = rows.iter()?;
if let Some(row) = iter.next() {
@@ -228,7 +400,7 @@ impl GenerationDb {
pub fn files(
&self,
) -> Result<SqlResults<(FileId, FilesystemEntry, Reason, bool)>, GenerationDbError> {
- Ok(self.db.all_rows(&self.files, &row_to_fsentry)?)
+ Ok(self.db.all_rows(&self.files, &Self::row_to_fsentry)?)
}
/// Get a file's information given its path.
@@ -253,7 +425,9 @@ impl GenerationDb {
) -> Result<Option<(FileId, FilesystemEntry, String)>, GenerationDbError> {
let filename_bytes = path_into_blob(filename);
let value = Value::blob("filename", &filename_bytes);
- let mut rows = self.db.some_rows(&self.files, &value, &row_to_entry)?;
+ let mut rows = self
+ .db
+ .some_rows(&self.files, &value, &Self::row_to_entry)?;
let mut iter = rows.iter()?;
if let Some(row) = iter.next() {
@@ -272,6 +446,271 @@ impl GenerationDb {
Ok(None)
}
}
+
+ fn row_to_entry(row: &rusqlite::Row) -> rusqlite::Result<(FileId, String, String, bool)> {
+ let fileno: FileId = row.get("fileno")?;
+ let json: String = row.get("json")?;
+ let reason: String = row.get("reason")?;
+ let is_cachedir_tag: bool = row.get("is_cachedir_tag")?;
+ Ok((fileno, json, reason, is_cachedir_tag))
+ }
+
+ fn row_to_fsentry(
+ row: &rusqlite::Row,
+ ) -> rusqlite::Result<(FileId, FilesystemEntry, Reason, bool)> {
+ let fileno: FileId = row.get("fileno")?;
+ let json: String = row.get("json")?;
+ let entry = serde_json::from_str(&json).map_err(|err| {
+ rusqlite::Error::FromSqlConversionFailure(0, rusqlite::types::Type::Text, Box::new(err))
+ })?;
+ let reason: String = row.get("reason")?;
+ let reason = Reason::from(&reason);
+ let is_cachedir_tag: bool = row.get("is_cachedir_tag")?;
+ Ok((fileno, entry, reason, is_cachedir_tag))
+ }
+}
+
+struct V1_0 {
+ created: bool,
+ db: Database,
+ meta: Table,
+ files: Table,
+ chunks: Table,
+}
+
+impl V1_0 {
+ const MAJOR: VersionComponent = 1;
+ const MINOR: VersionComponent = 0;
+
+ /// Create a new generation database in read/write mode.
+ pub fn create<P: AsRef<Path>>(filename: P, meta: Table) -> Result<Self, GenerationDbError> {
+ let db = Database::create(filename.as_ref())?;
+ let mut moi = Self::new(db, meta);
+ moi.created = true;
+ moi.create_tables()?;
+ Ok(moi)
+ }
+
+ /// Open an existing generation database in read-only mode.
+ pub fn open<P: AsRef<Path>>(filename: P, meta: Table) -> Result<Self, GenerationDbError> {
+ let db = Database::open(filename.as_ref())?;
+ Ok(Self::new(db, meta))
+ }
+
+ fn new(db: Database, meta: Table) -> Self {
+ let files = Table::new("files")
+ .column(Column::primary_key("fileid"))
+ .column(Column::blob("filename"))
+ .column(Column::text("json"))
+ .column(Column::text("reason"))
+ .column(Column::bool("is_cachedir_tag"))
+ .build();
+ let chunks = Table::new("chunks")
+ .column(Column::int("fileid"))
+ .column(Column::text("chunkid"))
+ .build();
+
+ Self {
+ created: false,
+ db,
+ meta,
+ files,
+ chunks,
+ }
+ }
+
+ fn create_tables(&mut self) -> Result<(), GenerationDbError> {
+ self.db.create_table(&self.meta)?;
+ self.db.create_table(&self.files)?;
+ self.db.create_table(&self.chunks)?;
+
+ self.db.insert(
+ &self.meta,
+ &[
+ Value::text("key", "schema_version_major"),
+ Value::text("value", &format!("{}", Self::MAJOR)),
+ ],
+ )?;
+ self.db.insert(
+ &self.meta,
+ &[
+ Value::text("key", "schema_version_minor"),
+ Value::text("value", &format!("{}", Self::MINOR)),
+ ],
+ )?;
+
+ Ok(())
+ }
+
+ /// Close a database, commit any changes.
+ pub fn close(self) -> Result<(), GenerationDbError> {
+ if self.created {
+ self.db
+ .create_index("filenames_idx", &self.files, "filename")?;
+ self.db.create_index("fileid_idx", &self.chunks, "fileid")?;
+ }
+ self.db.close().map_err(GenerationDbError::Database)
+ }
+
+ /// Return contents of "meta" table as a HashMap.
+ pub fn meta(&self) -> Result<HashMap<String, String>, GenerationDbError> {
+ let mut map = HashMap::new();
+ let mut iter = self.db.all_rows(&self.meta, &row_to_kv)?;
+ for kv in iter.iter()? {
+ let (key, value) = kv?;
+ map.insert(key, value);
+ }
+ Ok(map)
+ }
+
+ /// Insert a file system entry into the database.
+ pub fn insert(
+ &mut self,
+ e: FilesystemEntry,
+ fileid: FileId,
+ ids: &[ChunkId],
+ reason: Reason,
+ is_cachedir_tag: bool,
+ ) -> Result<(), GenerationDbError> {
+ let json = serde_json::to_string(&e)?;
+ self.db.insert(
+ &self.files,
+ &[
+ Value::primary_key("fileid", fileid),
+ Value::blob("filename", &path_into_blob(&e.pathbuf())),
+ Value::text("json", &json),
+ Value::text("reason", &format!("{}", reason)),
+ Value::bool("is_cachedir_tag", is_cachedir_tag),
+ ],
+ )?;
+ for id in ids {
+ self.db.insert(
+ &self.chunks,
+ &[
+ Value::int("fileid", fileid),
+ Value::text("chunkid", &format!("{}", id)),
+ ],
+ )?;
+ }
+ Ok(())
+ }
+
+ /// Count number of file system entries.
+ pub fn file_count(&self) -> Result<FileId, GenerationDbError> {
+        // FIXME: this needs to be done using "SELECT count(*) FROM
+ // files", but the Database abstraction doesn't support that
+ // yet.
+ let mut iter = self.db.all_rows(&self.files, &Self::row_to_entry)?;
+ let mut count = 0;
+ for _ in iter.iter()? {
+ count += 1;
+ }
+ Ok(count)
+ }
+
+ /// Does a path refer to a cache directory?
+ pub fn is_cachedir_tag(&self, filename: &Path) -> Result<bool, GenerationDbError> {
+ let filename_vec = path_into_blob(filename);
+ let value = Value::blob("filename", &filename_vec);
+ let mut rows = self
+ .db
+ .some_rows(&self.files, &value, &Self::row_to_entry)?;
+ let mut iter = rows.iter()?;
+
+ if let Some(row) = iter.next() {
+ // Make sure there's only one row for a given filename. A
+ // bug in a previous version, or a maliciously constructed
+ // generation, could result in there being more than one.
+ if iter.next().is_some() {
+ error!("too many files in file lookup");
+ Err(GenerationDbError::TooManyFiles(filename.to_path_buf()))
+ } else {
+ let (_, _, _, is_cachedir_tag) = row?;
+ Ok(is_cachedir_tag)
+ }
+ } else {
+ Ok(false)
+ }
+ }
+
+ /// Return all chunk ids in database.
+ pub fn chunkids(&self, fileid: FileId) -> Result<SqlResults<ChunkId>, GenerationDbError> {
+ let fileid = Value::int("fileid", fileid);
+ Ok(self.db.some_rows(&self.chunks, &fileid, &row_to_chunkid)?)
+ }
+
+ /// Return all file descriptions in database.
+ pub fn files(
+ &self,
+ ) -> Result<SqlResults<(FileId, FilesystemEntry, Reason, bool)>, GenerationDbError> {
+ Ok(self.db.all_rows(&self.files, &Self::row_to_fsentry)?)
+ }
+
+ /// Get a file's information given its path.
+ pub fn get_file(&self, filename: &Path) -> Result<Option<FilesystemEntry>, GenerationDbError> {
+ match self.get_file_and_fileno(filename)? {
+ None => Ok(None),
+ Some((_, e, _)) => Ok(Some(e)),
+ }
+ }
+
+    /// Get a file's id in the database, given its path.
+ pub fn get_fileno(&self, filename: &Path) -> Result<Option<FileId>, GenerationDbError> {
+ match self.get_file_and_fileno(filename)? {
+ None => Ok(None),
+ Some((id, _, _)) => Ok(Some(id)),
+ }
+ }
+
+ fn get_file_and_fileno(
+ &self,
+ filename: &Path,
+ ) -> Result<Option<(FileId, FilesystemEntry, String)>, GenerationDbError> {
+ let filename_bytes = path_into_blob(filename);
+ let value = Value::blob("filename", &filename_bytes);
+ let mut rows = self
+ .db
+ .some_rows(&self.files, &value, &Self::row_to_entry)?;
+ let mut iter = rows.iter()?;
+
+ if let Some(row) = iter.next() {
+ // Make sure there's only one row for a given filename. A
+ // bug in a previous version, or a maliciously constructed
+ // generation, could result in there being more than one.
+ if iter.next().is_some() {
+ error!("too many files in file lookup");
+ Err(GenerationDbError::TooManyFiles(filename.to_path_buf()))
+ } else {
+ let (fileid, ref json, ref reason, _) = row?;
+ let entry = serde_json::from_str(json)?;
+ Ok(Some((fileid, entry, reason.to_string())))
+ }
+ } else {
+ Ok(None)
+ }
+ }
+
+ fn row_to_entry(row: &rusqlite::Row) -> rusqlite::Result<(FileId, String, String, bool)> {
+ let fileno: FileId = row.get("fileid")?;
+ let json: String = row.get("json")?;
+ let reason: String = row.get("reason")?;
+ let is_cachedir_tag: bool = row.get("is_cachedir_tag")?;
+ Ok((fileno, json, reason, is_cachedir_tag))
+ }
+
+ fn row_to_fsentry(
+ row: &rusqlite::Row,
+ ) -> rusqlite::Result<(FileId, FilesystemEntry, Reason, bool)> {
+ let fileno: FileId = row.get("fileid")?;
+ let json: String = row.get("json")?;
+ let entry = serde_json::from_str(&json).map_err(|err| {
+ rusqlite::Error::FromSqlConversionFailure(0, rusqlite::types::Type::Text, Box::new(err))
+ })?;
+ let reason: String = row.get("reason")?;
+ let reason = Reason::from(&reason);
+ let is_cachedir_tag: bool = row.get("is_cachedir_tag")?;
+ Ok((fileno, entry, reason, is_cachedir_tag))
+ }
}
fn row_to_kv(row: &rusqlite::Row) -> rusqlite::Result<(String, String)> {
@@ -284,28 +723,6 @@ fn path_into_blob(path: &Path) -> Vec<u8> {
path.as_os_str().as_bytes().to_vec()
}
-fn row_to_entry(row: &rusqlite::Row) -> rusqlite::Result<(FileId, String, String, bool)> {
- let fileno: FileId = row.get("fileno")?;
- let json: String = row.get("json")?;
- let reason: String = row.get("reason")?;
- let is_cachedir_tag: bool = row.get("is_cachedir_tag")?;
- Ok((fileno, json, reason, is_cachedir_tag))
-}
-
-fn row_to_fsentry(
- row: &rusqlite::Row,
-) -> rusqlite::Result<(FileId, FilesystemEntry, Reason, bool)> {
- let fileno: FileId = row.get("fileno")?;
- let json: String = row.get("json")?;
- let entry = serde_json::from_str(&json).map_err(|err| {
- rusqlite::Error::FromSqlConversionFailure(0, rusqlite::types::Type::Text, Box::new(err))
- })?;
- let reason: String = row.get("reason")?;
- let reason = Reason::from(&reason);
- let is_cachedir_tag: bool = row.get("is_cachedir_tag")?;
- Ok((fileno, entry, reason, is_cachedir_tag))
-}
-
fn row_to_chunkid(row: &rusqlite::Row) -> rusqlite::Result<ChunkId> {
let chunkid: String = row.get("chunkid")?;
let chunkid = ChunkId::recreate(&chunkid);
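Structurally, the rewritten dbgen.rs is a hand-rolled static-dispatch wrapper: GenerationDb holds a GenerationDbVariant enum and forwards every public method to the schema-specific struct. Reduced to a toy sketch (these names are illustrative, not part of Obnam):

    // Each schema version gets its own struct with its own table layout;
    // the wrapper enum matches once per call and forwards.
    struct V0;
    struct V1;

    impl V0 {
        fn file_count(&self) -> u64 {
            0 // would query the v0 "files" table (keyed by "fileno")
        }
    }

    impl V1 {
        fn file_count(&self) -> u64 {
            0 // would query the v1 "files" table (keyed by "fileid")
        }
    }

    enum Variant {
        V0(V0),
        V1(V1),
    }

    impl Variant {
        // The wrapper's only job: match once and forward the call.
        fn file_count(&self) -> u64 {
            match self {
                Variant::V0(v) => v.file_count(),
                Variant::V1(v) => v.file_count(),
            }
        }
    }

Compared with Box<dyn Trait>, the enum keeps dispatch static and makes an unknown schema a construction-time error (GenerationDbError::Incompatible) rather than a case every method must handle.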
diff --git a/src/error.rs b/src/error.rs
index cf18c83..7812081 100644
--- a/src/error.rs
+++ b/src/error.rs
@@ -6,6 +6,7 @@ use crate::client::ClientError;
use crate::cmd::restore::RestoreError;
use crate::config::ClientConfigError;
use crate::db::DatabaseError;
+use crate::dbgen::GenerationDbError;
use crate::generation::{LocalGenerationError, NascentError};
use crate::genlist::GenerationListError;
use crate::passwords::PasswordError;
@@ -52,6 +53,10 @@ pub enum ObnamError {
#[error(transparent)]
LocalGenerationError(#[from] LocalGenerationError),
+ /// Error from generation database.
+ #[error(transparent)]
+ GenerationDb(#[from] GenerationDbError),
+
/// Error using a Database.
#[error(transparent)]
Database(#[from] DatabaseError),
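The new ObnamError variant leans on thiserror's transparent/from pairing: the wrapped error supplies the Display text, and the generated From impl is what lets ? convert a GenerationDbError into an ObnamError without an explicit map_err. A self-contained sketch of the mechanism, with std::io::Error standing in as the source error and a placeholder path:

    use thiserror::Error;

    #[derive(Debug, Error)]
    enum TopError {
        // #[error(transparent)] delegates Display and source() to the
        // inner error; #[from] generates the From impl that ? relies on.
        #[error(transparent)]
        Io(#[from] std::io::Error),
    }

    fn read_config(path: &str) -> Result<String, TopError> {
        // The io::Error from read_to_string converts automatically.
        Ok(std::fs::read_to_string(path)?)
    }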
diff --git a/src/generation.rs b/src/generation.rs
index 05163be..715b426 100644
--- a/src/generation.rs
+++ b/src/generation.rs
@@ -3,10 +3,10 @@
use crate::backup_reason::Reason;
use crate::chunkid::ChunkId;
use crate::db::{DatabaseError, SqlResults};
-use crate::dbgen::{FileId, GenerationDb, GenerationDbError, SCHEMA_MAJOR, SCHEMA_MINOR};
+use crate::dbgen::{FileId, GenerationDb, GenerationDbError};
use crate::fsentry::FilesystemEntry;
-use serde::Serialize;
-use std::collections::HashMap;
+use crate::genmeta::{GenerationMeta, GenerationMetaError};
+use crate::schema::{SchemaVersion, VersionComponent};
use std::fmt;
use std::path::{Path, PathBuf};
@@ -77,11 +77,11 @@ pub enum NascentError {
impl NascentGeneration {
/// Create a new nascent generation.
- pub fn create<P>(filename: P) -> Result<Self, NascentError>
+ pub fn create<P>(filename: P, schema: SchemaVersion) -> Result<Self, NascentError>
where
P: AsRef<Path>,
{
- let db = GenerationDb::create(filename.as_ref())?;
+ let db = GenerationDb::create(filename.as_ref(), schema)?;
Ok(Self { db, fileno: 0 })
}
@@ -160,18 +160,14 @@ pub enum LocalGenerationError {
#[error("Generation does not have a 'meta' table")]
NoMeta,
- /// Missing from from 'meta' table.
- #[error("Generation 'meta' table does not have a row {0}")]
- NoMetaKey(String),
-
- /// Bad data in 'meta' table.
- #[error("Generation 'meta' row {0} has badly formed integer: {1}")]
- BadMetaInteger(String, std::num::ParseIntError),
-
/// Local generation uses a schema version that this version of
/// Obnam isn't compatible with.
#[error("Backup is not compatible with this version of Obnam: {0}.{1}")]
- Incompatible(u32, u32),
+ Incompatible(VersionComponent, VersionComponent),
+
+ /// Error from generation metadata.
+ #[error(transparent)]
+ GenerationMeta(#[from] GenerationMetaError),
/// Error from SQL.
#[error(transparent)]
@@ -239,21 +235,13 @@ impl LocalGeneration {
{
let db = GenerationDb::open(filename.as_ref())?;
let gen = Self::new(db);
- let schema = gen.meta()?.schema_version();
- let our_schema = SchemaVersion::new(SCHEMA_MAJOR, SCHEMA_MINOR);
- if !our_schema.is_compatible_with(&schema) {
- return Err(LocalGenerationError::Incompatible(
- schema.major,
- schema.minor,
- ));
- }
Ok(gen)
}
/// Return generation metadata for local generation.
- pub fn meta(&self) -> Result<GenMeta, LocalGenerationError> {
+ pub fn meta(&self) -> Result<GenerationMeta, LocalGenerationError> {
let map = self.db.meta()?;
- GenMeta::from(map)
+ GenerationMeta::from(map).map_err(LocalGenerationError::GenerationMeta)
}
/// How many files are there in the local generation?
@@ -300,123 +288,17 @@ impl LocalGeneration {
}
}
-/// Metadata about the local generation.
-#[derive(Debug, Serialize)]
-pub struct GenMeta {
- schema_version: SchemaVersion,
- extras: HashMap<String, String>,
-}
-
-impl GenMeta {
- /// Create from a hash map.
- fn from(mut map: HashMap<String, String>) -> Result<Self, LocalGenerationError> {
- let major: u32 = metaint(&mut map, "schema_version_major")?;
- let minor: u32 = metaint(&mut map, "schema_version_minor")?;
- Ok(Self {
- schema_version: SchemaVersion::new(major, minor),
- extras: map,
- })
- }
-
- /// Return schema version of local generation.
- pub fn schema_version(&self) -> SchemaVersion {
- self.schema_version
- }
-}
-
-fn metastr(map: &mut HashMap<String, String>, key: &str) -> Result<String, LocalGenerationError> {
- if let Some(v) = map.remove(key) {
- Ok(v)
- } else {
- Err(LocalGenerationError::NoMetaKey(key.to_string()))
- }
-}
-
-fn metaint(map: &mut HashMap<String, String>, key: &str) -> Result<u32, LocalGenerationError> {
- let v = metastr(map, key)?;
- let v = v
- .parse()
- .map_err(|err| LocalGenerationError::BadMetaInteger(key.to_string(), err))?;
- Ok(v)
-}
-
-/// Schema version of the database storing the generation.
-///
-/// An Obnam client can restore a generation using schema version
-/// (x,y), if the client supports a schema version (x,z). If z < y,
-/// the client knows it may not be able to the generation faithfully,
-/// and should warn the user about this. If z >= y, the client knows
-/// it can restore the generation faithfully. If the client does not
-/// support any schema version x, it knows it can't restore the backup
-/// at all.
-#[derive(Debug, Clone, Copy, Serialize)]
-pub struct SchemaVersion {
- /// Major version.
- pub major: u32,
- /// Minor version.
- pub minor: u32,
-}
-
-impl SchemaVersion {
- fn new(major: u32, minor: u32) -> Self {
- Self { major, minor }
- }
-
- /// Is this schema version compatible with another schema version?
- pub fn is_compatible_with(&self, other: &Self) -> bool {
- self.major == other.major && self.minor >= other.minor
- }
-}
-
-#[cfg(test)]
-mod test_schema {
- use super::*;
-
- #[test]
- fn compatible_with_self() {
- let v = SchemaVersion::new(1, 2);
- assert!(v.is_compatible_with(&v));
- }
-
- #[test]
- fn compatible_with_older_minor_version() {
- let old = SchemaVersion::new(1, 2);
- let new = SchemaVersion::new(1, 3);
- assert!(new.is_compatible_with(&old));
- }
-
- #[test]
- fn not_compatible_with_newer_minor_version() {
- let old = SchemaVersion::new(1, 2);
- let new = SchemaVersion::new(1, 3);
- assert!(!old.is_compatible_with(&new));
- }
-
- #[test]
- fn not_compatible_with_older_major_version() {
- let old = SchemaVersion::new(1, 2);
- let new = SchemaVersion::new(2, 0);
- assert!(!new.is_compatible_with(&old));
- }
-
- #[test]
- fn not_compatible_with_newer_major_version() {
- let old = SchemaVersion::new(1, 2);
- let new = SchemaVersion::new(2, 0);
- assert!(!old.is_compatible_with(&new));
- }
-}
-
#[cfg(test)]
mod test {
- use super::{LocalGeneration, NascentGeneration};
+ use super::{LocalGeneration, NascentGeneration, SchemaVersion};
use tempfile::NamedTempFile;
#[test]
fn empty() {
let filename = NamedTempFile::new().unwrap().path().to_path_buf();
+ let schema = SchemaVersion::new(0, 0);
{
- let mut _gen = NascentGeneration::create(&filename).unwrap();
+ let mut _gen = NascentGeneration::create(&filename, schema).unwrap();
            // _gen is dropped here; the connection is closed; the file
// should not be removed.
}
@@ -445,7 +327,8 @@ mod test {
let tag_path1 = Path::new("/a_tag");
let tag_path2 = Path::new("/another_dir/a_tag");
- let mut gen = NascentGeneration::create(&dbfile).unwrap();
+ let schema = SchemaVersion::new(0, 0);
+ let mut gen = NascentGeneration::create(&dbfile, schema).unwrap();
let mut cache = users::UsersCache::new();
gen.insert(