Diffstat (limited to 'src')
-rw-r--r--  src/accumulated_time.rs          |   4
-rw-r--r--  src/backup_progress.rs           |  44
-rw-r--r--  src/backup_run.rs                |  25
-rw-r--r--  src/bin/obnam-server.rs          |  75
-rw-r--r--  src/bin/obnam.rs                 |  14
-rw-r--r--  src/chunker.rs                   |   3
-rw-r--r--  src/chunkstore.rs                | 307
-rw-r--r--  src/cipher.rs                    |   2
-rw-r--r--  src/client.rs                    | 144
-rw-r--r--  src/cmd/backup.rs                |  14
-rw-r--r--  src/cmd/chunk.rs                 |  12
-rw-r--r--  src/cmd/chunkify.rs              |   4
-rw-r--r--  src/cmd/gen_info.rs              |   5
-rw-r--r--  src/cmd/get_chunk.rs             |   5
-rw-r--r--  src/cmd/init.rs                  |   8
-rw-r--r--  src/cmd/inspect.rs               |   5
-rw-r--r--  src/cmd/list.rs                  |   4
-rw-r--r--  src/cmd/list_backup_versions.rs  |   6
-rw-r--r--  src/cmd/list_files.rs            |   6
-rw-r--r--  src/cmd/resolve.rs               |   4
-rw-r--r--  src/cmd/restore.rs               |  14
-rw-r--r--  src/cmd/show_config.rs           |   4
-rw-r--r--  src/cmd/show_gen.rs              |   6
-rw-r--r--  src/db.rs                        |  10
-rw-r--r--  src/engine.rs                    |   2
-rw-r--r--  src/index.rs                     |   4
-rw-r--r--  src/indexedstore.rs              |  77
-rw-r--r--  src/lib.rs                       |   2
-rw-r--r--  src/passwords.rs                 |   2
-rw-r--r--  src/policy.rs                    |   6
-rw-r--r--  src/store.rs                     |  12
31 files changed, 467 insertions(+), 363 deletions(-)
diff --git a/src/accumulated_time.rs b/src/accumulated_time.rs
index e633a10..cdf34b2 100644
--- a/src/accumulated_time.rs
+++ b/src/accumulated_time.rs
@@ -37,7 +37,7 @@ impl<T: Eq + PartialEq + Hash + Copy> AccumulatedTime<T> {
/// clock is stopped.
pub fn start(&mut self, clock: T) {
let mut map = self.accumulated.lock().unwrap();
- let ct = map.entry(clock).or_insert_with(ClockTime::default);
+ let ct = map.entry(clock).or_default();
assert!(ct.started.is_none());
ct.started = Some(Instant::now());
}
@@ -47,7 +47,7 @@ impl<T: Eq + PartialEq + Hash + Copy> AccumulatedTime<T> {
/// Its run time is added to the accumulated time for that kind of clock.
pub fn stop(&mut self, clock: T) {
let mut map = self.accumulated.lock().unwrap();
- if let Some(mut ct) = map.get_mut(&clock) {
+ if let Some(ct) = map.get_mut(&clock) {
assert!(ct.started.is_some());
if let Some(started) = ct.started.take() {
ct.nanos += started.elapsed().as_nanos();
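(The `or_default()` change above is a pure readability fix; a minimal standalone sketch of the idiom, using a hypothetical counter map:)

    use std::collections::HashMap;

    fn main() {
        let mut counts: HashMap<&str, u64> = HashMap::new();
        // entry().or_default() inserts Default::default() (0 for u64) when
        // the key is absent, and returns &mut to the value either way.
        *counts.entry("wall").or_default() += 1;
        *counts.entry("wall").or_default() += 1;
        assert_eq!(counts["wall"], 2);
    }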
diff --git a/src/backup_progress.rs b/src/backup_progress.rs
index f119210..e3995f0 100644
--- a/src/backup_progress.rs
+++ b/src/backup_progress.rs
@@ -2,7 +2,7 @@
use crate::generation::GenId;
use indicatif::{ProgressBar, ProgressStyle};
-use std::path::Path;
+use std::{path::Path, time::Duration};
const SHOW_PROGRESS: bool = true;
@@ -22,15 +22,19 @@ impl BackupProgress {
} else {
ProgressBar::hidden()
};
- let parts = vec![
+ let parts = [
"initial backup",
"elapsed: {elapsed}",
"files: {pos}",
"current: {wide_msg}",
"{spinner}",
];
- progress.set_style(ProgressStyle::default_bar().template(&parts.join("\n")));
- progress.enable_steady_tick(100);
+ progress.set_style(
+ ProgressStyle::default_bar()
+ .template(&parts.join("\n"))
+ .expect("create indicatif ProgressStyle value"),
+ );
+ progress.enable_steady_tick(Duration::from_millis(100));
Self { progress }
}
@@ -42,7 +46,7 @@ impl BackupProgress {
} else {
ProgressBar::hidden()
};
- let parts = vec![
+ let parts = [
"incremental backup",
"{wide_bar}",
"elapsed: {elapsed}",
@@ -50,8 +54,12 @@ impl BackupProgress {
"current: {wide_msg}",
"{spinner}",
];
- progress.set_style(ProgressStyle::default_bar().template(&parts.join("\n")));
- progress.enable_steady_tick(100);
+ progress.set_style(
+ ProgressStyle::default_bar()
+ .template(&parts.join("\n"))
+ .expect("create indicatif ProgressStyle value"),
+ );
+ progress.enable_steady_tick(Duration::from_millis(100));
Self { progress }
}
@@ -59,13 +67,17 @@ impl BackupProgress {
/// Create a progress bar for uploading a new generation's metadata.
pub fn upload_generation() -> Self {
let progress = ProgressBar::new(0);
- let parts = vec![
+ let parts = [
"uploading new generation metadata",
"elapsed: {elapsed}",
"{spinner}",
];
- progress.set_style(ProgressStyle::default_bar().template(&parts.join("\n")));
- progress.enable_steady_tick(100);
+ progress.set_style(
+ ProgressStyle::default_bar()
+ .template(&parts.join("\n"))
+ .expect("create indicatif ProgressStyle value"),
+ );
+ progress.enable_steady_tick(Duration::from_millis(100));
Self { progress }
}
@@ -74,9 +86,13 @@ impl BackupProgress {
/// metadata.
pub fn download_generation(gen_id: &GenId) -> Self {
let progress = ProgressBar::new(0);
- let parts = vec!["{msg}", "elapsed: {elapsed}", "{spinner}"];
- progress.set_style(ProgressStyle::default_bar().template(&parts.join("\n")));
- progress.enable_steady_tick(100);
+ let parts = ["{msg}", "elapsed: {elapsed}", "{spinner}"];
+ progress.set_style(
+ ProgressStyle::default_bar()
+ .template(&parts.join("\n"))
+ .expect("create indicatif ProgressStyle value"),
+ );
+ progress.enable_steady_tick(Duration::from_millis(100));
progress.set_message(format!(
"downloading previous generation metadata: {}",
gen_id
@@ -102,7 +118,7 @@ impl BackupProgress {
/// Update progress bar about number of actual files found.
pub fn found_live_file(&self, filename: &Path) {
self.progress.inc(1);
- if self.progress.length() < self.progress.position() {
+ if self.progress.length() < Some(self.progress.position()) {
self.progress.set_length(self.progress.position());
}
self.progress.set_message(format!("{}", filename.display()));
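(The backup_progress.rs changes track the indicatif 0.17 API: `template()` now returns a `Result`, `enable_steady_tick()` takes a `Duration`, and `length()` returns `Option<u64>`. A minimal sketch of the new calls, assuming indicatif 0.17:)

    use indicatif::{ProgressBar, ProgressStyle};
    use std::time::Duration;

    fn main() {
        let progress = ProgressBar::new(0);
        // template() validates the string and returns
        // Result<ProgressStyle, TemplateError> in 0.17.
        progress.set_style(
            ProgressStyle::default_bar()
                .template("elapsed: {elapsed}\n{spinner}")
                .expect("valid progress template"),
        );
        // The tick interval is now a Duration, not a bare millisecond count.
        progress.enable_steady_tick(Duration::from_millis(100));
        progress.inc(1);
        // length() returns Option<u64> in 0.17, hence the Some(...) compare.
        if progress.length() < Some(progress.position()) {
            progress.set_length(progress.position());
        }
        progress.finish();
    }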
diff --git a/src/backup_run.rs b/src/backup_run.rs
index 516e172..372ef65 100644
--- a/src/backup_run.rs
+++ b/src/backup_run.rs
@@ -31,7 +31,7 @@ const SQLITE_CHUNK_SIZE: usize = MIB as usize;
/// A running backup.
pub struct BackupRun<'a> {
checksum_kind: Option<LabelChecksumKind>,
- client: &'a BackupClient,
+ client: &'a mut BackupClient,
policy: BackupPolicy,
buffer_size: usize,
progress: Option<BackupProgress>,
@@ -106,7 +106,10 @@ pub struct RootsBackupOutcome {
impl<'a> BackupRun<'a> {
/// Create a new run for an initial backup.
- pub fn initial(config: &ClientConfig, client: &'a BackupClient) -> Result<Self, BackupError> {
+ pub fn initial(
+ config: &ClientConfig,
+ client: &'a mut BackupClient,
+ ) -> Result<Self, BackupError> {
Ok(Self {
checksum_kind: Some(DEFAULT_CHECKSUM_KIND),
client,
@@ -119,7 +122,7 @@ impl<'a> BackupRun<'a> {
/// Create a new run for an incremental backup.
pub fn incremental(
config: &ClientConfig,
- client: &'a BackupClient,
+ client: &'a mut BackupClient,
) -> Result<Self, BackupError> {
Ok(Self {
checksum_kind: None,
@@ -189,7 +192,7 @@ impl<'a> BackupRun<'a> {
/// Back up all the roots for this run.
pub async fn backup_roots(
- &self,
+ &mut self,
config: &ClientConfig,
old: &LocalGeneration,
newpath: &Path,
@@ -236,7 +239,7 @@ impl<'a> BackupRun<'a> {
}
async fn backup_one_root(
- &self,
+ &mut self,
config: &ClientConfig,
old: &LocalGeneration,
new: &mut NascentGeneration,
@@ -287,7 +290,7 @@ impl<'a> BackupRun<'a> {
}
async fn backup_if_needed(
- &self,
+ &mut self,
entry: AnnotatedFsEntry,
old: &LocalGeneration,
) -> Result<Option<FsEntryBackupOutcome>, BackupError> {
@@ -322,7 +325,7 @@ impl<'a> BackupRun<'a> {
}
async fn backup_one_entry(
- &self,
+ &mut self,
entry: &AnnotatedFsEntry,
path: &Path,
reason: Reason,
@@ -351,7 +354,7 @@ impl<'a> BackupRun<'a> {
/// Upload any file content for a file system entry.
pub async fn upload_filesystem_entry(
- &self,
+ &mut self,
e: &FilesystemEntry,
size: usize,
) -> Result<Vec<ChunkId>, BackupError> {
@@ -370,7 +373,7 @@ impl<'a> BackupRun<'a> {
/// Upload the metadata for the backup of this run.
pub async fn upload_generation(
- &self,
+ &mut self,
filename: &Path,
size: usize,
) -> Result<ChunkId, BackupError> {
@@ -384,7 +387,7 @@ impl<'a> BackupRun<'a> {
}
async fn upload_regular_file(
- &self,
+ &mut self,
filename: &Path,
size: usize,
) -> Result<Vec<ChunkId>, BackupError> {
@@ -407,7 +410,7 @@ impl<'a> BackupRun<'a> {
Ok(chunk_ids)
}
- async fn upload_nascent_generation(&self, filename: &Path) -> Result<ChunkId, ObnamError> {
+ async fn upload_nascent_generation(&mut self, filename: &Path) -> Result<ChunkId, ObnamError> {
let progress = BackupProgress::upload_generation();
let gen_id = self.upload_generation(filename, SQLITE_CHUNK_SIZE).await?;
progress.finish();
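(All these `&self` to `&mut self` edits follow from the one field change: `client` is now `&'a mut BackupClient`, so every method on the call path to an upload must borrow the run mutably. A minimal sketch of the pattern, with hypothetical names:)

    struct Client;

    impl Client {
        fn upload_chunk(&mut self) {}
    }

    struct Run<'a> {
        client: &'a mut Client,
    }

    impl<'a> Run<'a> {
        // Must take &mut self: calling through self.client requires a
        // mutable borrow of the Run that holds the &'a mut Client.
        fn backup(&mut self) {
            self.client.upload_chunk();
        }
    }

    fn main() {
        let mut client = Client;
        let mut run = Run { client: &mut client };
        run.backup();
    }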
diff --git a/src/bin/obnam-server.rs b/src/bin/obnam-server.rs
index 6cf4122..9b5a557 100644
--- a/src/bin/obnam-server.rs
+++ b/src/bin/obnam-server.rs
@@ -1,9 +1,10 @@
use anyhow::Context;
+use clap::Parser;
use log::{debug, error, info};
-use obnam::chunk::DataChunk;
use obnam::chunkid::ChunkId;
use obnam::chunkmeta::ChunkMeta;
-use obnam::indexedstore::IndexedStore;
+use obnam::chunkstore::ChunkStore;
+use obnam::label::Label;
use obnam::server::{ServerConfig, ServerConfigError};
use serde::Serialize;
use std::collections::HashMap;
@@ -11,16 +12,14 @@ use std::default::Default;
use std::net::{SocketAddr, ToSocketAddrs};
use std::path::{Path, PathBuf};
use std::sync::Arc;
-use structopt::StructOpt;
use tokio::sync::Mutex;
use warp::http::StatusCode;
use warp::hyper::body::Bytes;
use warp::Filter;
-#[derive(Debug, StructOpt)]
-#[structopt(name = "obnam2-server", about = "Backup server")]
+#[derive(Debug, Parser)]
+#[clap(name = "obnam2-server", about = "Backup server")]
struct Opt {
- #[structopt(parse(from_os_str))]
config: PathBuf,
}
@@ -28,7 +27,7 @@ struct Opt {
async fn main() -> anyhow::Result<()> {
pretty_env_logger::init_custom_env("OBNAM_SERVER_LOG");
- let opt = Opt::from_args();
+ let opt = Opt::parse();
let config = load_config(&opt.config)?;
let addresses: Vec<SocketAddr> = config.address.to_socket_addrs()?.collect();
@@ -38,7 +37,7 @@ async fn main() -> anyhow::Result<()> {
return Err(ServerConfigError::BadServerAddress.into());
}
- let store = IndexedStore::new(&config.chunks)?;
+ let store = ChunkStore::local(&config.chunks)?;
let store = Arc::new(Mutex::new(store));
let store = warp::any().map(move || Arc::clone(&store));
@@ -71,16 +70,8 @@ async fn main() -> anyhow::Result<()> {
.and(store.clone())
.and_then(search_chunks);
- let delete = warp::delete()
- .and(warp::path("v1"))
- .and(warp::path("chunks"))
- .and(warp::path::param())
- .and(warp::path::end())
- .and(store.clone())
- .and_then(delete_chunk);
-
let log = warp::log("obnam");
- let webroot = create.or(fetch).or(search).or(delete).with(log);
+ let webroot = create.or(fetch).or(search).with(log);
debug!("starting warp");
warp::serve(webroot)
@@ -103,11 +94,11 @@ fn load_config(filename: &Path) -> Result<ServerConfig, anyhow::Error> {
}
pub async fn create_chunk(
- store: Arc<Mutex<IndexedStore>>,
+ store: Arc<Mutex<ChunkStore>>,
meta: String,
data: Bytes,
) -> Result<impl warp::Reply, warp::Rejection> {
- let mut store = store.lock().await;
+ let store = store.lock().await;
let meta: ChunkMeta = match meta.parse() {
Ok(s) => s,
@@ -117,9 +108,7 @@ pub async fn create_chunk(
}
};
- let chunk = DataChunk::new(data.to_vec(), meta);
-
- let id = match store.save(&chunk) {
+ let id = match store.put(data.to_vec(), &meta).await {
Ok(id) => id,
Err(e) => {
error!("couldn't save: {}", e);
@@ -133,11 +122,11 @@ pub async fn create_chunk(
pub async fn fetch_chunk(
id: String,
- store: Arc<Mutex<IndexedStore>>,
+ store: Arc<Mutex<ChunkStore>>,
) -> Result<impl warp::Reply, warp::Rejection> {
let store = store.lock().await;
let id: ChunkId = id.parse().unwrap();
- match store.load(&id) {
+ match store.get(&id).await {
Ok((data, meta)) => {
info!("found chunk {}: {:?}", id, meta);
Ok(ChunkResult::Fetched(meta, data))
@@ -151,18 +140,23 @@ pub async fn fetch_chunk(
pub async fn search_chunks(
query: HashMap<String, String>,
- store: Arc<Mutex<IndexedStore>>,
+ store: Arc<Mutex<ChunkStore>>,
) -> Result<impl warp::Reply, warp::Rejection> {
let store = store.lock().await;
let mut query = query.iter();
let found = if let Some((key, value)) = query.next() {
- if query.next() != None {
+ if query.next().is_some() {
error!("search has more than one key to search for");
return Ok(ChunkResult::BadRequest);
}
if key == "label" {
- store.find_by_label(value).expect("SQL lookup failed")
+ let label = Label::deserialize(value).unwrap();
+ let label = ChunkMeta::new(&label);
+ store
+ .find_by_label(&label)
+ .await
+ .expect("SQL lookup failed")
} else {
error!("unknown search key {:?}", key);
return Ok(ChunkResult::BadRequest);
@@ -174,7 +168,7 @@ pub async fn search_chunks(
let mut hits = SearchHits::default();
for chunk_id in found {
- let meta = match store.load_meta(&chunk_id) {
+ let (_, meta) = match store.get(&chunk_id).await {
Ok(meta) => {
info!("search found chunk {}", chunk_id);
meta
@@ -213,30 +207,10 @@ impl SearchHits {
}
}
-pub async fn delete_chunk(
- id: String,
- store: Arc<Mutex<IndexedStore>>,
-) -> Result<impl warp::Reply, warp::Rejection> {
- let mut store = store.lock().await;
- let id: ChunkId = id.parse().unwrap();
-
- match store.remove(&id) {
- Ok(_) => {
- info!("chunk deleted: {}", id);
- Ok(ChunkResult::Deleted)
- }
- Err(e) => {
- error!("could not delete chunk {}: {:?}", id, e);
- Ok(ChunkResult::NotFound)
- }
- }
-}
-
enum ChunkResult {
Created(ChunkId),
- Fetched(ChunkMeta, DataChunk),
+ Fetched(ChunkMeta, Vec<u8>),
Found(SearchHits),
- Deleted,
NotFound,
BadRequest,
InternalServerError,
@@ -265,13 +239,12 @@ impl warp::Reply for ChunkResult {
);
into_response(
StatusCode::OK,
- chunk.data(),
+ &chunk,
"application/octet-stream",
Some(headers),
)
}
ChunkResult::Found(hits) => json_response(StatusCode::OK, hits.to_json(), None),
- ChunkResult::Deleted => status_response(StatusCode::OK),
ChunkResult::BadRequest => status_response(StatusCode::BAD_REQUEST),
ChunkResult::NotFound => status_response(StatusCode::NOT_FOUND),
ChunkResult::InternalServerError => status_response(StatusCode::INTERNAL_SERVER_ERROR),
diff --git a/src/bin/obnam.rs b/src/bin/obnam.rs
index 089a7a1..240960b 100644
--- a/src/bin/obnam.rs
+++ b/src/bin/obnam.rs
@@ -1,3 +1,4 @@
+use clap::Parser;
use directories_next::ProjectDirs;
use log::{debug, error, info, LevelFilter};
use log4rs::append::file::FileAppender;
@@ -19,7 +20,6 @@ use obnam::cmd::show_gen::ShowGeneration;
use obnam::config::ClientConfig;
use obnam::performance::{Clock, Performance};
use std::path::{Path, PathBuf};
-use structopt::StructOpt;
const QUALIFIER: &str = "";
const ORG: &str = "";
@@ -38,7 +38,7 @@ fn main() {
}
fn main_program(perf: &mut Performance) -> anyhow::Result<()> {
- let opt = Opt::from_args();
+ let opt = Opt::parse();
let config = ClientConfig::read(&config_filename(&opt))?;
setup_logging(&config.log)?;
@@ -96,17 +96,17 @@ fn default_config() -> PathBuf {
}
}
-#[derive(Debug, StructOpt)]
-#[structopt(name = "obnam-backup", about = "Simplistic backup client")]
+#[derive(Debug, Parser)]
+#[clap(name = "obnam-backup", version, about = "Simplistic backup client")]
struct Opt {
- #[structopt(long, short, parse(from_os_str))]
+ #[clap(long, short)]
config: Option<PathBuf>,
- #[structopt(subcommand)]
+ #[clap(subcommand)]
cmd: Command,
}
-#[derive(Debug, StructOpt)]
+#[derive(Debug, Parser)]
enum Command {
Init(Init),
Backup(Backup),
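(The structopt-to-clap move above is mechanical: `#[derive(StructOpt)]` becomes `#[derive(Parser)]`, `#[structopt(...)]` becomes `#[clap(...)]`, `from_args()` becomes `parse()`, and `parse(from_os_str)` is dropped because clap converts to `PathBuf` on its own. A minimal sketch, assuming clap 3.x with the derive feature and a hypothetical command name:)

    use clap::Parser;
    use std::path::PathBuf;

    #[derive(Debug, Parser)]
    #[clap(name = "example", version, about = "Example CLI")]
    struct Opt {
        // No parse(from_os_str) needed: clap infers the PathBuf conversion.
        #[clap(long, short)]
        config: Option<PathBuf>,

        #[clap(subcommand)]
        cmd: Command,
    }

    #[derive(Debug, Parser)]
    enum Command {
        Run,
    }

    fn main() {
        let opt = Opt::parse();
        println!("{:?}", opt);
    }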
diff --git a/src/chunker.rs b/src/chunker.rs
index 29f8a90..9883f89 100644
--- a/src/chunker.rs
+++ b/src/chunker.rs
@@ -31,8 +31,7 @@ impl FileChunks {
filename: &Path,
kind: LabelChecksumKind,
) -> Self {
- let mut buf = vec![];
- buf.resize(chunk_size, 0);
+ let buf = vec![0; chunk_size];
Self {
chunk_size,
kind,
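(The chunker change collapses the "create empty, then resize" pattern into the single-step `vec![elem; n]` form; a one-liner sketch:)

    fn main() {
        let chunk_size = 4;
        // Allocate and zero-fill in one step instead of
        // vec![] followed by resize(chunk_size, 0).
        let buf = vec![0u8; chunk_size];
        assert_eq!(buf.len(), chunk_size);
    }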
diff --git a/src/chunkstore.rs b/src/chunkstore.rs
new file mode 100644
index 0000000..4c8125c
--- /dev/null
+++ b/src/chunkstore.rs
@@ -0,0 +1,307 @@
+//! Access local and remote chunk stores.
+//!
+//! A chunk store may be local and accessed via the file system, or
+//! remote and accessed over HTTP. This module implements both. This
+//! module only handles encrypted chunks.
+
+use crate::chunkid::ChunkId;
+use crate::chunkmeta::ChunkMeta;
+use crate::config::{ClientConfig, ClientConfigError};
+use crate::index::{Index, IndexError};
+
+use log::{debug, error, info};
+use reqwest::header::HeaderMap;
+use std::collections::HashMap;
+use std::path::{Path, PathBuf};
+use tokio::sync::Mutex;
+
+/// A chunk store.
+///
+/// The store may be local or remote.
+pub enum ChunkStore {
+ /// A local chunk store.
+ Local(LocalStore),
+
+ /// A remote chunk store.
+ Remote(RemoteStore),
+}
+
+impl ChunkStore {
+ /// Open a local chunk store.
+ pub fn local<P: AsRef<Path>>(path: P) -> Result<Self, StoreError> {
+ let store = LocalStore::new(path.as_ref())?;
+ Ok(Self::Local(store))
+ }
+
+ /// Open a remote chunk store.
+ pub fn remote(config: &ClientConfig) -> Result<Self, StoreError> {
+ let store = RemoteStore::new(config)?;
+ Ok(Self::Remote(store))
+ }
+
+ /// Does the store have a chunk with a given label?
+ pub async fn find_by_label(&self, meta: &ChunkMeta) -> Result<Vec<ChunkId>, StoreError> {
+ match self {
+ Self::Local(store) => store.find_by_label(meta).await,
+ Self::Remote(store) => store.find_by_label(meta).await,
+ }
+ }
+
+ /// Store a chunk in the store.
+ ///
+ /// The store chooses an id for the chunk.
+ pub async fn put(&self, chunk: Vec<u8>, meta: &ChunkMeta) -> Result<ChunkId, StoreError> {
+ match self {
+ Self::Local(store) => store.put(chunk, meta).await,
+ Self::Remote(store) => store.put(chunk, meta).await,
+ }
+ }
+
+ /// Get a chunk given its id.
+ pub async fn get(&self, id: &ChunkId) -> Result<(Vec<u8>, ChunkMeta), StoreError> {
+ match self {
+ Self::Local(store) => store.get(id).await,
+ Self::Remote(store) => store.get(id).await,
+ }
+ }
+}
+
+/// A local chunk store.
+pub struct LocalStore {
+ path: PathBuf,
+ index: Mutex<Index>,
+}
+
+impl LocalStore {
+ fn new(path: &Path) -> Result<Self, StoreError> {
+ Ok(Self {
+ path: path.to_path_buf(),
+ index: Mutex::new(Index::new(path)?),
+ })
+ }
+
+ async fn find_by_label(&self, meta: &ChunkMeta) -> Result<Vec<ChunkId>, StoreError> {
+ self.index
+ .lock()
+ .await
+ .find_by_label(meta.label())
+ .map_err(StoreError::Index)
+ }
+
+ async fn put(&self, chunk: Vec<u8>, meta: &ChunkMeta) -> Result<ChunkId, StoreError> {
+ let id = ChunkId::new();
+ let (dir, filename) = self.filename(&id);
+
+ if !dir.exists() {
+ std::fs::create_dir_all(&dir).map_err(|err| StoreError::ChunkMkdir(dir, err))?;
+ }
+
+ std::fs::write(&filename, &chunk)
+ .map_err(|err| StoreError::WriteChunk(filename.clone(), err))?;
+ self.index
+ .lock()
+ .await
+ .insert_meta(id.clone(), meta.clone())
+ .map_err(StoreError::Index)?;
+ Ok(id)
+ }
+
+ async fn get(&self, id: &ChunkId) -> Result<(Vec<u8>, ChunkMeta), StoreError> {
+ let meta = self.index.lock().await.get_meta(id)?;
+
+ let (_, filename) = &self.filename(id);
+
+ let raw =
+ std::fs::read(filename).map_err(|err| StoreError::ReadChunk(filename.clone(), err))?;
+
+ Ok((raw, meta))
+ }
+
+ fn filename(&self, id: &ChunkId) -> (PathBuf, PathBuf) {
+ let bytes = id.as_bytes();
+ assert!(bytes.len() > 3);
+ let a = bytes[0];
+ let b = bytes[1];
+ let c = bytes[2];
+ let dir = self.path.join(format!("{}/{}/{}", a, b, c));
+ let filename = dir.join(format!("{}.data", id));
+ (dir, filename)
+ }
+}
+
+/// A remote chunk store.
+pub struct RemoteStore {
+ client: reqwest::Client,
+ base_url: String,
+}
+
+impl RemoteStore {
+ fn new(config: &ClientConfig) -> Result<Self, StoreError> {
+ info!("creating remote store with config: {:#?}", config);
+
+ let client = reqwest::Client::builder()
+ .danger_accept_invalid_certs(!config.verify_tls_cert)
+ .build()
+ .map_err(StoreError::ReqwestError)?;
+ Ok(Self {
+ client,
+ base_url: config.server_url.to_string(),
+ })
+ }
+
+ async fn find_by_label(&self, meta: &ChunkMeta) -> Result<Vec<ChunkId>, StoreError> {
+ let body = match self.get_helper("", &[("label", meta.label())]).await {
+ Ok((_, body)) => body,
+ Err(err) => return Err(err),
+ };
+
+ let hits: HashMap<String, ChunkMeta> =
+ serde_json::from_slice(&body).map_err(StoreError::JsonParse)?;
+ let ids = hits.keys().map(|id| ChunkId::recreate(id)).collect();
+ Ok(ids)
+ }
+
+ async fn put(&self, chunk: Vec<u8>, meta: &ChunkMeta) -> Result<ChunkId, StoreError> {
+ let res = self
+ .client
+ .post(&self.chunks_url())
+ .header("chunk-meta", meta.to_json())
+ .body(chunk)
+ .send()
+ .await
+ .map_err(StoreError::ReqwestError)?;
+ let res: HashMap<String, String> = res.json().await.map_err(StoreError::ReqwestError)?;
+ debug!("upload_chunk: res={:?}", res);
+ let chunk_id = if let Some(chunk_id) = res.get("chunk_id") {
+ debug!("upload_chunk: id={}", chunk_id);
+ chunk_id.parse().unwrap()
+ } else {
+ return Err(StoreError::NoCreatedChunkId);
+ };
+ info!("uploaded_chunk {}", chunk_id);
+ Ok(chunk_id)
+ }
+
+ async fn get(&self, id: &ChunkId) -> Result<(Vec<u8>, ChunkMeta), StoreError> {
+ let (headers, body) = self.get_helper(&format!("/{}", id), &[]).await?;
+ let meta = self.get_chunk_meta_header(id, &headers)?;
+ Ok((body, meta))
+ }
+
+ fn base_url(&self) -> &str {
+ &self.base_url
+ }
+
+ fn chunks_url(&self) -> String {
+ format!("{}/v1/chunks", self.base_url())
+ }
+
+ async fn get_helper(
+ &self,
+ path: &str,
+ query: &[(&str, &str)],
+ ) -> Result<(HeaderMap, Vec<u8>), StoreError> {
+ let url = format!("{}{}", &self.chunks_url(), path);
+ info!("GET {}", url);
+
+ // Build HTTP request structure.
+ let req = self
+ .client
+ .get(&url)
+ .query(query)
+ .build()
+ .map_err(StoreError::ReqwestError)?;
+
+ // Make HTTP request.
+ let res = self
+ .client
+ .execute(req)
+ .await
+ .map_err(StoreError::ReqwestError)?;
+
+ // Did it work?
+ if res.status() != 200 {
+ return Err(StoreError::NotFound(path.to_string()));
+ }
+
+ // Return headers and body.
+ let headers = res.headers().clone();
+ let body = res.bytes().await.map_err(StoreError::ReqwestError)?;
+ let body = body.to_vec();
+ Ok((headers, body))
+ }
+
+ fn get_chunk_meta_header(
+ &self,
+ chunk_id: &ChunkId,
+ headers: &HeaderMap,
+ ) -> Result<ChunkMeta, StoreError> {
+ let meta = headers.get("chunk-meta");
+
+ if meta.is_none() {
+ let err = StoreError::NoChunkMeta(chunk_id.clone());
+ error!("fetching chunk {} failed: {}", chunk_id, err);
+ return Err(err);
+ }
+
+ let meta = meta
+ .unwrap()
+ .to_str()
+ .map_err(StoreError::MetaHeaderToString)?;
+ let meta: ChunkMeta = serde_json::from_str(meta).map_err(StoreError::JsonParse)?;
+
+ Ok(meta)
+ }
+}
+
+/// Possible errors from using a ChunkStore.
+#[derive(Debug, thiserror::Error)]
+pub enum StoreError {
+ /// FIXME
+ #[error("FIXME")]
+ FIXME,
+
+ /// Error from a chunk index.
+ #[error(transparent)]
+ Index(#[from] IndexError),
+
+ /// An error from the HTTP library.
+ #[error("error from reqwest library: {0}")]
+ ReqwestError(reqwest::Error),
+
+ /// Client configuration is wrong.
+ #[error(transparent)]
+ ClientConfigError(#[from] ClientConfigError),
+
+ /// Server claims to not have an entity.
+ #[error("Server does not have {0}")]
+ NotFound(String),
+
+ /// Server didn't give us a chunk's metadata.
+ #[error("Server response did not have a 'chunk-meta' header for chunk {0}")]
+ NoChunkMeta(ChunkId),
+
+ /// An error with the `chunk-meta` header.
+ #[error("couldn't convert response chunk-meta header to string: {0}")]
+ MetaHeaderToString(reqwest::header::ToStrError),
+
+ /// Error parsing JSON.
+ #[error("failed to parse JSON: {0}")]
+ JsonParse(serde_json::Error),
+
+ /// An error creating chunk directory.
+ #[error("Failed to create chunk directory {0}")]
+ ChunkMkdir(PathBuf, #[source] std::io::Error),
+
+ /// An error writing a chunk file.
+ #[error("Failed to write chunk {0}")]
+ WriteChunk(PathBuf, #[source] std::io::Error),
+
+ /// An error reading a chunk file.
+ #[error("Failed to read chunk {0}")]
+ ReadChunk(PathBuf, #[source] std::io::Error),
+
+ /// No chunk id for uploaded chunk.
+ #[error("Server response claimed it had created a chunk, but lacked chunk id")]
+ NoCreatedChunkId,
+}
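(A minimal usage sketch of the new module as defined above, assuming the crate layout from src/lib.rs plus tokio and anyhow on the caller's side; the path and label are hypothetical:)

    use obnam::chunkmeta::ChunkMeta;
    use obnam::chunkstore::ChunkStore;
    use obnam::label::Label;

    #[tokio::main]
    async fn main() -> anyhow::Result<()> {
        // A local store rooted at a directory; ChunkStore::remote(&config)
        // would open the HTTP-backed variant behind the same interface.
        let store = ChunkStore::local("/tmp/chunks")?;

        let meta = ChunkMeta::new(&Label::literal("example"));
        let id = store.put(b"hello".to_vec(), &meta).await?;

        // get() returns both the raw chunk bytes and the metadata.
        let (data, meta) = store.get(&id).await?;
        assert_eq!(data, b"hello");

        // find_by_label() lists every chunk id carrying a matching label.
        assert!(!store.find_by_label(&meta).await?.is_empty());
        Ok(())
    }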
diff --git a/src/cipher.rs b/src/cipher.rs
index 7bd2e84..21785b9 100644
--- a/src/cipher.rs
+++ b/src/cipher.rs
@@ -4,7 +4,7 @@ use crate::chunk::DataChunk;
use crate::chunkmeta::ChunkMeta;
use crate::passwords::Passwords;
-use aes_gcm::aead::{generic_array::GenericArray, Aead, NewAead, Payload};
+use aes_gcm::aead::{generic_array::GenericArray, Aead, KeyInit, Payload};
use aes_gcm::Aes256Gcm; // Or `Aes128Gcm`
use rand::Rng;
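(The cipher.rs change tracks the aes-gcm 0.10 trait rename: key construction moved from `NewAead` to `KeyInit`, with call sites unchanged. A minimal round-trip sketch, assuming aes-gcm 0.10; the all-zero key and nonce are for illustration only:)

    use aes_gcm::aead::{generic_array::GenericArray, Aead, KeyInit};
    use aes_gcm::Aes256Gcm;

    fn main() {
        // Aes256Gcm::new() is now provided by the KeyInit trait.
        let key = GenericArray::from_slice(&[0u8; 32]);
        let cipher = Aes256Gcm::new(key);

        let nonce = GenericArray::from_slice(&[0u8; 12]);
        let ciphertext = cipher.encrypt(nonce, b"plaintext".as_ref()).unwrap();
        let plaintext = cipher.decrypt(nonce, ciphertext.as_ref()).unwrap();
        assert_eq!(plaintext, b"plaintext");
    }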
diff --git a/src/client.rs b/src/client.rs
index bed5f1e..a924052 100644
--- a/src/client.rs
+++ b/src/client.rs
@@ -5,15 +5,14 @@ use crate::chunk::{
};
use crate::chunkid::ChunkId;
use crate::chunkmeta::ChunkMeta;
+use crate::chunkstore::{ChunkStore, StoreError};
use crate::cipher::{CipherEngine, CipherError};
use crate::config::{ClientConfig, ClientConfigError};
use crate::generation::{FinishedGeneration, GenId, LocalGeneration, LocalGenerationError};
use crate::genlist::GenerationList;
use crate::label::Label;
-use log::{debug, error, info};
-use reqwest::header::HeaderMap;
-use std::collections::HashMap;
+use log::{error, info};
use std::fs::File;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
@@ -100,12 +99,15 @@ pub enum ClientError {
/// Failed to write a file.
#[error("failed to write to file {0}: {1}")]
FileWrite(PathBuf, std::io::Error),
+
+ /// Error from a chunk store.
+ #[error(transparent)]
+ ChunkStore(#[from] StoreError),
}
/// Client for the Obnam server HTTP API.
pub struct BackupClient {
- client: reqwest::Client,
- base_url: String,
+ store: ChunkStore,
cipher: CipherEngine,
}
@@ -113,68 +115,25 @@ impl BackupClient {
/// Create a new backup client.
pub fn new(config: &ClientConfig) -> Result<Self, ClientError> {
info!("creating backup client with config: {:#?}", config);
-
let pass = config.passwords()?;
-
- let client = reqwest::Client::builder()
- .danger_accept_invalid_certs(!config.verify_tls_cert)
- .build()
- .map_err(ClientError::ReqwestError)?;
Ok(Self {
- client,
- base_url: config.server_url.to_string(),
+ store: ChunkStore::remote(config)?,
cipher: CipherEngine::new(&pass),
})
}
- fn base_url(&self) -> &str {
- &self.base_url
- }
-
- fn chunks_url(&self) -> String {
- format!("{}/v1/chunks", self.base_url())
- }
-
/// Does the server have a chunk?
pub async fn has_chunk(&self, meta: &ChunkMeta) -> Result<Option<ChunkId>, ClientError> {
- let body = match self.get("", &[("label", meta.label())]).await {
- Ok((_, body)) => body,
- Err(err) => return Err(err),
- };
-
- let hits: HashMap<String, ChunkMeta> =
- serde_json::from_slice(&body).map_err(ClientError::JsonParse)?;
- let mut iter = hits.iter();
- let has = if let Some((chunk_id, _)) = iter.next() {
- Some(chunk_id.into())
- } else {
- None
- };
-
- Ok(has)
+ let mut ids = self.store.find_by_label(meta).await?;
+ Ok(ids.pop())
}
/// Upload a data chunk to the server.
- pub async fn upload_chunk(&self, chunk: DataChunk) -> Result<ChunkId, ClientError> {
+ pub async fn upload_chunk(&mut self, chunk: DataChunk) -> Result<ChunkId, ClientError> {
let enc = self.cipher.encrypt_chunk(&chunk)?;
- let res = self
- .client
- .post(&self.chunks_url())
- .header("chunk-meta", chunk.meta().to_json())
- .body(enc.ciphertext().to_vec())
- .send()
- .await
- .map_err(ClientError::ReqwestError)?;
- debug!("upload_chunk: res={:?}", res);
- let res: HashMap<String, String> = res.json().await.map_err(ClientError::ReqwestError)?;
- let chunk_id = if let Some(chunk_id) = res.get("chunk_id") {
- debug!("upload_chunk: id={}", chunk_id);
- chunk_id.parse().unwrap()
- } else {
- return Err(ClientError::NoCreatedChunkId);
- };
- info!("uploaded_chunk {}", chunk_id);
- Ok(chunk_id)
+ let data = enc.ciphertext().to_vec();
+ let id = self.store.put(data, chunk.meta()).await?;
+ Ok(id)
}
/// Get current client trust chunk from repository, if there is one.
@@ -196,15 +155,9 @@ impl BackupClient {
}
async fn find_client_trusts(&self) -> Result<Vec<ChunkId>, ClientError> {
- let label = Label::literal("client-trust").serialize();
- let body = match self.get("", &[("label", &label)]).await {
- Ok((_, body)) => body,
- Err(err) => return Err(err),
- };
-
- let hits: HashMap<String, ChunkMeta> =
- serde_json::from_slice(&body).map_err(ClientError::JsonParse)?;
- let ids = hits.iter().map(|(id, _)| id.into()).collect();
+ let label = Label::literal("client-trust");
+ let meta = ChunkMeta::new(&label);
+ let ids = self.store.find_by_label(&meta).await?;
Ok(ids)
}
@@ -220,9 +173,7 @@ impl BackupClient {
/// Fetch a data chunk from the server, given the chunk identifier.
pub async fn fetch_chunk(&self, chunk_id: &ChunkId) -> Result<DataChunk, ClientError> {
- let (headers, body) = self.get(&format!("/{}", chunk_id), &[]).await?;
- let meta = self.get_chunk_meta_header(chunk_id, &headers)?;
-
+ let (body, meta) = self.store.get(chunk_id).await?;
let meta_bytes = meta.to_json_vec();
let chunk = self.cipher.decrypt_chunk(&body, &meta_bytes)?;
@@ -244,7 +195,7 @@ impl BackupClient {
let gen = self.fetch_generation_chunk(gen_id).await?;
// Fetch the SQLite file, storing it in the named file.
- let mut dbfile = File::create(&dbname)
+ let mut dbfile = File::create(dbname)
.map_err(|err| ClientError::FileCreate(dbname.to_path_buf(), err))?;
for id in gen.chunk_ids() {
let chunk = self.fetch_chunk(id).await?;
@@ -257,61 +208,4 @@ impl BackupClient {
let gen = LocalGeneration::open(dbname)?;
Ok(gen)
}
-
- async fn get(
- &self,
- path: &str,
- query: &[(&str, &str)],
- ) -> Result<(HeaderMap, Vec<u8>), ClientError> {
- let url = format!("{}{}", &self.chunks_url(), path);
- info!("GET {}", url);
-
- // Build HTTP request structure.
- let req = self
- .client
- .get(&url)
- .query(query)
- .build()
- .map_err(ClientError::ReqwestError)?;
-
- // Make HTTP request.
- let res = self
- .client
- .execute(req)
- .await
- .map_err(ClientError::ReqwestError)?;
-
- // Did it work?
- if res.status() != 200 {
- return Err(ClientError::NotFound(path.to_string()));
- }
-
- // Return headers and body.
- let headers = res.headers().clone();
- let body = res.bytes().await.map_err(ClientError::ReqwestError)?;
- let body = body.to_vec();
- Ok((headers, body))
- }
-
- fn get_chunk_meta_header(
- &self,
- chunk_id: &ChunkId,
- headers: &HeaderMap,
- ) -> Result<ChunkMeta, ClientError> {
- let meta = headers.get("chunk-meta");
-
- if meta.is_none() {
- let err = ClientError::NoChunkMeta(chunk_id.clone());
- error!("fetching chunk {} failed: {}", chunk_id, err);
- return Err(err);
- }
-
- let meta = meta
- .unwrap()
- .to_str()
- .map_err(ClientError::MetaHeaderToString)?;
- let meta: ChunkMeta = serde_json::from_str(meta).map_err(ClientError::JsonParse)?;
-
- Ok(meta)
- }
}
diff --git a/src/cmd/backup.rs b/src/cmd/backup.rs
index 80dbb1f..70e9eac 100644
--- a/src/cmd/backup.rs
+++ b/src/cmd/backup.rs
@@ -10,21 +10,21 @@ use crate::generation::GenId;
use crate::performance::{Clock, Performance};
use crate::schema::VersionComponent;
+use clap::Parser;
use log::info;
use std::time::SystemTime;
-use structopt::StructOpt;
use tempfile::tempdir;
use tokio::runtime::Runtime;
/// Make a backup.
-#[derive(Debug, StructOpt)]
+#[derive(Debug, Parser)]
pub struct Backup {
/// Force a full backup, instead of an incremental one.
- #[structopt(long)]
+ #[clap(long)]
full: bool,
/// Backup schema major version to use.
- #[structopt(long)]
+ #[clap(long)]
backup_version: Option<VersionComponent>,
}
@@ -45,7 +45,7 @@ impl Backup {
let major = self.backup_version.unwrap_or(DEFAULT_SCHEMA_MAJOR);
let schema = schema_version(major)?;
- let client = BackupClient::new(config)?;
+ let mut client = BackupClient::new(config)?;
let trust = client
.get_client_trust()
.await?
@@ -68,7 +68,7 @@ impl Backup {
let (is_incremental, outcome) = if let Some(old_id) = old_id {
info!("incremental backup based on {}", old_id);
- let mut run = BackupRun::incremental(config, &client)?;
+ let mut run = BackupRun::incremental(config, &mut client)?;
let old = run.start(Some(&old_id), &oldtemp, perf).await?;
(
true,
@@ -77,7 +77,7 @@ impl Backup {
)
} else {
info!("fresh backup without a previous generation");
- let mut run = BackupRun::initial(config, &client)?;
+ let mut run = BackupRun::initial(config, &mut client)?;
let old = run.start(None, &oldtemp, perf).await?;
(
false,
diff --git a/src/cmd/chunk.rs b/src/cmd/chunk.rs
index 445d23f..293de20 100644
--- a/src/cmd/chunk.rs
+++ b/src/cmd/chunk.rs
@@ -5,22 +5,19 @@ use crate::chunkmeta::ChunkMeta;
use crate::cipher::CipherEngine;
use crate::config::ClientConfig;
use crate::error::ObnamError;
+use clap::Parser;
use std::path::PathBuf;
-use structopt::StructOpt;
/// Encrypt a chunk.
-#[derive(Debug, StructOpt)]
+#[derive(Debug, Parser)]
pub struct EncryptChunk {
/// The name of the file containing the cleartext chunk.
- #[structopt(parse(from_os_str))]
filename: PathBuf,
/// Name of file where to write the encrypted chunk.
- #[structopt(parse(from_os_str))]
output: PathBuf,
/// Chunk metadata as JSON.
- #[structopt()]
json: String,
}
@@ -43,18 +40,15 @@ impl EncryptChunk {
}
/// Decrypt a chunk.
-#[derive(Debug, StructOpt)]
+#[derive(Debug, Parser)]
pub struct DecryptChunk {
/// Name of file containing encrypted chunk.
- #[structopt(parse(from_os_str))]
filename: PathBuf,
/// Name of file where to write the cleartext chunk.
- #[structopt(parse(from_os_str))]
output: PathBuf,
/// Chunk metadata as JSON.
- #[structopt()]
json: String,
}
diff --git a/src/cmd/chunkify.rs b/src/cmd/chunkify.rs
index e2ce05f..91cb0be 100644
--- a/src/cmd/chunkify.rs
+++ b/src/cmd/chunkify.rs
@@ -4,10 +4,10 @@ use crate::config::ClientConfig;
use crate::engine::Engine;
use crate::error::ObnamError;
use crate::workqueue::WorkQueue;
+use clap::Parser;
use serde::Serialize;
use sha2::{Digest, Sha256};
use std::path::PathBuf;
-use structopt::StructOpt;
use tokio::fs::File;
use tokio::io::{AsyncReadExt, BufReader};
use tokio::runtime::Runtime;
@@ -18,7 +18,7 @@ use tokio::sync::mpsc;
const Q: usize = 8;
/// Split files into chunks and show their metadata.
-#[derive(Debug, StructOpt)]
+#[derive(Debug, Parser)]
pub struct Chunkify {
/// Names of files to split into chunks.
filenames: Vec<PathBuf>,
diff --git a/src/cmd/gen_info.rs b/src/cmd/gen_info.rs
index 0aec103..901a0ae 100644
--- a/src/cmd/gen_info.rs
+++ b/src/cmd/gen_info.rs
@@ -4,16 +4,15 @@ use crate::chunk::ClientTrust;
use crate::client::BackupClient;
use crate::config::ClientConfig;
use crate::error::ObnamError;
+use clap::Parser;
use log::info;
-use structopt::StructOpt;
use tempfile::NamedTempFile;
use tokio::runtime::Runtime;
/// Show metadata for a generation.
-#[derive(Debug, StructOpt)]
+#[derive(Debug, Parser)]
pub struct GenInfo {
/// Reference of the generation.
- #[structopt()]
gen_ref: String,
}
diff --git a/src/cmd/get_chunk.rs b/src/cmd/get_chunk.rs
index 0b27084..1561492 100644
--- a/src/cmd/get_chunk.rs
+++ b/src/cmd/get_chunk.rs
@@ -4,15 +4,14 @@ use crate::chunkid::ChunkId;
use crate::client::BackupClient;
use crate::config::ClientConfig;
use crate::error::ObnamError;
+use clap::Parser;
use std::io::{stdout, Write};
-use structopt::StructOpt;
use tokio::runtime::Runtime;
/// Fetch a chunk from the server.
-#[derive(Debug, StructOpt)]
+#[derive(Debug, Parser)]
pub struct GetChunk {
/// Identifier of chunk to fetch.
- #[structopt()]
chunk_id: String,
}
diff --git a/src/cmd/init.rs b/src/cmd/init.rs
index 8e555ca..5950fbb 100644
--- a/src/cmd/init.rs
+++ b/src/cmd/init.rs
@@ -3,15 +3,15 @@
use crate::config::ClientConfig;
use crate::error::ObnamError;
use crate::passwords::{passwords_filename, Passwords};
-use structopt::StructOpt;
+use clap::Parser;
const PROMPT: &str = "Obnam passphrase: ";
/// Initialize client by setting passwords.
-#[derive(Debug, StructOpt)]
+#[derive(Debug, Parser)]
pub struct Init {
/// Only for testing.
- #[structopt(long)]
+ #[clap(long)]
insecure_passphrase: Option<String>,
}
@@ -20,7 +20,7 @@ impl Init {
pub fn run(&self, config: &ClientConfig) -> Result<(), ObnamError> {
let passphrase = match &self.insecure_passphrase {
Some(x) => x.to_string(),
- None => rpassword::read_password_from_tty(Some(PROMPT)).unwrap(),
+ None => rpassword::prompt_password(PROMPT).unwrap(),
};
let passwords = Passwords::new(&passphrase);
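(The init.rs change follows the rpassword 6+ rename; a one-line sketch of the new call:)

    fn main() {
        // prompt_password replaces read_password_from_tty(Some(prompt))
        // and prints the prompt itself before reading from the tty.
        let passphrase = rpassword::prompt_password("Obnam passphrase: ").unwrap();
        println!("read {} characters", passphrase.len());
    }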
diff --git a/src/cmd/inspect.rs b/src/cmd/inspect.rs
index 02801ae..3b41075 100644
--- a/src/cmd/inspect.rs
+++ b/src/cmd/inspect.rs
@@ -6,16 +6,15 @@ use crate::client::BackupClient;
use crate::config::ClientConfig;
use crate::error::ObnamError;
+use clap::Parser;
use log::info;
-use structopt::StructOpt;
use tempfile::NamedTempFile;
use tokio::runtime::Runtime;
/// Make a backup.
-#[derive(Debug, StructOpt)]
+#[derive(Debug, Parser)]
pub struct Inspect {
/// Reference to generation to inspect.
- #[structopt()]
gen_id: String,
}
diff --git a/src/cmd/list.rs b/src/cmd/list.rs
index bbb9c91..8bc6978 100644
--- a/src/cmd/list.rs
+++ b/src/cmd/list.rs
@@ -4,11 +4,11 @@ use crate::chunk::ClientTrust;
use crate::client::BackupClient;
use crate::config::ClientConfig;
use crate::error::ObnamError;
-use structopt::StructOpt;
+use clap::Parser;
use tokio::runtime::Runtime;
/// List generations on the server.
-#[derive(Debug, StructOpt)]
+#[derive(Debug, Parser)]
pub struct List {}
impl List {
diff --git a/src/cmd/list_backup_versions.rs b/src/cmd/list_backup_versions.rs
index 859d91c..c78ccfc 100644
--- a/src/cmd/list_backup_versions.rs
+++ b/src/cmd/list_backup_versions.rs
@@ -4,13 +4,13 @@ use crate::config::ClientConfig;
use crate::dbgen::{schema_version, DEFAULT_SCHEMA_MAJOR, SCHEMA_MAJORS};
use crate::error::ObnamError;
-use structopt::StructOpt;
+use clap::Parser;
/// List supported backup schema versions.
-#[derive(Debug, StructOpt)]
+#[derive(Debug, Parser)]
pub struct ListSchemaVersions {
/// List only the default version.
- #[structopt(long)]
+ #[clap(long)]
default_only: bool,
}
diff --git a/src/cmd/list_files.rs b/src/cmd/list_files.rs
index fb4764d..e8276cd 100644
--- a/src/cmd/list_files.rs
+++ b/src/cmd/list_files.rs
@@ -6,15 +6,15 @@ use crate::client::BackupClient;
use crate::config::ClientConfig;
use crate::error::ObnamError;
use crate::fsentry::{FilesystemEntry, FilesystemKind};
-use structopt::StructOpt;
+use clap::Parser;
use tempfile::NamedTempFile;
use tokio::runtime::Runtime;
/// List files in a backup.
-#[derive(Debug, StructOpt)]
+#[derive(Debug, Parser)]
pub struct ListFiles {
/// Reference to backup to list files in.
- #[structopt(default_value = "latest")]
+ #[clap(default_value = "latest")]
gen_id: String,
}
diff --git a/src/cmd/resolve.rs b/src/cmd/resolve.rs
index 12432cc..a7774d7 100644
--- a/src/cmd/resolve.rs
+++ b/src/cmd/resolve.rs
@@ -4,11 +4,11 @@ use crate::chunk::ClientTrust;
use crate::client::BackupClient;
use crate::config::ClientConfig;
use crate::error::ObnamError;
-use structopt::StructOpt;
+use clap::Parser;
use tokio::runtime::Runtime;
/// Resolve a generation reference into a generation id.
-#[derive(Debug, StructOpt)]
+#[derive(Debug, Parser)]
pub struct Resolve {
/// The generation reference.
generation: String,
diff --git a/src/cmd/restore.rs b/src/cmd/restore.rs
index 223d481..58caf61 100644
--- a/src/cmd/restore.rs
+++ b/src/cmd/restore.rs
@@ -9,6 +9,7 @@ use crate::dbgen::FileId;
use crate::error::ObnamError;
use crate::fsentry::{FilesystemEntry, FilesystemKind};
use crate::generation::{LocalGeneration, LocalGenerationError};
+use clap::Parser;
use indicatif::{ProgressBar, ProgressStyle};
use libc::{chmod, mkfifo, timespec, utimensat, AT_FDCWD, AT_SYMLINK_NOFOLLOW};
use log::{debug, error, info};
@@ -20,19 +21,16 @@ use std::os::unix::fs::symlink;
use std::os::unix::net::UnixListener;
use std::path::StripPrefixError;
use std::path::{Path, PathBuf};
-use structopt::StructOpt;
use tempfile::NamedTempFile;
use tokio::runtime::Runtime;
/// Restore a backup.
-#[derive(Debug, StructOpt)]
+#[derive(Debug, Parser)]
pub struct Restore {
/// Reference to generation to restore.
- #[structopt()]
gen_id: String,
/// Path to directory where restored files are written.
- #[structopt(parse(from_os_str))]
to: PathBuf,
}
@@ -301,13 +299,17 @@ fn create_progress_bar(file_count: FileId, verbose: bool) -> ProgressBar {
} else {
ProgressBar::hidden()
};
- let parts = vec![
+ let parts = [
"{wide_bar}",
"elapsed: {elapsed}",
"files: {pos}/{len}",
"current: {wide_msg}",
"{spinner}",
];
- progress.set_style(ProgressStyle::default_bar().template(&parts.join("\n")));
+ progress.set_style(
+ ProgressStyle::default_bar()
+ .template(&parts.join("\n"))
+ .expect("create indicatif ProgressStyle value"),
+ );
progress
}
diff --git a/src/cmd/show_config.rs b/src/cmd/show_config.rs
index 7ac52ec..8e0ce30 100644
--- a/src/cmd/show_config.rs
+++ b/src/cmd/show_config.rs
@@ -2,10 +2,10 @@
use crate::config::ClientConfig;
use crate::error::ObnamError;
-use structopt::StructOpt;
+use clap::Parser;
/// Show actual client configuration.
-#[derive(Debug, StructOpt)]
+#[derive(Debug, Parser)]
pub struct ShowConfig {}
impl ShowConfig {
diff --git a/src/cmd/show_gen.rs b/src/cmd/show_gen.rs
index f47a07b..95d3fd3 100644
--- a/src/cmd/show_gen.rs
+++ b/src/cmd/show_gen.rs
@@ -7,17 +7,17 @@ use crate::db::DbInt;
use crate::error::ObnamError;
use crate::fsentry::FilesystemKind;
use crate::generation::GenId;
+use clap::Parser;
use indicatif::HumanBytes;
use serde::Serialize;
-use structopt::StructOpt;
use tempfile::NamedTempFile;
use tokio::runtime::Runtime;
/// Show information about a generation.
-#[derive(Debug, StructOpt)]
+#[derive(Debug, Parser)]
pub struct ShowGeneration {
/// Reference to the generation. Defaults to latest.
- #[structopt(default_value = "latest")]
+ #[clap(default_value = "latest")]
gen_id: String,
}
diff --git a/src/db.rs b/src/db.rs
index da24e96..392134d 100644
--- a/src/db.rs
+++ b/src/db.rs
@@ -428,10 +428,7 @@ impl<'a> ToSql for Value<'a> {
i64::try_from(*v)
.map_err(|err| rusqlite::Error::ToSqlConversionFailure(Box::new(err)))?,
),
- Self::Bool(_, v) => ValueRef::Integer(
- i64::try_from(*v)
- .map_err(|err| rusqlite::Error::ToSqlConversionFailure(Box::new(err)))?,
- ),
+ Self::Bool(_, v) => ValueRef::Integer(i64::from(*v)),
Self::Text(_, v) => ValueRef::Text(v.as_ref()),
Self::Blob(_, v) => ValueRef::Blob(v),
};
@@ -478,10 +475,7 @@ impl ToSql for OwnedValue {
i64::try_from(*v)
.map_err(|err| rusqlite::Error::ToSqlConversionFailure(Box::new(err)))?,
),
- Self::Bool(_, v) => Value::Integer(
- i64::try_from(*v)
- .map_err(|err| rusqlite::Error::ToSqlConversionFailure(Box::new(err)))?,
- ),
+ Self::Bool(_, v) => Value::Integer(i64::from(*v)),
Self::Text(_, v) => Value::Text(v.to_string()),
Self::Blob(_, v) => Value::Blob(v.to_vec()),
};
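(The db.rs simplification rests on `From<bool> for i64` being infallible — true maps to 1, false to 0 — so the `try_from` plus error-mapping dance was dead code; a sketch:)

    fn main() {
        // From<bool> is implemented for all integer types, so no
        // TryFrom and no ToSqlConversionFailure mapping is needed.
        assert_eq!(i64::from(true), 1);
        assert_eq!(i64::from(false), 0);
    }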
diff --git a/src/engine.rs b/src/engine.rs
index 384f591..d35281b 100644
--- a/src/engine.rs
+++ b/src/engine.rs
@@ -76,7 +76,7 @@ async fn manage_workers<S, T, F>(
// We got a work item. Launch background task to
// work on it.
let tx = tx.clone();
- workers.push(do_work(work, tx, func));
+ workers.push_back(do_work(work, tx, func));
// If queue is full, wait for at least one
// background task to finish.
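(The `push` to `push_back` rename suggests the worker queue is a `futures::stream::FuturesOrdered`, whose `push` was deprecated in favor of `push_back`/`push_front` in futures 0.3.25; a sketch under that assumption, with tokio as the runtime:)

    use futures::stream::{FuturesOrdered, StreamExt};

    #[tokio::main]
    async fn main() {
        let mut workers = FuturesOrdered::new();
        for i in 0..3u32 {
            // push_back appends to the queue; results are yielded in
            // insertion order regardless of completion order.
            workers.push_back(async move { i * 2 });
        }
        let results: Vec<u32> = workers.collect().await;
        assert_eq!(results, vec![0, 2, 4]);
    }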
diff --git a/src/index.rs b/src/index.rs
index 52da2f2..42f1a95 100644
--- a/src/index.rs
+++ b/src/index.rs
@@ -139,7 +139,7 @@ mod sql {
params![],
)?;
conn.execute("CREATE INDEX label_idx ON chunks (label)", params![])?;
- conn.pragma_update(None, "journal_mode", &"WAL")?;
+ conn.pragma_update(None, "journal_mode", "WAL")?;
Ok(conn)
}
@@ -147,7 +147,7 @@ mod sql {
pub fn open_db(filename: &Path) -> Result<Connection, IndexError> {
let flags = OpenFlags::SQLITE_OPEN_READ_WRITE;
let conn = Connection::open_with_flags(filename, flags)?;
- conn.pragma_update(None, "journal_mode", &"WAL")?;
+ conn.pragma_update(None, "journal_mode", "WAL")?;
Ok(conn)
}
diff --git a/src/indexedstore.rs b/src/indexedstore.rs
deleted file mode 100644
index 15b5a22..0000000
--- a/src/indexedstore.rs
+++ /dev/null
@@ -1,77 +0,0 @@
-//! An indexed, on-disk store for chunks on the server.
-
-use crate::chunk::{DataChunk, GenerationChunkError};
-use crate::chunkid::ChunkId;
-use crate::chunkmeta::ChunkMeta;
-use crate::index::{Index, IndexError};
-use crate::store::{Store, StoreError};
-use std::path::Path;
-
-/// A store for chunks and their metadata.
-///
-/// This combines Store and Index into one interface to make it easier
-/// to handle the server side storage of chunks.
-pub struct IndexedStore {
- store: Store,
- index: Index,
-}
-
-/// All the errors that may be returned for `IndexStore`.
-#[derive(Debug, thiserror::Error)]
-pub enum IndexedError {
- /// An error from Index.
- #[error(transparent)]
- IndexError(#[from] IndexError),
-
- /// Error regarding generation chunks.
- #[error(transparent)]
- GenerationChunkError(#[from] GenerationChunkError),
-
- /// An error from Store.
- #[error(transparent)]
- SqlError(#[from] StoreError),
-}
-
-impl IndexedStore {
- /// Create a new indexed store.
- pub fn new(dirname: &Path) -> Result<Self, IndexedError> {
- let store = Store::new(dirname);
- let index = Index::new(dirname)?;
- Ok(Self { store, index })
- }
-
- /// Save a chunk in the store.
- pub fn save(&mut self, chunk: &DataChunk) -> Result<ChunkId, IndexedError> {
- let id = ChunkId::new();
- self.store.save(&id, chunk)?;
- self.insert_meta(&id, chunk.meta())?;
- Ok(id)
- }
-
- fn insert_meta(&mut self, id: &ChunkId, meta: &ChunkMeta) -> Result<(), IndexedError> {
- self.index.insert_meta(id.clone(), meta.clone())?;
- Ok(())
- }
-
- /// Get a chunk from the store, given its id.
- pub fn load(&self, id: &ChunkId) -> Result<(DataChunk, ChunkMeta), IndexedError> {
- Ok((self.store.load(id)?, self.load_meta(id)?))
- }
-
- /// Get a chunk's metadata form the store, given its id.
- pub fn load_meta(&self, id: &ChunkId) -> Result<ChunkMeta, IndexedError> {
- Ok(self.index.get_meta(id)?)
- }
-
- /// Find chunks with a client-assigned label.
- pub fn find_by_label(&self, label: &str) -> Result<Vec<ChunkId>, IndexedError> {
- Ok(self.index.find_by_label(label)?)
- }
-
- /// Remove a chunk from the store.
- pub fn remove(&mut self, id: &ChunkId) -> Result<(), IndexedError> {
- self.index.remove_meta(id)?;
- self.store.delete(id)?;
- Ok(())
- }
-}
diff --git a/src/lib.rs b/src/lib.rs
index fbbea15..8894966 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -13,6 +13,7 @@ pub mod chunk;
pub mod chunker;
pub mod chunkid;
pub mod chunkmeta;
+pub mod chunkstore;
pub mod cipher;
pub mod client;
pub mod cmd;
@@ -27,7 +28,6 @@ pub mod generation;
pub mod genlist;
pub mod genmeta;
pub mod index;
-pub mod indexedstore;
pub mod label;
pub mod passwords;
pub mod performance;
diff --git a/src/passwords.rs b/src/passwords.rs
index ea476bf..efc3f96 100644
--- a/src/passwords.rs
+++ b/src/passwords.rs
@@ -76,7 +76,7 @@ fn derive_password(passphrase: &str) -> String {
let salt = SaltString::generate(&mut OsRng);
Pbkdf2
- .hash_password(passphrase.as_bytes(), salt.as_ref())
+ .hash_password(passphrase.as_bytes(), salt.as_salt())
.unwrap()
.to_string()
}
diff --git a/src/policy.rs b/src/policy.rs
index b3ba24c..8cdbd76 100644
--- a/src/policy.rs
+++ b/src/policy.rs
@@ -20,15 +20,17 @@ pub struct BackupPolicy {
old_if_changed: bool,
}
-impl BackupPolicy {
+impl Default for BackupPolicy {
/// Create a default policy.
- pub fn default() -> Self {
+ fn default() -> Self {
Self {
new: true,
old_if_changed: true,
}
}
+}
+impl BackupPolicy {
/// Does a given file need to be backed up?
pub fn needs_backup(&self, old: &LocalGeneration, new_entry: &FilesystemEntry) -> Reason {
let new_name = new_entry.pathbuf();
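(Moving the inherent `default()` into an `impl Default` fixes the clippy `should_implement_trait` lint and lets the type work anywhere `Default` is expected; a minimal sketch of the same refactor:)

    struct Policy {
        new: bool,
        old_if_changed: bool,
    }

    impl Default for Policy {
        // Same body as before, but now callable through the trait,
        // e.g. by Option::unwrap_or_default or derive(Default) users.
        fn default() -> Self {
            Self {
                new: true,
                old_if_changed: true,
            }
        }
    }

    fn main() {
        let policy = Policy::default();
        assert!(policy.new && policy.old_if_changed);
    }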
diff --git a/src/store.rs b/src/store.rs
index 4e85ba1..185370e 100644
--- a/src/store.rs
+++ b/src/store.rs
@@ -48,18 +48,18 @@ impl Store {
std::fs::create_dir_all(dir)?;
}
- std::fs::write(&metaname, chunk.meta().to_json())?;
- std::fs::write(&dataname, chunk.data())?;
+ std::fs::write(metaname, chunk.meta().to_json())?;
+ std::fs::write(dataname, chunk.data())?;
Ok(())
}
/// Load a chunk from a store.
pub fn load(&self, id: &ChunkId) -> Result<DataChunk, StoreError> {
let (_, metaname, dataname) = &self.filenames(id);
- let meta = std::fs::read(&metaname)?;
+ let meta = std::fs::read(metaname)?;
let meta = serde_json::from_slice(&meta)?;
- let data = std::fs::read(&dataname)?;
+ let data = std::fs::read(dataname)?;
let data = DataChunk::new(data, meta);
Ok(data)
}
@@ -67,8 +67,8 @@ impl Store {
/// Delete a chunk from a store.
pub fn delete(&self, id: &ChunkId) -> Result<(), StoreError> {
let (_, metaname, dataname) = &self.filenames(id);
- std::fs::remove_file(&metaname)?;
- std::fs::remove_file(&dataname)?;
+ std::fs::remove_file(metaname)?;
+ std::fs::remove_file(dataname)?;
Ok(())
}
}