From e5f68184bfe91f6874fe8c2344dbd5fa613d6bee Mon Sep 17 00:00:00 2001
From: Lars Wirzenius
Date: Sat, 9 Jan 2021 16:52:35 +0200
Subject: feat! use SQLite db for chunk index on server

This speeds startup a lot. However, the backup repository needs to be
re-created from scratch and internal APIs have changed in incompatible
ways.
---
 client.yaml                       |   4 +-
 src/bin/benchmark-index.rs        |   6 +-
 src/bin/benchmark-indexedstore.rs |   2 +-
 src/bin/obnam-server.rs           |  18 ++-
 src/client.rs                     |  38 ++++--
 src/cmd/restore.rs                |   5 +-
 src/error.rs                      |  10 +-
 src/index.rs                      | 249 +++++++++++++++++++++++++++-----------
 src/indexedstore.rs               |  64 +++-------
 src/store.rs                      |  31 +----
 10 files changed, 251 insertions(+), 176 deletions(-)

diff --git a/client.yaml b/client.yaml
index 1b7d6c9..dd60c9c 100644
--- a/client.yaml
+++ b/client.yaml
@@ -1,3 +1,3 @@
-server_url: https://obnam0:8888
-root: /home/liw/pers/Foton
+server_url: https://localhost:8888
+root: /home/liw/tmp/Foton
 log: obnam.log
diff --git a/src/bin/benchmark-index.rs b/src/bin/benchmark-index.rs
index 5008660..d49a6c3 100644
--- a/src/bin/benchmark-index.rs
+++ b/src/bin/benchmark-index.rs
@@ -1,4 +1,5 @@
 use obnam::benchmark::ChunkGenerator;
+use obnam::chunkmeta::ChunkMeta;
 use obnam::index::Index;
 use std::path::PathBuf;
 use structopt::StructOpt;
@@ -24,9 +25,10 @@ fn main() -> anyhow::Result<()> {
     let opt = Opt::from_args();
     let gen = ChunkGenerator::new(opt.num);
 
-    let mut index = Index::default();
+    let mut index = Index::new(".")?;
     for (id, checksum, _, _) in gen {
-        index.insert(id, "sha25", &checksum);
+        let meta = ChunkMeta::new(&checksum);
+        index.insert_meta(id, meta)?;
     }
 
     Ok(())
diff --git a/src/bin/benchmark-indexedstore.rs b/src/bin/benchmark-indexedstore.rs
index a4191ac..3ee4c38 100644
--- a/src/bin/benchmark-indexedstore.rs
+++ b/src/bin/benchmark-indexedstore.rs
@@ -19,7 +19,7 @@ fn main() -> anyhow::Result<()> {
     let opt = Opt::from_args();
     let gen = ChunkGenerator::new(opt.num);
 
-    let mut store = IndexedStore::new(&opt.chunks);
+    let mut store = IndexedStore::new(&opt.chunks)?;
     for (_, _, meta, chunk) in gen {
         store.save(&meta, &chunk)?;
     }
diff --git a/src/bin/obnam-server.rs b/src/bin/obnam-server.rs
index 76d018f..19f2e99 100644
--- a/src/bin/obnam-server.rs
+++ b/src/bin/obnam-server.rs
@@ -36,9 +36,7 @@ async fn main() -> anyhow::Result<()> {
         return Err(ConfigError::BadServerAddress.into());
     }
 
-    let mut store = IndexedStore::new(&config.chunks);
-    store.fill_index()?;
-    println!("existing generations: {:?}", store.find_generations());
+    let store = IndexedStore::new(&config.chunks)?;
     let store = Arc::new(Mutex::new(store));
     let store = warp::any().map(move || Arc::clone(&store));
 
@@ -165,9 +163,7 @@ pub async fn fetch_chunk(
     let store = store.lock().await;
     let id: ChunkId = id.parse().unwrap();
     match store.load(&id) {
-        Ok(loaded) => {
-            let meta = loaded.meta().clone();
-            let data = loaded.data().clone();
+        Ok((data, meta)) => {
             info!("found chunk {}: {:?}", id, meta);
             Ok(ChunkResult::Fetched(meta, data))
         }
@@ -191,9 +187,9 @@ pub async fn search_chunks(
         return Ok(ChunkResult::BadRequest);
     }
     if key == "generation" && value == "true" {
-        store.find_generations()
+        store.find_generations().expect("SQL lookup failed")
     } else if key == "sha256" {
-        store.find_by_sha256(value)
+        store.find_by_sha256(value).expect("SQL lookup failed")
     } else {
         error!("unknown search key {:?}", key);
         return Ok(ChunkResult::BadRequest);
     }
@@ -210,10 +206,10 @@ pub async fn search_chunks(
                 info!("search found chunk {}", chunk_id);
                 meta
             }
-            Err(_) => {
+            Err(err) => {
                 error!(
-                    "search found chunk {} in index, but but not on disk",
-                    chunk_id
+                    "search found chunk {} in index, but not on disk: {}",
+                    chunk_id, err
                 );
                 return Ok(ChunkResult::InternalServerError);
             }
diff --git a/src/client.rs b/src/client.rs
index d075f43..4526830 100644
--- a/src/client.rs
+++ b/src/client.rs
@@ -67,7 +67,7 @@ impl BackupClient {
         e: &FilesystemEntry,
         size: usize,
     ) -> anyhow::Result<Vec<ChunkId>> {
-        debug!("entry: {:?}", e);
+        info!("upload entry: {:?}", e);
         let ids = match e.kind() {
             FilesystemKind::Regular => self.read_file(e.pathbuf(), size)?,
             FilesystemKind::Directory => vec![],
@@ -77,16 +77,18 @@ impl BackupClient {
     }
 
     pub fn upload_generation(&self, filename: &Path, size: usize) -> anyhow::Result<ChunkId> {
+        info!("upload SQLite {}", filename.display());
         let ids = self.read_file(filename.to_path_buf(), size)?;
         let gen = GenerationChunk::new(ids);
         let data = gen.to_data_chunk()?;
         let meta = ChunkMeta::new_generation(&sha256(data.data()), &current_timestamp());
-        let gen_id = self.upload_gen_chunk(meta, gen)?;
+        let gen_id = self.upload_gen_chunk(meta.clone(), gen)?;
+        info!("uploaded generation {}, meta {:?}", gen_id, meta);
         Ok(gen_id)
     }
 
     fn read_file(&self, filename: PathBuf, size: usize) -> anyhow::Result<Vec<ChunkId>> {
-        info!("uploading {}", filename.display());
+        info!("upload file {}", filename.display());
         let file = std::fs::File::open(filename)?;
         let chunker = Chunker::new(size, file);
         let chunk_ids = self.upload_new_file_chunks(chunker)?;
@@ -128,6 +130,7 @@ impl BackupClient {
             }
         };
 
+        info!("has_chunk result: {:?}", has);
         Ok(has)
     }
 
@@ -146,6 +149,7 @@ impl BackupClient {
         } else {
            return Err(ClientError::NoCreatedChunkId.into());
         };
+        info!("uploaded_chunk {} meta {:?}", chunk_id, meta);
         Ok(chunk_id)
     }
 
@@ -168,6 +172,7 @@ impl BackupClient {
         } else {
            return Err(ClientError::NoCreatedChunkId.into());
         };
+        info!("uploaded_generation chunk {}", chunk_id);
         Ok(chunk_id)
     }
 
@@ -176,10 +181,12 @@ impl BackupClient {
         for item in chunker {
             let (meta, chunk) = item?;
             if let Some(chunk_id) = self.has_chunk(&meta)? {
-                chunk_ids.push(chunk_id);
+                chunk_ids.push(chunk_id.clone());
+                info!("reusing existing chunk {}", chunk_id);
             } else {
                 let chunk_id = self.upload_chunk(meta, chunk)?;
-                chunk_ids.push(chunk_id);
+                chunk_ids.push(chunk_id.clone());
+                info!("created new chunk {}", chunk_id);
             }
         }
 
@@ -193,7 +200,7 @@ impl BackupClient {
         let res = self.client.execute(req)?;
         debug!("list_generations: status={}", res.status());
         let body = res.bytes()?;
-        debug!("list_generationgs: body={:?}", body);
+        debug!("list_generations: body={:?}", body);
         let map: HashMap<String, ChunkMeta> = serde_yaml::from_slice(&body)?;
         debug!("list_generations: map={:?}", map);
         let finished = map
@@ -204,28 +211,37 @@ impl BackupClient {
     }
 
     pub fn fetch_chunk(&self, chunk_id: &ChunkId) -> anyhow::Result<DataChunk> {
+        info!("fetch chunk {}", chunk_id);
+
         let url = format!("{}/{}", &self.chunks_url(), chunk_id);
-        trace!("fetch_chunk: url={:?}", url);
         let req = self.client.get(&url).build()?;
         let res = self.client.execute(req)?;
-        debug!("fetch_chunk: status={}", res.status());
         if res.status() != 200 {
-            return Err(ClientError::ChunkNotFound(chunk_id.to_string()).into());
+            let err = ClientError::ChunkNotFound(chunk_id.to_string());
+            error!("fetching chunk {} failed: {}", chunk_id, err);
+            return Err(err.into());
         }
 
         let headers = res.headers();
         let meta = headers.get("chunk-meta");
         if meta.is_none() {
-            return Err(ObnamError::NoChunkMeta(chunk_id.to_string()).into());
+            let err = ObnamError::NoChunkMeta(chunk_id.to_string());
+            error!("fetching chunk {} failed: {}", chunk_id, err);
+            return Err(err.into());
         }
         let meta = meta.unwrap().to_str()?;
+        debug!("fetching chunk {}: meta={:?}", chunk_id, meta);
         let meta: ChunkMeta = serde_json::from_str(meta)?;
+        debug!("fetching chunk {}: meta={:?}", chunk_id, meta);
 
         let body = res.bytes()?;
         let body = body.to_vec();
         let actual = sha256(&body);
         if actual != meta.sha256() {
-            return Err(ObnamError::WrongChecksum(chunk_id.to_string()).into());
+            let id = chunk_id.to_string();
+            let err = ObnamError::WrongChecksum(id, actual, meta.sha256().to_string());
+            error!("fetching chunk {} failed: {}", chunk_id, err);
+            return Err(err.into());
         }
 
         let chunk: DataChunk = DataChunk::new(body);
diff --git a/src/cmd/restore.rs b/src/cmd/restore.rs
index c882e21..d783a70 100644
--- a/src/cmd/restore.rs
+++ b/src/cmd/restore.rs
@@ -31,9 +31,10 @@ pub fn restore(config: &ClientConfig, gen_ref: &str, to: &Path) -> anyhow::Resul
         None => return Err(ObnamError::UnknownGeneration(gen_ref.to_string()).into()),
         Some(id) => id,
     };
+    info!("generation id is {}", gen_id);
     let gen = client.fetch_generation(&gen_id, &dbname)?;
-    info!("restore file count: {}", gen.file_count()?);
+    info!("restoring {} files", gen.file_count()?);
     let progress = create_progress_bar(gen.file_count()?, true);
     for file in gen.files()? {
         restore_generation(&client, &gen, file.fileno(), file.entry(), &to, &progress)?;
     }
@@ -75,7 +76,7 @@ fn restore_generation(
     to: &Path,
     progress: &ProgressBar,
 ) -> anyhow::Result<()> {
-    debug!("restoring {:?}", entry);
+    info!("restoring {:?}", entry);
     progress.set_message(&format!("{}", entry.pathbuf().display()));
     progress.inc(1);
 
diff --git a/src/error.rs b/src/error.rs
index 360e62d..a35a99c 100644
--- a/src/error.rs
+++ b/src/error.rs
@@ -13,6 +13,12 @@ pub enum ObnamError {
     #[error("Server response did not have a 'chunk-meta' header for chunk {0}")]
     NoChunkMeta(String),
 
-    #[error("Wrong checksum for chunk {0}")]
-    WrongChecksum(String),
+    #[error("Wrong checksum for chunk {0}, got {1}, expected {2}")]
+    WrongChecksum(String, String, String),
+
+    #[error("Chunk is missing: {0}")]
+    MissingChunk(String),
+
+    #[error("Chunk is in store too many times: {0}")]
+    DuplicateChunk(String),
 }
diff --git a/src/index.rs b/src/index.rs
index 0166b0f..fd38611 100644
--- a/src/index.rs
+++ b/src/index.rs
@@ -1,130 +1,243 @@
 use crate::chunkid::ChunkId;
+use crate::chunkmeta::ChunkMeta;
+use rusqlite::Connection;
 use std::collections::HashMap;
-use std::default::Default;
+use std::path::{Path, PathBuf};
 
 /// A chunk index.
 ///
 /// A chunk index lets the server quickly find chunks based on a
 /// string key/value pair, or whether they are generations.
-#[derive(Debug, Default)]
+#[derive(Debug)]
 pub struct Index {
+    filename: PathBuf,
+    conn: Connection,
     map: HashMap<(String, String), Vec<ChunkId>>,
     generations: Vec<ChunkId>,
+    metas: HashMap<ChunkId, ChunkMeta>,
 }
 
 impl Index {
-    pub fn is_empty(&self) -> bool {
-        self.len() == 0
-    }
-
-    pub fn len(&self) -> usize {
-        self.map.len()
-    }
-
-    pub fn insert(&mut self, id: ChunkId, key: &str, value: &str) {
-        let kv = kv(key, value);
-        if let Some(v) = self.map.get_mut(&kv) {
-            v.push(id)
+    pub fn new<P: AsRef<Path>>(dirname: P) -> anyhow::Result<Self> {
+        let filename = dirname.as_ref().join("meta.db");
+        let conn = if filename.exists() {
+            sql::open_db(&filename)?
         } else {
-            self.map.insert(kv, vec![id]);
-        }
+            sql::create_db(&filename)?
+        };
+        Ok(Self {
+            filename,
+            conn,
+            map: HashMap::new(),
+            generations: vec![],
+            metas: HashMap::new(),
+        })
     }
 
-    pub fn remove(&mut self, key: &str, value: &str) {
-        let kv = kv(key, value);
-        self.map.remove(&kv);
+    pub fn insert_meta(&mut self, id: ChunkId, meta: ChunkMeta) -> anyhow::Result<()> {
+        let t = self.conn.transaction()?;
+        sql::insert(&t, &id, &meta)?;
+        t.commit()?;
+        Ok(())
     }
 
-    pub fn find(&self, key: &str, value: &str) -> Vec<ChunkId> {
-        let kv = kv(key, value);
-        if let Some(v) = self.map.get(&kv) {
-            v.clone()
-        } else {
-            vec![]
-        }
+    pub fn get_meta(&self, id: &ChunkId) -> anyhow::Result<ChunkMeta> {
+        sql::lookup(&self.conn, id)
     }
 
-    pub fn insert_generation(&mut self, id: ChunkId) {
-        self.generations.push(id)
+    pub fn remove_meta(&mut self, id: &ChunkId) -> anyhow::Result<()> {
+        sql::remove(&self.conn, id)
     }
 
-    pub fn remove_generation(&mut self, id: &ChunkId) {
-        self.generations = self
-            .generations
-            .iter()
-            .cloned()
-            .filter(|x| x != id)
-            .collect();
+    pub fn find_by_sha256(&self, sha256: &str) -> anyhow::Result<Vec<ChunkId>> {
+        sql::find_by_256(&self.conn, sha256)
     }
 
-    pub fn find_generations(&self) -> Vec<ChunkId> {
-        self.generations.clone()
+    pub fn find_generations(&self) -> anyhow::Result<Vec<ChunkId>> {
+        sql::find_generations(&self.conn)
     }
 }
 
-fn kv(key: &str, value: &str) -> (String, String) {
-    (key.to_string(), value.to_string())
-}
-
 #[cfg(test)]
 mod test {
-    use super::{ChunkId, Index};
+    use super::{ChunkId, ChunkMeta, Index};
+    use std::path::Path;
+    use tempfile::tempdir;
 
-    #[test]
-    fn is_empty_initially() {
-        let idx = Index::default();
-        assert!(idx.is_empty());
+    fn new_index(dirname: &Path) -> Index {
+        Index::new(dirname).unwrap()
     }
 
     #[test]
     fn remembers_inserted() {
         let id: ChunkId = "id001".parse().unwrap();
-        let mut idx = Index::default();
-        idx.insert(id.clone(), "sha256", "abc");
-        assert!(!idx.is_empty());
-        assert_eq!(idx.len(), 1);
-        let ids: Vec<ChunkId> = idx.find("sha256", "abc");
+        let meta = ChunkMeta::new("abc");
+        let dir = tempdir().unwrap();
+        let mut idx = new_index(dir.path());
+        idx.insert_meta(id.clone(), meta.clone()).unwrap();
+        assert_eq!(idx.get_meta(&id).unwrap(), meta);
+        let ids = idx.find_by_sha256("abc").unwrap();
         assert_eq!(ids, vec![id]);
     }
 
     #[test]
     fn does_not_find_uninserted() {
         let id: ChunkId = "id001".parse().unwrap();
-        let mut idx = Index::default();
-        idx.insert(id, "sha256", "abc");
-        assert_eq!(idx.find("sha256", "def").len(), 0)
+        let meta = ChunkMeta::new("abc");
+        let dir = tempdir().unwrap();
+        let mut idx = new_index(dir.path());
+        idx.insert_meta(id, meta).unwrap();
+        assert_eq!(idx.find_by_sha256("def").unwrap().len(), 0)
     }
 
     #[test]
     fn removes_inserted() {
         let id: ChunkId = "id001".parse().unwrap();
-        let mut idx = Index::default();
-        idx.insert(id.clone(), "sha256", "abc");
-        idx.remove("sha256", "abc");
-        let ids: Vec<ChunkId> = idx.find("sha256", "abc");
+        let meta = ChunkMeta::new("abc");
+        let dir = tempdir().unwrap();
+        let mut idx = new_index(dir.path());
+        idx.insert_meta(id.clone(), meta).unwrap();
+        idx.remove_meta(&id).unwrap();
+        let ids: Vec<ChunkId> = idx.find_by_sha256("abc").unwrap();
         assert_eq!(ids, vec![]);
     }
 
     #[test]
     fn has_no_generations_initially() {
-        let idx = Index::default();
-        assert_eq!(idx.find_generations(), vec![]);
+        let dir = tempdir().unwrap();
+        let idx = new_index(dir.path());
+        assert_eq!(idx.find_generations().unwrap(), vec![]);
     }
 
     #[test]
     fn remembers_generation() {
         let id: ChunkId = "id001".parse().unwrap();
-        let mut idx = Index::default();
-        idx.insert_generation(id.clone());
-        assert_eq!(idx.find_generations(), vec![id]);
+        let meta = ChunkMeta::new_generation("abc", "timestamp");
+        let dir = tempdir().unwrap();
+        let mut idx = new_index(dir.path());
+        idx.insert_meta(id.clone(), meta.clone()).unwrap();
+        assert_eq!(idx.find_generations().unwrap(), vec![id]);
     }
 
     #[test]
-    fn removes_generaion() {
+    fn removes_generation() {
         let id: ChunkId = "id001".parse().unwrap();
-        let mut idx = Index::default();
-        idx.insert_generation(id.clone());
-        idx.remove_generation(&id);
-        assert_eq!(idx.find_generations(), vec![]);
+        let meta = ChunkMeta::new_generation("abc", "timestamp");
+        let dir = tempdir().unwrap();
+        let mut idx = new_index(dir.path());
+        idx.insert_meta(id.clone(), meta.clone()).unwrap();
+        idx.remove_meta(&id).unwrap();
+        assert_eq!(idx.find_generations().unwrap(), vec![]);
     }
 }
+
+mod sql {
+    use crate::chunkid::ChunkId;
+    use crate::chunkmeta::ChunkMeta;
+    use crate::error::ObnamError;
+    use log::error;
+    use rusqlite::{params, Connection, OpenFlags, Row, Transaction};
+    use std::path::Path;
+
+    pub fn create_db(filename: &Path) -> anyhow::Result<Connection> {
+        let flags = OpenFlags::SQLITE_OPEN_CREATE | OpenFlags::SQLITE_OPEN_READ_WRITE;
+        let conn = Connection::open_with_flags(filename, flags)?;
+        conn.execute(
+            "CREATE TABLE chunks (id TEXT PRIMARY KEY, sha256 TEXT, generation INT, ended TEXT)",
+            params![],
+        )?;
+        conn.execute("CREATE INDEX sha256_idx ON chunks (sha256)", params![])?;
+        conn.execute(
+            "CREATE INDEX generation_idx ON chunks (generation)",
+            params![],
+        )?;
+        conn.pragma_update(None, "journal_mode", &"WAL")?;
+        Ok(conn)
+    }
+
+    pub fn open_db(filename: &Path) -> anyhow::Result<Connection> {
+        let flags = OpenFlags::SQLITE_OPEN_READ_WRITE;
+        let conn = Connection::open_with_flags(filename, flags)?;
+        conn.pragma_update(None, "journal_mode", &"WAL")?;
+        Ok(conn)
+    }
+
+    pub fn insert(t: &Transaction, chunkid: &ChunkId, meta: &ChunkMeta) -> anyhow::Result<()> {
+        let chunkid = format!("{}", chunkid);
+        let sha256 = meta.sha256();
+        let generation = if meta.is_generation() { 1 } else { 0 };
+        let ended = meta.ended();
+        t.execute(
+            "INSERT INTO chunks (id, sha256, generation, ended) VALUES (?1, ?2, ?3, ?4)",
+            params![chunkid, sha256, generation, ended],
+        )?;
+        Ok(())
+    }
+
+    pub fn remove(conn: &Connection, chunkid: &ChunkId) -> anyhow::Result<()> {
+        conn.execute("DELETE FROM chunks WHERE id IS ?1", params![chunkid])?;
+        Ok(())
+    }
+
+    pub fn lookup(conn: &Connection, id: &ChunkId) -> anyhow::Result<ChunkMeta> {
+        let mut stmt = conn.prepare("SELECT * FROM chunks WHERE id IS ?1")?;
+        let iter = stmt.query_map(params![id], |row| row_to_meta(row))?;
+        let mut metas: Vec<ChunkMeta> = vec![];
+        for meta in iter {
+            let meta = meta?;
+            if metas.is_empty() {
+                eprintln!("lookup: meta={:?}", meta);
+                metas.push(meta);
+            } else {
+                let err = ObnamError::DuplicateChunk(id.to_string());
+                error!("{}", err);
+                return Err(err.into());
+            }
+        }
+        if metas.len() == 0 {
+            eprintln!("lookup: no hits");
+            return Err(ObnamError::MissingChunk(format!("{}", id)).into());
+        }
+        let r = metas[0].clone();
+        Ok(r)
+    }
+
+    pub fn find_by_256(conn: &Connection, sha256: &str) -> anyhow::Result<Vec<ChunkId>> {
+        let mut stmt = conn.prepare("SELECT id FROM chunks WHERE sha256 IS ?1")?;
+        let iter = stmt.query_map(params![sha256], |row| row_to_id(row))?;
+        let mut ids = vec![];
+        for x in iter {
+            let x = x?;
+            ids.push(x);
+        }
+        Ok(ids)
+    }
+
+    pub fn find_generations(conn: &Connection) -> anyhow::Result<Vec<ChunkId>> {
+        let mut stmt = conn.prepare("SELECT id FROM chunks WHERE generation IS 1")?;
+        let iter = stmt.query_map(params![], |row| row_to_id(row))?;
+        let mut ids = vec![];
+        for x in iter {
+            let x = x?;
+            ids.push(x);
+        }
+        Ok(ids)
+    }
+
+    pub fn row_to_meta(row: &Row) -> rusqlite::Result<ChunkMeta> {
+        let sha256: String = row.get(row.column_index("sha256")?)?;
+        let generation: i32 = row.get(row.column_index("generation")?)?;
+        let meta = if generation == 0 {
+            ChunkMeta::new(&sha256)
+        } else {
+            let ended: String = row.get(row.column_index("ended")?)?;
+            ChunkMeta::new_generation(&sha256, &ended)
+        };
+        Ok(meta)
+    }
+
+    pub fn row_to_id(row: &Row) -> rusqlite::Result<ChunkId> {
+        let id: String = row.get(row.column_index("id")?)?;
+        Ok(ChunkId::from_str(&id))
+    }
+}
diff --git a/src/indexedstore.rs b/src/indexedstore.rs
index 3f6235f..0366013 100644
--- a/src/indexedstore.rs
+++ b/src/indexedstore.rs
@@ -2,87 +2,55 @@ use crate::chunk::DataChunk;
 use crate::chunkid::ChunkId;
 use crate::chunkmeta::ChunkMeta;
 use crate::index::Index;
-use crate::store::{LoadedChunk, Store};
-use std::path::{Path, PathBuf};
-use walkdir::WalkDir;
+use crate::store::Store;
+use std::path::Path;
 
 /// A store for chunks and their metadata.
 ///
 /// This combines Store and Index into one interface to make it easier
 /// to handle the server side storage of chunks.
 pub struct IndexedStore {
-    dirname: PathBuf,
     store: Store,
     index: Index,
 }
 
 impl IndexedStore {
-    pub fn new(dirname: &Path) -> Self {
+    pub fn new(dirname: &Path) -> anyhow::Result<Self> {
         let store = Store::new(dirname);
-        let index = Index::default();
-        Self {
-            dirname: dirname.to_path_buf(),
-            store,
-            index,
-        }
-    }
-
-    pub fn fill_index(&mut self) -> anyhow::Result<()> {
-        for entry in WalkDir::new(&self.dirname) {
-            let entry = entry?;
-            let path = entry.path();
-            // println!("found entry: {:?} (ext: {:?})", path, path.extension());
-            if let Some(ext) = path.extension() {
-                if ext == "meta" {
-                    println!("found meta: {:?}", path);
-                    let text = std::fs::read(path)?;
-                    let meta: ChunkMeta = serde_json::from_slice(&text)?;
-                    if let Some(stem) = path.file_stem() {
-                        let id: ChunkId = stem.into();
-                        println!("id: {:?}", id);
-                        self.insert_meta(&id, &meta);
-                    }
-                }
-            }
-            println!("");
-        }
-        Ok(())
+        let index = Index::new(dirname)?;
+        Ok(Self { store, index })
     }
 
     pub fn save(&mut self, meta: &ChunkMeta, chunk: &DataChunk) -> anyhow::Result<ChunkId> {
         let id = ChunkId::new();
         self.store.save(&id, meta, chunk)?;
-        self.insert_meta(&id, meta);
+        self.insert_meta(&id, meta)?;
         Ok(id)
     }
 
-    fn insert_meta(&mut self, id: &ChunkId, meta: &ChunkMeta) {
-        self.index.insert(id.clone(), "sha256", meta.sha256());
-        if meta.is_generation() {
-            self.index.insert_generation(id.clone());
-        }
+    fn insert_meta(&mut self, id: &ChunkId, meta: &ChunkMeta) -> anyhow::Result<()> {
+        self.index.insert_meta(id.clone(), meta.clone())?;
+        Ok(())
     }
 
-    pub fn load(&self, id: &ChunkId) -> anyhow::Result<LoadedChunk> {
-        self.store.load(id)
+    pub fn load(&self, id: &ChunkId) -> anyhow::Result<(DataChunk, ChunkMeta)> {
+        Ok((self.store.load(id)?, self.load_meta(id)?))
     }
 
     pub fn load_meta(&self, id: &ChunkId) -> anyhow::Result<ChunkMeta> {
-        self.store.load_meta(id)
+        self.index.get_meta(id)
     }
 
-    pub fn find_by_sha256(&self, sha256: &str) -> Vec<ChunkId> {
-        self.index.find("sha256", sha256)
+    pub fn find_by_sha256(&self, sha256: &str) -> anyhow::Result<Vec<ChunkId>> {
+        self.index.find_by_sha256(sha256)
     }
 
-    pub fn find_generations(&self) -> Vec<ChunkId> {
+    pub fn find_generations(&self) -> anyhow::Result<Vec<ChunkId>> {
         self.index.find_generations()
     }
 
     pub fn remove(&mut self, id: &ChunkId) -> anyhow::Result<()> {
-        let loaded = self.store.load(id)?;
-        self.index.remove("sha256", loaded.meta().sha256());
-        self.index.remove_generation(id);
+        self.index.remove_meta(id).unwrap();
         self.store.delete(id)?;
         Ok(())
     }
diff --git a/src/store.rs b/src/store.rs
index 123b9fa..e6cc71f 100644
--- a/src/store.rs
+++ b/src/store.rs
@@ -53,20 +53,12 @@ impl Store {
         Ok(())
     }
 
-    /// Load a chunk's metadata from a store.
-    pub fn load_meta(&self, id: &ChunkId) -> anyhow::Result<ChunkMeta> {
-        let (_, metaname, _) = &self.filenames(id);
-        let meta = std::fs::read(&metaname)?;
-        Ok(serde_json::from_slice(&meta)?)
-    }
-
     /// Load a chunk from a store.
-    pub fn load(&self, id: &ChunkId) -> anyhow::Result<LoadedChunk> {
+    pub fn load(&self, id: &ChunkId) -> anyhow::Result<DataChunk> {
         let (_, _, dataname) = &self.filenames(id);
-        let meta = self.load_meta(id)?;
         let data = std::fs::read(&dataname)?;
         let data = DataChunk::new(data);
-        Ok(LoadedChunk { meta, data })
+        Ok(data)
     }
 
     /// Delete a chunk from a store.
@@ -77,22 +69,3 @@ impl Store {
         Ok(())
     }
 }
-
-pub struct LoadedChunk {
-    meta: ChunkMeta,
-    data: DataChunk,
-}
-
-impl LoadedChunk {
-    pub fn new(meta: ChunkMeta, data: DataChunk) -> Self {
-        Self { meta, data }
-    }
-
-    pub fn meta(&self) -> &ChunkMeta {
-        &self.meta
-    }
-
-    pub fn data(&self) -> &DataChunk {
-        &self.data
-    }
-}
-- 
cgit v1.2.1
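Not part of the patch itself: a minimal sketch of how the SQLite-backed Index introduced above is used, based only on the API visible in this diff (Index::new, insert_meta, get_meta, find_by_sha256). The directory path is made up and must already exist, since Index::new only creates meta.db inside it.

    use obnam::chunkid::ChunkId;
    use obnam::chunkmeta::ChunkMeta;
    use obnam::index::Index;

    fn main() -> anyhow::Result<()> {
        // Open (or create) meta.db inside an existing directory.
        // The path here is only an example.
        let mut index = Index::new("/tmp/obnam-chunks")?;

        // Record one chunk's metadata; this is what IndexedStore::save does.
        let id: ChunkId = "id001".parse().unwrap();
        index.insert_meta(id.clone(), ChunkMeta::new("abc"))?;

        // Look the chunk up again, by id and by checksum.
        assert_eq!(index.get_meta(&id)?.sha256(), "abc");
        assert_eq!(index.find_by_sha256("abc")?, vec![id]);
        Ok(())
    }

The schema behind this is the one sql::create_db sets up: a single chunks table with id, sha256, generation and ended columns, plus indexes on sha256 and generation, so the server no longer has to walk the chunk directory at startup to rebuild the index.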