diff --git a/yama/src/chunking.rs b/yama/src/chunking.rs
index 83daaf0..c643c0e 100644
--- a/yama/src/chunking.rs
+++ b/yama/src/chunking.rs
@@ -161,6 +161,8 @@ impl<'cst, CST: ChunkSubmissionTarget> Write for RecursiveChunker<'cst, CST> {
 
 #[inline]
 pub fn calculate_chunkid(chunk: &[u8]) -> ChunkId {
+    // TODO(newver) Allow pluggable chunkID calculations so that encrypted storage can work without
+    // leaking contents.
     let mut chunk_id: ChunkId = Default::default();
     blake::hash(256, &chunk, &mut chunk_id).expect("BLAKE problem");
     chunk_id
diff --git a/yama/src/operations/storing.rs b/yama/src/operations/storing.rs
index f9afa74..9a83137 100644
--- a/yama/src/operations/storing.rs
+++ b/yama/src/operations/storing.rs
@@ -265,7 +265,10 @@ pub fn store_fully(
     let (control_tx, control_rx) = crossbeam_channel::unbounded();
     let pile2 = pile.clone();
     let pipeline = pile.raw_pile.build_storage_pipeline(sps, control_tx)?;
+
+    // TODO(newver) The existence checker stage should be able to be swapped between different implementations.
     let pipeline = existence_checker_stage(pile2, pipeline);
+
     store(
         &root_dir,
         &mut root_node,
@@ -309,6 +312,7 @@ pub fn store_fully(
         gid_lookup,
     };
 
+    // TODO(newver) Allow the pointer to be returned separately.
     pile.write_pointer(&new_pointer_name, &pointer_data)?;
     pile.flush()?;
     Ok(())
diff --git a/yama/src/pile.rs b/yama/src/pile.rs
index 544b667..4c04f46 100644
--- a/yama/src/pile.rs
+++ b/yama/src/pile.rs
@@ -87,6 +87,8 @@ pub fn existence_checker_stage<RP: RawPile>(
     pile: Arc<Pile<RP>>,
     next_stage: Sender<(ChunkId, Vec<u8>)>,
 ) -> Sender<(ChunkId, Vec<u8>)> {
+    // TODO(newver) Do better than this.
+
     let shared_seen_set: Arc<Mutex<HashSet<ChunkId>>> = Default::default();
 
     let (tx, rx) = crossbeam_channel::bounded::<(ChunkId, Vec<u8>)>(32);
@@ -123,6 +125,8 @@ pub enum ControllerMessage {
     },
 }
 
+// TODO(newver) Make piles async
+
 pub trait RawPile: Send + Sync + Debug + 'static {
     // TODO expose verification errors?
     fn exists(&self, kind: Keyspace, key: &[u8]) -> anyhow::Result<bool>;
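
For context on the first TODO above: an unkeyed content hash as the chunk ID lets anyone who can see the pile check whether a known plaintext chunk is stored in it, which defeats the point of encrypting the contents. The sketch below is one possible shape for a pluggable calculation, not yama's actual design; the names ChunkIdCalculator, PlainBlake and KeyedBlake are hypothetical, and it assumes ChunkId is the 32-byte BLAKE-256 output that calculate_chunkid produces today.

    // Hypothetical sketch only; none of these items exist in yama.
    pub type ChunkId = [u8; 32]; // assumed to match yama's ChunkId

    /// Turns chunk bytes into a ChunkId; implementations could be keyed or unkeyed.
    pub trait ChunkIdCalculator: Send + Sync {
        fn calculate(&self, chunk: &[u8]) -> ChunkId;
    }

    /// Unkeyed BLAKE-256, mirroring the current calculate_chunkid behaviour.
    pub struct PlainBlake;

    impl ChunkIdCalculator for PlainBlake {
        fn calculate(&self, chunk: &[u8]) -> ChunkId {
            let mut chunk_id: ChunkId = Default::default();
            blake::hash(256, chunk, &mut chunk_id).expect("BLAKE problem");
            chunk_id
        }
    }

    /// Keyed variant: mixes a secret key into the hash so chunk IDs do not
    /// reveal which plaintext chunks are present in the pile.
    pub struct KeyedBlake {
        key: [u8; 32],
    }

    impl ChunkIdCalculator for KeyedBlake {
        fn calculate(&self, chunk: &[u8]) -> ChunkId {
            // Simplest possible keying: hash key || chunk. A real implementation
            // would more likely use a dedicated keyed hash (e.g. BLAKE3's keyed mode).
            let mut keyed = Vec::with_capacity(self.key.len() + chunk.len());
            keyed.extend_from_slice(&self.key);
            keyed.extend_from_slice(chunk);
            let mut chunk_id: ChunkId = Default::default();
            blake::hash(256, &keyed, &mut chunk_id).expect("BLAKE problem");
            chunk_id
        }
    }

Under this sort of scheme, calculate_chunkid would become a thin wrapper over whichever calculator the pile is configured with, so plain piles keep the current behaviour while encrypted piles can use a keyed hash.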