Aggregate reports by month and reorder sections
parent 3637b00f38
commit 01c98cb415
@@ -114,6 +114,10 @@ pub enum DatmanCommand {
     Report {
         /// Name of the pile to report on.
         pile_name: String,
+
+        /// Don't summarise months.
+        #[clap(long)]
+        individual: bool,
     },
 
     #[clap(name = "_backup_source_responder")]
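For context: a bool field with #[clap(long)] derives an optional --individual flag that defaults to false when absent. A minimal, self-contained sketch of the same pattern, assuming the clap 3 derive API that the attributes above suggest (the real datman CLI has many more subcommands than shown here):

use clap::{Parser, Subcommand};

#[derive(Parser)]
struct Cli {
    #[clap(subcommand)]
    command: DatmanCommand,
}

#[derive(Subcommand)]
enum DatmanCommand {
    Report {
        /// Name of the pile to report on.
        pile_name: String,

        /// Don't summarise months.
        #[clap(long)]
        individual: bool,
    },
}

fn main() {
    // e.g. `datman report mypile --individual`
    let cli = Cli::parse();
    match cli.command {
        DatmanCommand::Report { pile_name, individual } => {
            println!("pile={pile_name}, individual={individual}");
        }
    }
}

With this, the report subcommand presumably summarises chunk usage by month by default, and --individual restores the per-pointer breakdown (note the !individual inversion in the next hunk).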
@@ -313,13 +317,17 @@ fn main() -> anyhow::Result<()> {
             backup_source_responder::handler_stdio()?;
         }
 
-        DatmanCommand::Report { pile_name } => {
+        DatmanCommand::Report {
+            pile_name,
+            individual,
+        } => {
             let descriptor = load_descriptor(Path::new(".")).unwrap();
             let destination = &descriptor.piles[&pile_name];
-            let report = datman::commands::report::generate_report(destination, &descriptor)?;
+            let report =
+                datman::commands::report::generate_report(destination, &descriptor, !individual)?;
 
-            datman::commands::report::print_report(&report)?;
             datman::commands::report::print_filesystem_space(&destination.path)?;
+            datman::commands::report::print_report(&report)?;
         }
     }
     Ok(())
@@ -29,6 +29,7 @@ use yama::pile::{DebugStatistics, Pile, RawPile};
 pub struct Report {
     pub last_source_backups: BTreeMap<String, Option<DateTime<Utc>>>,
 
+    pub chunk_usages_aggregated: bool,
     pub chunk_usage: BTreeMap<String, Sizes>,
 
     pub debug_stats: Option<DebugStatistics>,
@@ -64,6 +65,7 @@ fn condense_chunk_id(chunk_id: ChunkId) -> CondensedChunkId {
 pub fn generate_report(
     dest_pile_descriptor: &DestPileDescriptor,
     descriptor: &Descriptor,
+    aggregate_chunk_usage_by_month: bool,
 ) -> anyhow::Result<Report> {
     let pile_descriptor = load_pile_descriptor(&dest_pile_descriptor.path)?;
     let pile = open_pile(&dest_pile_descriptor.path, &pile_descriptor)?;
@@ -71,6 +73,7 @@ pub fn generate_report(
     let debug_stats = pile.raw_pile.debug_statistics()?;
 
     let mut pointers_to_parent_and_chunkids = BTreeMap::new();
+    let mut pointergroups_to_pointers: BTreeMap<String, Vec<String>> = BTreeMap::new();
 
     info!("Collecting chunk IDs... This will probably be slow.");
     for pointer_name in pile.list_pointers()? {
@@ -79,6 +82,20 @@ pub fn generate_report(
             .context("listed pointer doesn't exist")?;
         let root_node = retrieve_tree_node(&pile, pointer.chunk_ref)?;
         let pointer_chunk_ids = collect_chunk_ids(&pile, &root_node.node)?;
+
+        let pointergroup = if aggregate_chunk_usage_by_month {
+            let (base, date_time) =
+                split_pointer_name(&pointer_name).context("Can't split pointer name")?;
+            format!("{}+{}", base, date_time.format("%Y-%m"))
+        } else {
+            pointer_name.clone()
+        };
+
+        pointergroups_to_pointers
+            .entry(pointergroup)
+            .or_default()
+            .push(pointer_name.clone());
+
         pointers_to_parent_and_chunkids
             .insert(pointer_name, (pointer.parent_pointer, pointer_chunk_ids));
     }
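The grouping key above is the pointer's base name plus the month of its timestamp, so all backups of one source within a month land in the same pointer group. A standalone sketch of the idea; the base+timestamp name layout and the %Y-%m-%d_%H:%M:%S pattern are assumptions inferred from the format! call, and split_pointer_name here is a simplified stand-in for datman's real parser:

use chrono::NaiveDateTime;
use std::collections::BTreeMap;

// Simplified stand-in for datman's split_pointer_name: split "base+timestamp"
// on the final '+' and parse the timestamp (the format string is an assumption).
fn split_pointer_name(pointer_name: &str) -> Option<(&str, NaiveDateTime)> {
    let (base, ts) = pointer_name.rsplit_once('+')?;
    let date_time = NaiveDateTime::parse_from_str(ts, "%Y-%m-%d_%H:%M:%S").ok()?;
    Some((base, date_time))
}

fn main() {
    let pointer_names = [
        "data+2021-05-01_00:00:00",
        "data+2021-05-08_00:00:00",
        "data+2021-06-01_00:00:00",
    ];

    // Group pointers under a "base+YYYY-MM" key, as the diff does.
    let mut pointergroups_to_pointers: BTreeMap<String, Vec<&str>> = BTreeMap::new();
    for name in pointer_names {
        let (base, date_time) = split_pointer_name(name).expect("can't split pointer name");
        pointergroups_to_pointers
            .entry(format!("{}+{}", base, date_time.format("%Y-%m")))
            .or_default()
            .push(name);
    }

    // => "data+2021-05" holds two pointers, "data+2021-06" holds one.
    println!("{:#?}", pointergroups_to_pointers);
}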
@@ -87,12 +104,18 @@ pub fn generate_report(
     // At the same time, we can also calculate 'rollup' sizes.
     let mut chunk_sharer_counts: BTreeMap<CondensedChunkId, u16> = BTreeMap::new();
 
-    let mut pointer_stats: BTreeMap<String, Sizes> = BTreeMap::new();
+    let mut pointergroup_stats: BTreeMap<String, Sizes> = BTreeMap::new();
 
-    for pointer_name in pointers_to_parent_and_chunkids.keys().rev() {
-        let deduped_chunks: BTreeSet<CondensedChunkId> =
-            iter_over_all_chunkids_incl_parents(&pointers_to_parent_and_chunkids, &pointer_name)
-                .collect();
+    for (pointergroup_name, pointers_in_group) in pointergroups_to_pointers.iter().rev() {
+        let mut deduped_chunks = BTreeSet::new();
+
+        for pointer_name in pointers_in_group {
+            deduped_chunks.extend(iter_over_all_chunkids_incl_parents(
+                &pointers_to_parent_and_chunkids,
+                &pointer_name,
+            ))
+        }
+
         let mut rollup_count = 0;
         for chunk in deduped_chunks {
             let count = chunk_sharer_counts.entry(chunk).or_default();
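A note on the .rev(): BTreeMap iterates keys in ascending order, so reversing the iterator visits base+%Y-%m groups newest-month-first, which appears to be what the rollup attribution depends on. A quick illustration:

use std::collections::BTreeMap;

fn main() {
    // BTreeMap iterates keys in ascending lexicographic order, so
    // reversing the iterator visits "base+YYYY-MM" keys newest-first.
    let groups: BTreeMap<&str, Vec<&str>> = BTreeMap::from([
        ("data+2021-05", vec!["data+2021-05-01_00:00:00"]),
        ("data+2021-06", vec!["data+2021-06-01_00:00:00"]),
    ]);
    for (group, pointers) in groups.iter().rev() {
        // Prints the 2021-06 group before the 2021-05 group.
        println!("{group}: {pointers:?}");
    }
}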
@@ -101,15 +124,23 @@ pub fn generate_report(
                 rollup_count += 1;
             }
         }
-        let entry = pointer_stats.entry(pointer_name.to_owned()).or_default();
+        let entry = pointergroup_stats
+            .entry(pointergroup_name.to_owned())
+            .or_default();
         entry.rollup = rollup_count;
     }
 
     // Now go through again and update all the stats!
-    for pointer_name in pointers_to_parent_and_chunkids.keys().rev() {
-        let deduped_chunks: BTreeSet<CondensedChunkId> =
-            iter_over_all_chunkids_incl_parents(&pointers_to_parent_and_chunkids, &pointer_name)
-                .collect();
+    for (pointergroup_name, pointers_in_group) in &pointergroups_to_pointers {
+        let mut deduped_chunks = BTreeSet::new();
+
+        for pointer_name in pointers_in_group {
+            deduped_chunks.extend(iter_over_all_chunkids_incl_parents(
+                &pointers_to_parent_and_chunkids,
+                &pointer_name,
+            ))
+        }
+
         let mut unique_count = 0;
         let mut shared_count_by_sharers = [0u32; 256];
         let total_count = deduped_chunks.len();
@@ -128,7 +159,9 @@ pub fn generate_report(
             sharers_sum += (count as f64) / (sharers_minus_one + 1) as f64;
         }
 
-        let entry = pointer_stats.entry(pointer_name.to_owned()).or_default();
+        let entry = pointergroup_stats
+            .entry(pointergroup_name.to_owned())
+            .or_default();
         entry.moral = (sharers_sum.ceil() as u32) + unique_count;
         entry.unique = unique_count;
         entry.total = total_count as u32;
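The "moral" accounting above charges a chunk shared by n groups at 1/n to each group, while unique chunks count in full, so group sizes roughly partition the deduplicated total. A self-contained sketch of that arithmetic; Sizes and the integer chunk IDs are simplified stand-ins for datman's real types:

use std::collections::{BTreeMap, BTreeSet};

#[derive(Debug, Default)]
struct Sizes {
    total: u32,
    unique: u32,
    moral: u32,
}

fn main() {
    // Toy input: which chunk IDs each pointer group references.
    // Chunk 3 is shared by both groups; the rest are unique.
    let groups: BTreeMap<&str, BTreeSet<u32>> = BTreeMap::from([
        ("data+2021-05", BTreeSet::from([1, 2, 3])),
        ("data+2021-06", BTreeSet::from([3, 4])),
    ]);

    // First pass: count how many groups share each chunk.
    let mut sharers: BTreeMap<u32, u32> = BTreeMap::new();
    for chunks in groups.values() {
        for &chunk in chunks {
            *sharers.entry(chunk).or_default() += 1;
        }
    }

    // Second pass: unique chunks count fully towards 'moral';
    // shared chunks contribute 1/sharers each.
    for (group, chunks) in &groups {
        let mut sizes = Sizes {
            total: chunks.len() as u32,
            ..Default::default()
        };
        let mut shared_sum = 0f64;
        for chunk in chunks {
            match sharers[chunk] {
                1 => sizes.unique += 1,
                n => shared_sum += 1.0 / n as f64,
            }
        }
        sizes.moral = sizes.unique + shared_sum.ceil() as u32;
        println!("{group}: {sizes:?}");
        // data+2021-05: total 3, unique 2, moral 3 (2 + ceil(0.5))
        // data+2021-06: total 2, unique 1, moral 2 (1 + ceil(0.5))
    }
}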
@@ -147,7 +180,8 @@ pub fn generate_report(
 
     Ok(Report {
         last_source_backups: last_backed_up,
-        chunk_usage: pointer_stats,
+        chunk_usage: pointergroup_stats,
+        chunk_usages_aggregated: aggregate_chunk_usage_by_month,
         debug_stats,
     })
 }