Add yama compact command

Olivier 'reivilibre' 2022-11-19 15:49:09 +00:00
parent cf502b7f7e
commit b5e9e55cad
3 changed files with 113 additions and 1 deletion

@@ -29,7 +29,8 @@ use yama::operations::checking::VacuumMode;
 use yama::operations::legacy_pushpull::{
     determine_bypass_level, open_pile_with_work_bypass, push_to,
 };
-use yama::operations::{checking, extracting};
+use yama::operations::{checking, cleanup, extracting};
+use yama::pile::local_sqlitebloblogs::CompactionThresholds;
 use yama::pile::{Pile, PileDescriptor, RawPile};
 use yama::{commands, debug};
@@ -83,6 +84,29 @@ enum PileCommand {
         shallow: bool,
     },
+    Compact {
+        /// Don't actually perform any compaction; just plan it out.
+        #[clap(long)]
+        dry_run: bool,
+
+        /// Allocated size under which a bloblog is considered small.
+        #[clap(long = "small")]
+        small_thresh: Option<u64>,
+
+        /// Minimum amount of space to reclaim in order to run compaction for reclaim.
+        #[clap(long = "reclaim")]
+        min_reclaim: Option<u64>,
+
+        /// Maximum amount of space that can be deallocated in a bloblog before we consider it
+        /// worthwhile to replace.
+        #[clap(long = "max-dealloc")]
+        max_deallocated: Option<u64>,
+
+        /// Minimum number of mergeable small bloblogs in order to run compaction for merge.
+        #[clap(long)]
+        mergeable: Option<u32>,
+    },
+
     /// Enter a debug prompt for manually operating on the yama pile.
     Debug { supplied_command: Vec<String> },
@@ -191,6 +215,29 @@ fn wrapped_main() -> anyhow::Result<i32> {
                 return Ok(1);
             }
         }
+        PileCommand::Compact {
+            dry_run,
+            small_thresh,
+            min_reclaim,
+            max_deallocated,
+            mergeable,
+        } => {
+            let this_dir = Path::new(".");
+            let descriptor =
+                load_pile_descriptor(this_dir).context("Failed to load pile descriptor")?;
+            cleanup::compact(
+                this_dir,
+                &descriptor,
+                !*dry_run,
+                true,
+                CompactionThresholds {
+                    minimum_to_reclaim: min_reclaim.unwrap_or(2 * 1024 * 1024 * 1024),
+                    minimum_small_bloblogs_to_merge: mergeable.unwrap_or(64),
+                    cond_if_more_deallocated_than: max_deallocated.unwrap_or(256 * 1024 * 1024),
+                    cond_if_less_allocated_than: small_thresh.unwrap_or(64 * 1024 * 1024),
+                },
+            )?;
+        }
         PileCommand::Init {} => {
             commands::init(".".as_ref())?;
         }
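
Defaults are applied at the CLI layer via unwrap_or, so a bare "yama compact" uses 2 GiB for --reclaim, 64 for --mergeable, 256 MiB for --max-dealloc and 64 MiB for --small (flag names per the #[clap] attributes above; clap kebab-cases dry_run to --dry-run). The CompactionThresholds struct itself lives in yama::pile::local_sqlitebloblogs and its definition is not part of this diff; a sketch of its shape, inferred only from the field names and option types used in the match arm above (the real definition may differ):

pub struct CompactionThresholds {
    // Inferred sketch; the real definition in local_sqlitebloblogs may differ.
    /// Reclaim-driven compaction runs only if at least this many bytes are
    /// reclaimable (--reclaim, default 2 GiB).
    pub minimum_to_reclaim: u64,
    /// Merge-driven compaction runs only with at least this many small
    /// bloblogs (--mergeable, default 64).
    pub minimum_small_bloblogs_to_merge: u32,
    /// A bloblog becomes worth replacing once more than this many bytes in it
    /// are deallocated (--max-dealloc, default 256 MiB).
    pub cond_if_more_deallocated_than: u64,
    /// A bloblog counts as small while it has less than this many bytes
    /// allocated (--small, default 64 MiB).
    pub cond_if_less_allocated_than: u64,
}

The next two diffs register the new operations::cleanup module and add its implementation.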

@@ -1,4 +1,5 @@
 pub mod checking;
+pub mod cleanup;
 pub mod extracting;
 pub mod legacy_pushpull;
 pub mod storing;

@@ -0,0 +1,64 @@
+use crate::pile::local_sqlitebloblogs::{CompactionThresholds, SqliteBloblogPile};
+use crate::pile::{PileDescriptor, PileStorage};
+use anyhow::{bail, Context};
+use indicatif::{ProgressBar, ProgressDrawTarget, ProgressStyle};
+use log::info;
+use std::path::Path;
+
+pub fn compact(
+    pile_path: &Path,
+    pile_desc: &PileDescriptor,
+    actually_run: bool,
+    make_progress_bar: bool,
+    thresholds: CompactionThresholds,
+) -> anyhow::Result<()> {
+    let pbar = if make_progress_bar {
+        ProgressBar::with_draw_target(1000 as u64, ProgressDrawTarget::stdout_with_hz(10))
+    } else {
+        ProgressBar::hidden()
+    };
+    pbar.set_style(
+        ProgressStyle::default_bar()
+            .template("[{elapsed_precise}]/[{eta}] {bar:40.cyan/blue} {pos:>7}/{len:7} {msg}"),
+    );
+    pbar.set_message("compacting");
+
+    match pile_desc.storage {
+        PileStorage::SqliteIndexedBloblog => {
+            let bloblog_pile = SqliteBloblogPile::open(&pile_path)
+                .context("Failed to open SQLite-indexed Bloblog Pile")?;
+            compact_bloblogs(bloblog_pile, pbar, actually_run, thresholds)?;
+            Ok(())
+        }
+        other @ PileStorage::RemoteOnly => {
+            bail!("Cannot use compaction on this kind of pile: {other:?}!");
+        }
+    }
+}
+
+fn compact_bloblogs(
+    bloblog_pile: SqliteBloblogPile,
+    pbar: ProgressBar,
+    actually_run: bool,
+    thresholds: CompactionThresholds,
+) -> anyhow::Result<()> {
+    info!("=== Analysing for compaction ===");
+    let analysis = bloblog_pile.analyse_for_compaction()?;
+
+    let chunks_total: u64 = analysis.values().map(|bs| bs.chunks_total).sum();
+    let chunks_deleted: u64 = analysis.values().map(|bs| bs.chunks_deleted).sum();
+    let bytes_total: u64 = analysis.values().map(|bs| bs.bytes_total).sum();
+    let bytes_deleted: u64 = analysis.values().map(|bs| bs.bytes_deleted).sum();
+    info!("{} bloblogs in this pile, with {chunks_total} chunks ({bytes_total} B) of which {chunks_deleted} ({bytes_deleted} B) are deleted.", analysis.len());
+
+    info!("=== Planning compaction ===");
+    let plan = bloblog_pile.plan_compaction(&thresholds, analysis)?;
+    info!("Planned compaction: replace {} bloblogs (of which {} are small), freeing up {} B and rewriting {} B", plan.bloblogs_to_replace.len(), plan.small_bloblogs, plan.reclaimable_space, plan.bytes_to_write);
+
+    if actually_run {
+        info!("=== Compacting ===");
+        bloblog_pile.perform_compaction(Box::new(pbar), plan)?;
+    }
+
+    Ok(())
+}
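
The flow is analyse, plan, perform: analyse_for_compaction gathers per-bloblog chunk and byte statistics, plan_compaction applies the thresholds to pick which bloblogs to replace, and perform_compaction only runs when actually_run is true (the CLI passes !*dry_run), so a dry run still logs the planned reclaim. A minimal sketch of driving the new operation from code rather than the CLI, assuming the same defaults the CLI applies and that load_pile_descriptor is exported from yama::commands (its import is not shown in this diff):

use std::path::Path;

use anyhow::Context;
use yama::commands::load_pile_descriptor; // assumed location; the import is not shown above
use yama::operations::cleanup;
use yama::pile::local_sqlitebloblogs::CompactionThresholds;

fn compact_pile_here(dry_run: bool) -> anyhow::Result<()> {
    let pile_dir = Path::new(".");
    let descriptor =
        load_pile_descriptor(pile_dir).context("Failed to load pile descriptor")?;
    cleanup::compact(
        pile_dir,
        &descriptor,
        !dry_run, // actually_run: when false, only analyse and plan, logging the result
        false,    // make_progress_bar: use the hidden bar when embedding in another tool
        CompactionThresholds {
            minimum_to_reclaim: 2 * 1024 * 1024 * 1024, // 2 GiB, as in the CLI default
            minimum_small_bloblogs_to_merge: 64,
            cond_if_more_deallocated_than: 256 * 1024 * 1024, // 256 MiB
            cond_if_less_allocated_than: 64 * 1024 * 1024,    // 64 MiB
        },
    )
}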