Handle SQLite constraint violations
All checks were successful
continuous-integration/drone/push Build is passing
Fixes #43, #44
parent 4b2afc261c
commit 8bd66dcf3e
@@ -5,7 +5,8 @@ use clap::{crate_authors, crate_description, crate_version, Clap};
 use env_logger::Env;

 use yama::commands::{fully_integrate_pointer_node, load_pile_descriptor, open_pile};
-use yama::operations::extracting;
+use yama::operations::checking::VacuumMode;
+use yama::operations::{checking, extracting};
 use yama::pile::{Pile, PileDescriptor, RawPile};
 use yama::{commands, debug};

@@ -43,7 +44,10 @@ enum PileCommand {
     /// Check this yama pile for corruption.
     Check {
         #[clap(long)]
-        gc: bool,
+        apply_gc: bool,
+
+        #[clap(long)]
+        dry_run_gc: bool,
     },
     /// Enter a debug prompt for manually operating on the yama pile.
     Debug,
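Note (not part of the commit): with the clap derive style used throughout this file, each #[clap(long)] boolean field becomes a long flag named after the field in kebab-case, so the check subcommand gains --apply-gc and --dry-run-gc. A minimal stand-alone sketch of that behaviour, assuming the same clap 3 beta derive as the surrounding code; the CheckArgs struct below is a hypothetical mirror for illustration only:

use clap::Clap;

/// Hypothetical mirror of the new Check arguments (illustration only).
#[derive(Clap, Debug)]
struct CheckArgs {
    /// Apply garbage collection if the check passes.
    #[clap(long)]
    apply_gc: bool,

    /// Report what garbage collection would do, without applying it.
    #[clap(long)]
    dry_run_gc: bool,
}

fn main() {
    // The derive turns snake_case field names into kebab-case long flags;
    // the first element stands in for the binary name.
    let args = CheckArgs::parse_from(vec!["check", "--dry-run-gc"]);
    assert!(args.dry_run_gc && !args.apply_gc);
}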
@@ -124,8 +128,22 @@ fn main() -> anyhow::Result<()> {
             // todo >2 workers
             extracting::extract(destination, &mut root_tree_node.node, &pile, true, 2)?;
         }
-        PileCommand::Check { gc: _ } => {
-            unimplemented!()
+        PileCommand::Check {
+            apply_gc,
+            dry_run_gc,
+        } => {
+            let vacuum_mode = if *dry_run_gc {
+                VacuumMode::DryRunVacuum
+            } else if *apply_gc {
+                VacuumMode::Vacuum
+            } else {
+                VacuumMode::NoVacuum
+            };
+            let (pdesc, pile) = open_pile()?;
+            let error_count = checking::check_deep(pile, vacuum_mode, true)?;
+            if error_count > 0 {
+                eprintln!("THERE ARE {} ERRORS.", error_count);
+            }
         }
         PileCommand::Init {} => {
             commands::init(".".as_ref())?;
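Note (not part of the commit): in the new Check arm, dry_run_gc is tested before apply_gc, so passing both flags results in a dry run rather than an actual vacuum. A self-contained sketch of that precedence; the VacuumMode enum here is a local stand-in for yama::operations::checking::VacuumMode, and the vacuum_mode helper is hypothetical:

/// Local stand-in for yama::operations::checking::VacuumMode (illustration only).
#[derive(Debug, PartialEq)]
enum VacuumMode {
    NoVacuum,
    DryRunVacuum,
    Vacuum,
}

/// Hypothetical helper mirroring the flag precedence in the new Check arm:
/// a dry run wins over an actual vacuum when both flags are supplied.
fn vacuum_mode(apply_gc: bool, dry_run_gc: bool) -> VacuumMode {
    if dry_run_gc {
        VacuumMode::DryRunVacuum
    } else if apply_gc {
        VacuumMode::Vacuum
    } else {
        VacuumMode::NoVacuum
    }
}

fn main() {
    assert_eq!(vacuum_mode(false, false), VacuumMode::NoVacuum);
    assert_eq!(vacuum_mode(true, false), VacuumMode::Vacuum);
    // Both flags set: the dry-run branch is checked first, so nothing is vacuumed.
    assert_eq!(vacuum_mode(true, true), VacuumMode::DryRunVacuum);
}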
@@ -9,13 +9,15 @@ use std::sync::{Arc, Condvar, Mutex};

 use anyhow::{bail, Context};
 use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
+use log::warn;
 use nix::unistd::sync;
-use rusqlite::params;
+use rusqlite::{params, Error};
 use rusqlite::{Connection, OptionalExtension};

 use crate::definitions::ChunkId;
 use crate::pile::{Keyspace, RawPile};
 use crate::utils::bytes_to_hexstring;
+use rusqlite::ffi::ErrorCode::ConstraintViolation;

 /// Bloblogs will not be reused if they are already 2 GiB large.
 pub const MAX_BLOBLOG_REUSE_SIZE: u64 = 2 * 1024 * 1024 * 1024;
@@ -257,11 +259,28 @@ impl SqliteBloblogPile {
     fn put_chunk_pointer(&self, chunk_id: &ChunkId, pointer: BloblogPointer) -> anyhow::Result<()> {
         let inner = self.inner.lock().unwrap();
         let offset_i64 = i64::try_from(pointer.offset).expect("ouch! can't turn u64 into i64...");
-        inner.connection.execute(
+        match inner.connection.execute(
             "INSERT INTO chunks (chunk_id, bloblog, offset) VALUES (?1, ?2, ?3)",
             params![&chunk_id[..], pointer.bloblog, offset_i64],
-        )?;
-        Ok(())
+        ) {
+            Ok(_) => Ok(()),
+            Err(Error::SqliteFailure(e, str)) => {
+                if e.code == ConstraintViolation {
+                    warn!(
+                        "(ignoring) SQLite constraint violation on insertion... {:?}",
+                        str
+                    );
+                    Ok(())
+                } else {
+                    Err(Error::SqliteFailure(e, str))?;
+                    unreachable!();
+                }
+            }
+            other => {
+                other?;
+                unreachable!();
+            }
+        }
     }
 }

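For reference (not from this repository): the new match arm relies on rusqlite reporting a violated UNIQUE/PRIMARY KEY constraint as Error::SqliteFailure whose code is ErrorCode::ConstraintViolation. Below is a minimal, self-contained sketch of that behaviour against an in-memory database; the table schema is improvised from the INSERT statement above, and the example assumes the rusqlite and anyhow crates that this file already uses:

use rusqlite::ffi::ErrorCode::ConstraintViolation;
use rusqlite::{params, Connection, Error};

fn main() -> anyhow::Result<()> {
    let conn = Connection::open_in_memory()?;
    // Improvised schema: a primary key on chunk_id makes the second insert collide.
    conn.execute_batch(
        "CREATE TABLE chunks (chunk_id BLOB PRIMARY KEY, bloblog INTEGER, offset INTEGER)",
    )?;

    let insert = |conn: &Connection| {
        conn.execute(
            "INSERT INTO chunks (chunk_id, bloblog, offset) VALUES (?1, ?2, ?3)",
            params![&b"chunk-00"[..], 1, 0],
        )
    };

    // First insertion succeeds.
    insert(&conn)?;

    // Inserting the same primary key again fails with a constraint violation,
    // surfaced as Error::SqliteFailure carrying ErrorCode::ConstraintViolation.
    match insert(&conn) {
        Err(Error::SqliteFailure(e, msg)) if e.code == ConstraintViolation => {
            eprintln!("duplicate chunk pointer ignored: {:?}", msg);
        }
        other => {
            // Any other outcome (success or a different error) is unexpected here.
            other?;
        }
    }
    Ok(())
}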