Run cargo fix
continuous-integration/drone: the build was successful

Olivier 'reivilibre' 2022-01-11 07:33:26 +00:00
parent bedb9785dc
commit 8f3b211d83
6 changed files with 8 additions and 8 deletions
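
All six changes below were produced automatically: cargo fix applies the compiler's machine-applicable suggestions, and every edit in this commit comes from the unused_variables or unused_mut lint. A minimal sketch of the kind of rewrite involved (hypothetical code, not from this repository):

fn compute() -> u32 {
    42
}

fn main() {
    // Before the fix, `let value = compute();` would warn:
    // "unused variable: `value`". cargo fix rewrites the binding
    // with a leading underscore to mark it intentionally unused:
    let _value = compute();
}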


@@ -173,7 +173,7 @@ fn main() -> anyhow::Result<()> {
     let now = Utc::now();
-    let (shard, stopper) = BareMetricsRecorderCore::new(File::create(format!(
+    let (shard, _stopper) = BareMetricsRecorderCore::new(File::create(format!(
         "/tmp/datman_{}.baremetrics",
         now.format("%F_%H%M%S")
     ))?)
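
Note that cargo fix renames the binding to _stopper rather than replacing it with a bare _. The distinction matters for guard-like values: an underscore-prefixed binding still owns the value and keeps it alive until the end of scope, whereas the _ wildcard pattern drops it immediately. A small illustration (hypothetical Guard type, not the actual BareMetricsRecorderCore API):

struct Guard;

impl Drop for Guard {
    fn drop(&mut self) {
        println!("guard dropped");
    }
}

fn main() {
    let _kept = Guard; // owned binding: lives until the end of main
    let _ = Guard;     // wildcard pattern: dropped right here, prints first
    println!("end of main");
    // Output: "guard dropped", then "end of main", then "guard dropped"
}

If the stopper is meant to shut the metrics recorder down when dropped, the _stopper spelling preserves that end-of-scope behaviour.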


@@ -91,7 +91,7 @@ pub struct ProgressSender {
 }
 impl ProgressSender {
-    pub fn send_now(&mut self, include_message: bool) {
+    pub fn send_now(&mut self, _include_message: bool) {
         self.sender
             .send((
                 RequestBody::Progress {


@@ -116,7 +116,7 @@ fn wrapped_main() -> anyhow::Result<i32> {
     match &opts.command {
         PileCommand::Retrieve {
             pointer_name,
-            subset,
+            subset: _,
             destination,
             num_workers: workers,
         } => {
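
Inside a struct or enum-variant pattern, the equivalent fix is spelled subset: _ rather than an underscore-prefixed name: the field is matched but no binding is created at all. A standalone sketch (hypothetical enum mirroring the shape of PileCommand::Retrieve):

enum Command {
    Retrieve {
        pointer_name: String,
        subset: Option<String>,
        destination: String,
    },
}

fn main() {
    let cmd = Command::Retrieve {
        pointer_name: "pointer".into(),
        subset: None,
        destination: "/tmp/out".into(),
    };
    match &cmd {
        // `subset: _` acknowledges the field without binding it,
        // so no unused_variables warning is emitted for it.
        Command::Retrieve {
            pointer_name,
            subset: _,
            destination,
        } => println!("retrieve {} -> {}", pointer_name, destination),
    }
}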


@@ -114,8 +114,8 @@ impl<R: RawPile> RawPile for RawPileEncryptor<R> {
     fn build_storage_pipeline(
         &self,
-        settings: StoragePipelineSettings,
-        controller_send: Sender<ControllerMessage>,
+        _settings: StoragePipelineSettings,
+        _controller_send: Sender<ControllerMessage>,
     ) -> anyhow::Result<Sender<(ChunkId, Vec<u8>)>> {
         todo!()
     }
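
Since this method body is still todo!(), neither parameter is read; underscore-prefixing silences the warnings while keeping the exact signature the trait requires. A reduced sketch of the pattern (hypothetical trait, not the real RawPile definition):

trait StoragePipeline {
    fn build_storage_pipeline(&self, settings: u32) -> Result<(), String>;
}

struct EncryptedPile;

impl StoragePipeline for EncryptedPile {
    // The trait dictates the signature, but the stub uses no argument,
    // so the parameter is underscore-prefixed rather than removed.
    fn build_storage_pipeline(&self, _settings: u32) -> Result<(), String> {
        todo!()
    }
}

fn main() {
    let _pile = EncryptedPile; // calling the method would panic via todo!()
}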


@@ -383,7 +383,7 @@ impl SqliteBloblogPile {
         pointers_buffered: &mut Vec<(ChunkId, BloblogPointer)>,
     ) -> anyhow::Result<()> {
         let mut inner = this.inner.lock().unwrap();
-        let mut txn = inner.connection.transaction()?;
+        let txn = inner.connection.transaction()?;
         {
             let mut stmt = txn.prepare(
                 "INSERT OR FAIL INTO chunks (chunk_id, bloblog, offset) VALUES (?1, ?2, ?3)",


@@ -349,8 +349,8 @@ impl RawPile for Requester {
     fn build_storage_pipeline(
         &self,
-        settings: StoragePipelineSettings,
-        controller_send: Sender<ControllerMessage>,
+        _settings: StoragePipelineSettings,
+        _controller_send: Sender<ControllerMessage>,
     ) -> anyhow::Result<Sender<(ChunkId, Vec<u8>)>> {
         // this one is a little bit more complex.
         // We want to be able to send off multiple write requests at once, but not too many, so we
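
The comment above describes a bounded-pipeline problem: keep several write requests in flight at once without letting the queue grow unboundedly. One common way to get that behaviour is a bounded channel, sketched below with std::sync::mpsc::sync_channel (an illustration of the general idea only, not the Requester's actual implementation):

use std::sync::mpsc::sync_channel;
use std::thread;

type ChunkId = [u8; 4];

fn main() {
    // A sync_channel of capacity 8 blocks the sender once 8 chunks
    // are queued, capping the number of in-flight write requests.
    let (tx, rx) = sync_channel::<(ChunkId, Vec<u8>)>(8);

    let writer = thread::spawn(move || {
        for (chunk_id, data) in rx {
            // Stand-in for issuing the real write request.
            println!("writing chunk {:?} ({} bytes)", chunk_id, data.len());
        }
    });

    for i in 0u8..32 {
        // Blocks whenever 8 chunks are already queued (backpressure).
        tx.send(([i, 0, 0, 0], vec![0u8; 1024])).unwrap();
    }
    drop(tx); // close the channel so the writer loop ends
    writer.join().unwrap();
}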