zlog: Use zlog as default log implementation (#28612)

Still TODO:

- [x] Remove old log implementations
- [x] More cleanup
- [x] Verify atomic/lock logic
- [x] More tests
- [ ] ??? ANSI coloring when logging to stdout

Release Notes:

- N/A
Ben Kunkle 2025-04-14 10:17:07 -04:00 committed by GitHub
parent 0eb0a3c7dc
commit 4a57664c7f
9 changed files with 872 additions and 706 deletions
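
For orientation before the diffs below: a minimal sketch (not part of this commit) of how the new pieces fit together, using only APIs that appear in the diff; the "worktree.fs" scope key is a hypothetical example.

use std::collections::HashMap;

fn example_setup() {
    // Install zlog as the global `log` implementation; init() also reads
    // ZED_LOG / RUST_LOG via process_env().
    zlog::init();

    // Pick a sink: stdout here, or init_output_file(...) as main.rs does below.
    zlog::init_output_stdout();

    // Per-scope levels, as the settings observer does with ZlogSettings.scopes.
    let mut scopes = HashMap::new();
    scopes.insert("worktree.fs".to_string(), "trace".to_string());
    zlog::filter::refresh_from_settings(&scopes);

    // Plain `log` macros from any crate are now routed through zlog.
    log::info!("logging is up");
}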

Cargo.lock

@@ -18206,7 +18206,6 @@ dependencies = [
  "settings",
  "settings_ui",
  "shellexpand 2.1.2",
- "simplelog",
  "smol",
  "snippet_provider",
  "snippets_ui",
@@ -18555,7 +18554,9 @@ name = "zlog"
 version = "0.1.0"
 dependencies = [
  "anyhow",
+ "chrono",
  "log",
+ "tempfile",
  "workspace-hack",
 ]


@@ -108,7 +108,6 @@ session.workspace = true
 settings.workspace = true
 settings_ui.workspace = true
 shellexpand.workspace = true
-simplelog.workspace = true
 smol.workspace = true
 snippet_provider.workspace = true
 snippets_ui.workspace = true


@@ -1,122 +0,0 @@
use chrono::Offset;
use env_logger::Builder;
use log::LevelFilter;
use simplelog::ConfigBuilder;
use std::fs::{self, File, OpenOptions};
use std::io::{self, Write};
use time::UtcOffset;
pub fn init_logger() {
let level = LevelFilter::Info;
// Prevent log file from becoming too large.
const KIB: u64 = 1024;
const MIB: u64 = 1024 * KIB;
const MAX_LOG_BYTES: u64 = MIB;
if std::fs::metadata(paths::log_file()).map_or(false, |metadata| metadata.len() > MAX_LOG_BYTES)
{
let _ = std::fs::rename(paths::log_file(), paths::old_log_file());
}
match LogWriter::new(MAX_LOG_BYTES) {
Ok(writer) => {
let mut config_builder = ConfigBuilder::new();
config_builder.set_time_format_rfc3339();
let local_offset = chrono::Local::now().offset().fix().local_minus_utc();
if let Ok(offset) = UtcOffset::from_whole_seconds(local_offset) {
config_builder.set_time_offset(offset);
}
#[cfg(any(target_os = "linux", target_os = "freebsd"))]
{
config_builder.add_filter_ignore_str("zbus");
config_builder.add_filter_ignore_str("blade_graphics::hal::resource");
config_builder.add_filter_ignore_str("naga::back::spv::writer");
}
let config = config_builder.build();
simplelog::WriteLogger::init(level, config, writer)
.expect("could not initialize logger");
}
Err(err) => {
init_stdout_logger();
log::error!(
"could not open log file, defaulting to stdout logging: {}",
err
);
}
}
}
pub fn init_stdout_logger() {
Builder::new()
.parse_default_env()
.format(|buf, record| {
use env_logger::fmt::style::{AnsiColor, Style};
let subtle = Style::new().fg_color(Some(AnsiColor::BrightBlack.into()));
write!(buf, "{subtle}[{subtle:#}")?;
write!(
buf,
"{} ",
chrono::Local::now().format("%Y-%m-%dT%H:%M:%S%:z")
)?;
let level_style = buf.default_level_style(record.level());
write!(buf, "{level_style}{:<5}{level_style:#}", record.level())?;
if let Some(path) = record.module_path() {
write!(buf, " {path}")?;
}
write!(buf, "{subtle}]{subtle:#}")?;
writeln!(buf, " {}", record.args())
})
.init();
}
struct LogWriter {
file: File,
max_size: u64,
current_size: u64,
}
impl LogWriter {
fn new(max_size: u64) -> io::Result<Self> {
let file = OpenOptions::new()
.create(true)
.append(true)
.open(paths::log_file())?;
let current_size = file.metadata()?.len();
Ok(LogWriter {
file,
max_size,
current_size,
})
}
fn replace(&mut self) -> io::Result<()> {
self.file.sync_all()?;
fs::rename(paths::log_file(), paths::old_log_file())?;
self.file = OpenOptions::new()
.create(true)
.append(true)
.open(paths::log_file())?;
self.current_size = 0;
Ok(())
}
}
impl Write for LogWriter {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
if self.current_size + buf.len() as u64 > self.max_size {
self.replace()?;
}
let bytes = self.file.write(buf)?;
self.current_size += bytes as u64;
Ok(bytes)
}
fn flush(&mut self) -> io::Result<()> {
self.file.flush()
}
}


@@ -1,7 +1,6 @@
 // Disable command line from opening on release mode
 #![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]
-mod logger;
 mod reliability;
 mod zed;
@@ -28,7 +27,6 @@ use prompt_store::PromptBuilder;
 use reqwest_client::ReqwestClient;
 use assets::Assets;
-use logger::{init_logger, init_stdout_logger};
 use node_runtime::{NodeBinaryOptions, NodeRuntime};
 use parking_lot::Mutex;
 use project::project_settings::ProjectSettings;
@@ -195,11 +193,15 @@ fn main() {
         return;
     }
-    zlog::init_from_env();
+    zlog::init();
     if stdout_is_a_pty() {
-        init_stdout_logger();
+        zlog::init_output_stdout();
     } else {
-        init_logger();
+        let result = zlog::init_output_file(paths::log_file(), Some(paths::old_log_file()));
+        if let Err(err) = result {
+            eprintln!("Could not open log file: {}... Defaulting to stdout", err);
+            zlog::init_output_stdout();
+        };
     }
     log::info!("========== starting zed ==========");


@@ -15,6 +15,10 @@ path = "src/zlog.rs"
 default = []
 [dependencies]
+chrono.workspace = true
 log.workspace = true
 workspace-hack.workspace = true
 anyhow.workspace = true
+
+[dev-dependencies]
+tempfile.workspace = true

crates/zlog/src/filter.rs

@@ -0,0 +1,568 @@
use std::{
collections::{HashMap, VecDeque},
hash::{DefaultHasher, Hasher},
sync::{
OnceLock, RwLock,
atomic::{AtomicU8, Ordering},
},
usize,
};
use crate::{SCOPE_DEPTH_MAX, SCOPE_STRING_SEP_STR, Scope, ScopeAlloc, env_config};
use log;
static ENV_FILTER: OnceLock<env_config::EnvFilter> = OnceLock::new();
static SCOPE_MAP: RwLock<Option<GlobalScopeMap>> = RwLock::new(None);
struct GlobalScopeMap {
map: ScopeMap,
hash: u64,
}
const LEVEL_ENABLED_MAX_DEFAULT: log::LevelFilter = log::LevelFilter::Info;
/// The maximum log level of verbosity that is enabled by default.
/// All messages more verbose than this level will be discarded
/// by default unless specially configured.
///
/// This is used instead of the `log::max_level` as we need to tell the `log`
/// crate that the max level is everything, so that we can dynamically enable
/// logs that are more verbose than this level without the `log` crate throwing
/// them away before we see them
static mut LEVEL_ENABLED_MAX_STATIC: log::LevelFilter = LEVEL_ENABLED_MAX_DEFAULT;
/// A cache of the true maximum log level that _could_ be printed. This is based
/// on the maximally verbose level that is configured by the user, and is used
/// to filter out logs more verbose than any configured level.
///
/// E.g. if `LEVEL_ENABLED_MAX_STATIC `is 'info' but a user has configured some
/// scope to print at a `debug` level, then this will be `debug`, and all
/// `trace` logs will be discarded.
/// Therefore, it should always be `>= LEVEL_ENABLED_MAX_STATIC`
// PERF: this doesn't need to be an atomic, we don't actually care about race conditions here
static LEVEL_ENABLED_MAX_CONFIG: AtomicU8 = AtomicU8::new(LEVEL_ENABLED_MAX_DEFAULT as u8);
pub fn init_env_filter(filter: env_config::EnvFilter) {
if let Some(level_max) = filter.level_global {
unsafe { LEVEL_ENABLED_MAX_STATIC = level_max }
}
if ENV_FILTER.set(filter).is_err() {
panic!("Environment filter cannot be initialized twice");
}
}
pub fn is_possibly_enabled_level(level: log::Level) -> bool {
return LEVEL_ENABLED_MAX_CONFIG.load(Ordering::Relaxed) >= level as u8;
}
pub fn is_scope_enabled(scope: &Scope, level: log::Level) -> bool {
if level <= unsafe { LEVEL_ENABLED_MAX_STATIC } {
// [FAST PATH]
// if the message is at or below the minimum printed log level
// (where error < warn < info etc) then always enable
return true;
}
if !is_possibly_enabled_level(level) {
// [FAST PATH PT. 2]
// if the message is above the maximum enabled log level
// (where error < warn < info etc) then disable without checking
// scope map
return false;
}
let global_scope_map = SCOPE_MAP.read().unwrap_or_else(|err| {
SCOPE_MAP.clear_poison();
return err.into_inner();
});
let Some(GlobalScopeMap { map, .. }) = global_scope_map.as_ref() else {
// on failure, return false because it's not <= LEVEL_ENABLED_MAX_STATIC
return false;
};
if map.is_empty() {
// if no scopes are enabled, return false because it's not <= LEVEL_ENABLED_MAX_STATIC
return false;
}
let enabled_status = map.is_enabled(&scope, level);
return match enabled_status {
// if it isn't configured, then it's disabled because it's not <= LEVEL_ENABLED_MAX_STATIC
EnabledStatus::NotConfigured => false,
EnabledStatus::Enabled => true,
EnabledStatus::Disabled => false,
};
}
fn hash_scope_map_settings(map: &HashMap<String, String>) -> u64 {
let mut hasher = DefaultHasher::new();
let mut items = map.iter().collect::<Vec<_>>();
items.sort();
for (key, value) in items {
Hasher::write(&mut hasher, key.as_bytes());
Hasher::write(&mut hasher, value.as_bytes());
}
return hasher.finish();
}
pub(crate) fn refresh() {
refresh_from_settings(&HashMap::default());
}
pub fn refresh_from_settings(settings: &HashMap<String, String>) {
let hash_old = {
SCOPE_MAP
.read()
.unwrap_or_else(|err| {
SCOPE_MAP.clear_poison();
err.into_inner()
})
.as_ref()
.map(|scope_map| scope_map.hash)
};
let hash_new = hash_scope_map_settings(settings);
if hash_old == Some(hash_new) {
return;
}
let env_config = ENV_FILTER.get();
let map_new = ScopeMap::new_from_settings_and_env(settings, env_config);
let mut level_enabled_max = unsafe { LEVEL_ENABLED_MAX_STATIC };
for entry in &map_new.entries {
if let Some(level) = entry.enabled {
level_enabled_max = level_enabled_max.max(level.to_level_filter());
}
}
LEVEL_ENABLED_MAX_CONFIG.store(level_enabled_max as u8, Ordering::Release);
let mut global_map = SCOPE_MAP.write().unwrap_or_else(|err| {
SCOPE_MAP.clear_poison();
err.into_inner()
});
global_map.replace(GlobalScopeMap {
map: map_new,
hash: hash_new,
});
}
fn level_from_level_str(level_str: &String) -> Option<log::Level> {
let level = match level_str.to_ascii_lowercase().as_str() {
"" => log::Level::Trace,
"trace" => log::Level::Trace,
"debug" => log::Level::Debug,
"info" => log::Level::Info,
"warn" => log::Level::Warn,
"error" => log::Level::Error,
"off" | "disable" | "no" | "none" | "disabled" => {
crate::warn!(
"Invalid log level \"{level_str}\", set to error to disable non-error logging. Defaulting to error"
);
log::Level::Error
}
_ => {
crate::warn!("Invalid log level \"{level_str}\", ignoring");
return None;
}
};
return Some(level);
}
fn scope_alloc_from_scope_str(scope_str: &String) -> Option<ScopeAlloc> {
let mut scope_buf = [""; SCOPE_DEPTH_MAX];
let mut index = 0;
let mut scope_iter = scope_str.split(SCOPE_STRING_SEP_STR);
while index < SCOPE_DEPTH_MAX {
let Some(scope) = scope_iter.next() else {
break;
};
if scope == "" {
continue;
}
scope_buf[index] = scope;
index += 1;
}
if index == 0 {
return None;
}
if let Some(_) = scope_iter.next() {
crate::warn!(
"Invalid scope key, too many nested scopes: '{scope_str}'. Max depth is {SCOPE_DEPTH_MAX}",
);
return None;
}
let scope = scope_buf.map(|s| s.to_string());
return Some(scope);
}
pub struct ScopeMap {
entries: Vec<ScopeMapEntry>,
root_count: usize,
}
pub struct ScopeMapEntry {
scope: String,
enabled: Option<log::Level>,
descendants: std::ops::Range<usize>,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum EnabledStatus {
Enabled,
Disabled,
NotConfigured,
}
impl ScopeMap {
pub fn new_from_settings_and_env(
items_input_map: &HashMap<String, String>,
env_config: Option<&env_config::EnvFilter>,
) -> Self {
let mut items = Vec::with_capacity(
items_input_map.len() + env_config.map_or(0, |c| c.directive_names.len()),
);
if let Some(env_filter) = env_config {
// TODO: parse on load instead of every reload
items.extend(
env_filter
.directive_names
.iter()
.zip(env_filter.directive_levels.iter())
.filter_map(|(scope, level_filter)| {
if items_input_map.get(scope).is_some() {
return None;
}
let scope = scope_alloc_from_scope_str(scope)?;
// TODO: use level filters instead of scopes in scope map
let level = level_filter.to_level()?;
Some((scope, level))
}),
);
}
items.extend(
items_input_map
.into_iter()
.filter_map(|(scope_str, level_str)| {
let scope = scope_alloc_from_scope_str(&scope_str)?;
let level = level_from_level_str(&level_str)?;
return Some((scope, level));
}),
);
items.sort_by(|a, b| a.0.cmp(&b.0));
let mut this = Self {
entries: Vec::with_capacity(items.len() * SCOPE_DEPTH_MAX),
root_count: 0,
};
let items_count = items.len();
struct ProcessQueueEntry {
parent_index: usize,
depth: usize,
items_range: std::ops::Range<usize>,
}
let mut process_queue = VecDeque::new();
process_queue.push_back(ProcessQueueEntry {
parent_index: usize::MAX,
depth: 0,
items_range: 0..items_count,
});
let empty_range = 0..0;
while let Some(process_entry) = process_queue.pop_front() {
let ProcessQueueEntry {
items_range,
depth,
parent_index,
} = process_entry;
let mut cursor = items_range.start;
let res_entries_start = this.entries.len();
while cursor < items_range.end {
let sub_items_start = cursor;
cursor += 1;
let scope_name = &items[sub_items_start].0[depth];
while cursor < items_range.end && &items[cursor].0[depth] == scope_name {
cursor += 1;
}
let sub_items_end = cursor;
if scope_name == "" {
assert_eq!(sub_items_start + 1, sub_items_end);
assert_ne!(depth, 0);
assert_ne!(parent_index, usize::MAX);
assert!(this.entries[parent_index].enabled.is_none());
this.entries[parent_index].enabled = Some(items[sub_items_start].1);
continue;
}
let is_valid_scope = scope_name != "";
let is_last = depth + 1 == SCOPE_DEPTH_MAX || !is_valid_scope;
let mut enabled = None;
if is_last {
assert_eq!(
sub_items_start + 1,
sub_items_end,
"Expected one item: got: {:?}",
&items[items_range.clone()]
);
enabled = Some(items[sub_items_start].1);
} else {
let entry_index = this.entries.len();
process_queue.push_back(ProcessQueueEntry {
items_range: sub_items_start..sub_items_end,
parent_index: entry_index,
depth: depth + 1,
});
}
this.entries.push(ScopeMapEntry {
scope: scope_name.to_owned(),
enabled,
descendants: empty_range.clone(),
});
}
let res_entries_end = this.entries.len();
if parent_index != usize::MAX {
this.entries[parent_index].descendants = res_entries_start..res_entries_end;
} else {
this.root_count = res_entries_end;
}
}
return this;
}
pub fn is_empty(&self) -> bool {
self.entries.is_empty()
}
pub fn is_enabled<S>(&self, scope: &[S; SCOPE_DEPTH_MAX], level: log::Level) -> EnabledStatus
where
S: AsRef<str>,
{
let mut enabled = None;
let mut cur_range = &self.entries[0..self.root_count];
let mut depth = 0;
'search: while !cur_range.is_empty()
&& depth < SCOPE_DEPTH_MAX
&& scope[depth].as_ref() != ""
{
for entry in cur_range {
if entry.scope == scope[depth].as_ref() {
// note: deeper (more specific) entries override any level configured on an ancestor scope
enabled = entry.enabled.or(enabled);
cur_range = &self.entries[entry.descendants.clone()];
depth += 1;
continue 'search;
}
}
break 'search;
}
return enabled.map_or(EnabledStatus::NotConfigured, |level_enabled| {
if level <= level_enabled {
EnabledStatus::Enabled
} else {
EnabledStatus::Disabled
}
});
}
}
#[cfg(test)]
mod tests {
use crate::private::scope_new;
use super::*;
fn scope_map_from_keys(kv: &[(&str, &str)]) -> ScopeMap {
let hash_map: HashMap<String, String> = kv
.iter()
.map(|(k, v)| (k.to_string(), v.to_string()))
.collect();
ScopeMap::new_from_settings_and_env(&hash_map, None)
}
#[test]
fn test_initialization() {
let map = scope_map_from_keys(&[("a.b.c.d", "trace")]);
assert_eq!(map.root_count, 1);
assert_eq!(map.entries.len(), 4);
let map = scope_map_from_keys(&[]);
assert_eq!(map.root_count, 0);
assert_eq!(map.entries.len(), 0);
let map = scope_map_from_keys(&[("", "trace")]);
assert_eq!(map.root_count, 0);
assert_eq!(map.entries.len(), 0);
let map = scope_map_from_keys(&[("foo..bar", "trace")]);
assert_eq!(map.root_count, 1);
assert_eq!(map.entries.len(), 2);
let map = scope_map_from_keys(&[
("a.b.c.d", "trace"),
("e.f.g.h", "debug"),
("i.j.k.l", "info"),
("m.n.o.p", "warn"),
("q.r.s.t", "error"),
]);
assert_eq!(map.root_count, 5);
assert_eq!(map.entries.len(), 20);
assert_eq!(map.entries[0].scope, "a");
assert_eq!(map.entries[1].scope, "e");
assert_eq!(map.entries[2].scope, "i");
assert_eq!(map.entries[3].scope, "m");
assert_eq!(map.entries[4].scope, "q");
}
fn scope_from_scope_str(scope_str: &'static str) -> Scope {
let mut scope_buf = [""; SCOPE_DEPTH_MAX];
let mut index = 0;
let mut scope_iter = scope_str.split(SCOPE_STRING_SEP_STR);
while index < SCOPE_DEPTH_MAX {
let Some(scope) = scope_iter.next() else {
break;
};
if scope == "" {
continue;
}
scope_buf[index] = scope;
index += 1;
}
assert_ne!(index, 0);
assert!(scope_iter.next().is_none());
return scope_buf;
}
#[test]
fn test_is_enabled() {
let map = scope_map_from_keys(&[
("a.b.c.d", "trace"),
("e.f.g.h", "debug"),
("i.j.k.l", "info"),
("m.n.o.p", "warn"),
("q.r.s.t", "error"),
]);
use log::Level;
assert_eq!(
map.is_enabled(&scope_from_scope_str("a.b.c.d"), Level::Trace),
EnabledStatus::Enabled
);
assert_eq!(
map.is_enabled(&scope_from_scope_str("a.b.c.d"), Level::Debug),
EnabledStatus::Enabled
);
assert_eq!(
map.is_enabled(&scope_from_scope_str("e.f.g.h"), Level::Debug),
EnabledStatus::Enabled
);
assert_eq!(
map.is_enabled(&scope_from_scope_str("e.f.g.h"), Level::Info),
EnabledStatus::Enabled
);
assert_eq!(
map.is_enabled(&scope_from_scope_str("e.f.g.h"), Level::Trace),
EnabledStatus::Disabled
);
assert_eq!(
map.is_enabled(&scope_from_scope_str("i.j.k.l"), Level::Info),
EnabledStatus::Enabled
);
assert_eq!(
map.is_enabled(&scope_from_scope_str("i.j.k.l"), Level::Warn),
EnabledStatus::Enabled
);
assert_eq!(
map.is_enabled(&scope_from_scope_str("i.j.k.l"), Level::Debug),
EnabledStatus::Disabled
);
assert_eq!(
map.is_enabled(&scope_from_scope_str("m.n.o.p"), Level::Warn),
EnabledStatus::Enabled
);
assert_eq!(
map.is_enabled(&scope_from_scope_str("m.n.o.p"), Level::Error),
EnabledStatus::Enabled
);
assert_eq!(
map.is_enabled(&scope_from_scope_str("m.n.o.p"), Level::Info),
EnabledStatus::Disabled
);
assert_eq!(
map.is_enabled(&scope_from_scope_str("q.r.s.t"), Level::Error),
EnabledStatus::Enabled
);
assert_eq!(
map.is_enabled(&scope_from_scope_str("q.r.s.t"), Level::Warn),
EnabledStatus::Disabled
);
}
fn scope_map_from_keys_and_env(kv: &[(&str, &str)], env: &env_config::EnvFilter) -> ScopeMap {
let hash_map: HashMap<String, String> = kv
.iter()
.map(|(k, v)| (k.to_string(), v.to_string()))
.collect();
ScopeMap::new_from_settings_and_env(&hash_map, Some(env))
}
#[test]
fn test_initialization_with_env() {
let env_filter = env_config::parse("a.b=debug,u=error").unwrap();
let map = scope_map_from_keys_and_env(&[], &env_filter);
assert_eq!(map.root_count, 2);
assert_eq!(map.entries.len(), 3);
assert_eq!(
map.is_enabled(&scope_new(&["a"]), log::Level::Debug),
EnabledStatus::NotConfigured
);
assert_eq!(
map.is_enabled(&scope_new(&["a", "b"]), log::Level::Debug),
EnabledStatus::Enabled
);
assert_eq!(
map.is_enabled(&scope_new(&["a", "b", "c"]), log::Level::Trace),
EnabledStatus::Disabled
);
let env_filter = env_config::parse("a.b=debug,e.f.g.h=trace,u=error").unwrap();
let map = scope_map_from_keys_and_env(
&[
("a.b.c.d", "trace"),
("e.f.g.h", "debug"),
("i.j.k.l", "info"),
("m.n.o.p", "warn"),
("q.r.s.t", "error"),
],
&env_filter,
);
assert_eq!(map.root_count, 6);
assert_eq!(map.entries.len(), 21);
assert_eq!(map.entries[0].scope, "a");
assert_eq!(map.entries[1].scope, "e");
assert_eq!(map.entries[2].scope, "i");
assert_eq!(map.entries[3].scope, "m");
assert_eq!(map.entries[4].scope, "q");
assert_eq!(map.entries[5].scope, "u");
assert_eq!(
map.is_enabled(&scope_new(&["a", "b", "c", "d"]), log::Level::Trace),
EnabledStatus::Enabled
);
assert_eq!(
map.is_enabled(&scope_new(&["a", "b", "c"]), log::Level::Trace),
EnabledStatus::Disabled
);
assert_eq!(
map.is_enabled(&scope_new(&["u", "v"]), log::Level::Warn),
EnabledStatus::Disabled
);
// settings override env
assert_eq!(
map.is_enabled(&scope_new(&["e", "f", "g", "h"]), log::Level::Trace),
EnabledStatus::Disabled,
);
}
}
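
Not part of the diff: a small sketch of how the ScopeMap above resolves levels, using its public constructor and is_enabled; the "worktree"/"fs"/"git" scope names are hypothetical.

use std::collections::HashMap;
use zlog::filter::{EnabledStatus, ScopeMap};

fn scope_map_example() {
    // Settings keys are dot-separated scopes; values are level strings.
    let mut settings = HashMap::new();
    settings.insert("worktree".to_string(), "info".to_string());
    settings.insert("worktree.fs".to_string(), "trace".to_string());

    let map = ScopeMap::new_from_settings_and_env(&settings, None);

    // The most specific configured ancestor wins: "worktree.fs" allows trace...
    assert_eq!(
        map.is_enabled(&["worktree", "fs", "", ""], log::Level::Trace),
        EnabledStatus::Enabled
    );
    // ...while other children of "worktree" inherit its "info" level.
    assert_eq!(
        map.is_enabled(&["worktree", "git", "", ""], log::Level::Trace),
        EnabledStatus::Disabled
    );
}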

crates/zlog/src/sink.rs

@@ -0,0 +1,233 @@
use std::{
fs,
io::{self, Write},
path::PathBuf,
sync::{
Mutex, OnceLock,
atomic::{AtomicU64, Ordering},
},
};
use crate::{SCOPE_STRING_SEP_CHAR, Scope};
/// Whether stdout output is enabled.
static mut ENABLED_SINKS_STDOUT: bool = false;
/// Is Some(file) if file output is enabled.
static ENABLED_SINKS_FILE: Mutex<Option<std::fs::File>> = Mutex::new(None);
static SINK_FILE_PATH: OnceLock<&'static PathBuf> = OnceLock::new();
static SINK_FILE_PATH_ROTATE: OnceLock<&'static PathBuf> = OnceLock::new();
/// Atomic counter for the size of the log file in bytes.
// TODO: make non-atomic if writing single threaded
static SINK_FILE_SIZE_BYTES: AtomicU64 = AtomicU64::new(0);
/// Maximum size of the log file before it will be rotated, in bytes.
const SINK_FILE_SIZE_BYTES_MAX: u64 = 1024 * 1024; // 1 MB
pub fn init_output_stdout() {
unsafe {
ENABLED_SINKS_STDOUT = true;
}
}
pub fn init_output_file(
path: &'static PathBuf,
path_rotate: Option<&'static PathBuf>,
) -> io::Result<()> {
let mut file = std::fs::OpenOptions::new()
.create(true)
.append(true)
.open(path)?;
SINK_FILE_PATH
.set(path)
.expect("Init file output should only be called once");
if let Some(path_rotate) = path_rotate {
SINK_FILE_PATH_ROTATE
.set(path_rotate)
.expect("Init file output should only be called once");
}
let mut enabled_sinks_file = ENABLED_SINKS_FILE
.try_lock()
.expect("Log file lock is available during init");
let size_bytes = file.metadata().map_or(0, |metadata| metadata.len());
if size_bytes >= SINK_FILE_SIZE_BYTES_MAX {
rotate_log_file(&mut file, Some(path), path_rotate, &SINK_FILE_SIZE_BYTES);
} else {
SINK_FILE_SIZE_BYTES.store(size_bytes, Ordering::Relaxed);
}
*enabled_sinks_file = Some(file);
Ok(())
}
const LEVEL_OUTPUT_STRINGS: [&str; 6] = [
" ", // nop: ERROR = 1
"ERROR", //
"WARN ", //
"INFO ", //
"DEBUG", //
"TRACE", //
];
pub fn submit(record: Record) {
if unsafe { ENABLED_SINKS_STDOUT } {
let mut stdout = std::io::stdout().lock();
_ = writeln!(
&mut stdout,
"{} {} [{}] {}",
chrono::Local::now().format("%Y-%m-%dT%H:%M:%S%:z"),
LEVEL_OUTPUT_STRINGS[record.level as usize],
ScopeFmt(record.scope),
record.message
);
}
let mut file = ENABLED_SINKS_FILE.lock().unwrap_or_else(|handle| {
ENABLED_SINKS_FILE.clear_poison();
handle.into_inner()
});
if let Some(file) = file.as_mut() {
struct SizedWriter<'a> {
file: &'a mut std::fs::File,
written: u64,
}
impl io::Write for SizedWriter<'_> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.file.write(buf)?;
self.written += buf.len() as u64;
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
self.file.flush()
}
}
let file_size_bytes = {
let mut writer = SizedWriter { file, written: 0 };
_ = writeln!(
&mut writer,
"{} {} [{}] {}",
chrono::Local::now().format("%Y-%m-%dT%H:%M:%S%:z"),
LEVEL_OUTPUT_STRINGS[record.level as usize],
ScopeFmt(record.scope),
record.message
);
SINK_FILE_SIZE_BYTES.fetch_add(writer.written, Ordering::Relaxed) + writer.written
};
if file_size_bytes > SINK_FILE_SIZE_BYTES_MAX {
rotate_log_file(
file,
SINK_FILE_PATH.get(),
SINK_FILE_PATH_ROTATE.get(),
&SINK_FILE_SIZE_BYTES,
);
}
}
}
pub fn flush() {
_ = std::io::stdout().lock().flush();
}
struct ScopeFmt(Scope);
impl std::fmt::Display for ScopeFmt {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
use std::fmt::Write;
f.write_str(self.0[0])?;
for scope in &self.0[1..] {
if !scope.is_empty() {
f.write_char(SCOPE_STRING_SEP_CHAR)?;
}
f.write_str(scope)?;
}
Ok(())
}
}
pub struct Record<'a> {
pub scope: Scope,
pub level: log::Level,
pub message: &'a std::fmt::Arguments<'a>,
}
fn rotate_log_file<PathRef>(
file: &mut fs::File,
path: Option<PathRef>,
path_rotate: Option<PathRef>,
atomic_size: &AtomicU64,
) where
PathRef: AsRef<std::path::Path>,
{
if let Err(err) = file.flush() {
eprintln!(
"Failed to flush log file before rotating, some logs may be lost: {}",
err
);
}
let rotation_error = match (path, path_rotate) {
(Some(_), None) => Some(anyhow::anyhow!("No rotation log file path configured")),
(None, _) => Some(anyhow::anyhow!("No log file path configured")),
(Some(path), Some(path_rotate)) => fs::copy(path, path_rotate)
.err()
.map(|err| anyhow::anyhow!(err)),
};
if let Some(err) = rotation_error {
eprintln!(
"Log file rotation failed. Truncating log file anyways: {}",
err,
);
}
_ = file.set_len(0);
// SAFETY: It is safe to set size to 0 even if set_len fails as
// according to the documentation, it only fails if:
// - the file is not writeable: should never happen,
// - the size would cause an overflow (implementation specific): 0 should never cause an overflow
atomic_size.store(0, Ordering::Relaxed);
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_rotate_log_file() {
let temp_dir = tempfile::tempdir().unwrap();
let log_file_path = temp_dir.path().join("log.txt");
let rotation_log_file_path = temp_dir.path().join("log_rotated.txt");
let mut file = fs::File::create(&log_file_path).unwrap();
let contents = String::from("Hello, world!");
file.write_all(contents.as_bytes()).unwrap();
let size = AtomicU64::new(contents.len() as u64);
rotate_log_file(
&mut file,
Some(&log_file_path),
Some(&rotation_log_file_path),
&size,
);
assert!(log_file_path.exists());
assert_eq!(log_file_path.metadata().unwrap().len(), 0);
assert!(rotation_log_file_path.exists());
assert_eq!(
std::fs::read_to_string(&rotation_log_file_path).unwrap(),
contents,
);
assert_eq!(size.load(Ordering::Relaxed), 0);
}
#[test]
fn test_log_level_names() {
assert_eq!(LEVEL_OUTPUT_STRINGS[log::Level::Error as usize], "ERROR");
assert_eq!(LEVEL_OUTPUT_STRINGS[log::Level::Warn as usize], "WARN ");
assert_eq!(LEVEL_OUTPUT_STRINGS[log::Level::Info as usize], "INFO ");
assert_eq!(LEVEL_OUTPUT_STRINGS[log::Level::Debug as usize], "DEBUG");
assert_eq!(LEVEL_OUTPUT_STRINGS[log::Level::Trace as usize], "TRACE");
}
}
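
Not part of the diff: a minimal sketch of driving the sink directly with the types above; the scope strings are hypothetical, and the trailing empty strings pad the fixed-depth Scope array.

use zlog::sink::{flush, init_output_stdout, submit, Record};

fn sink_example() {
    // Enable the stdout sink (a file sink would use init_output_file instead).
    init_output_stdout();

    // Scope is [&'static str; SCOPE_DEPTH_MAX]; unused slots stay empty.
    let scope = ["worktree", "fs", "", ""];

    // submit() formats the record for every enabled sink and tracks the file
    // size so the log can be rotated; format_args! avoids allocating the message.
    submit(Record {
        scope,
        level: log::Level::Info,
        message: &format_args!("scanned {} entries", 42),
    });
    flush();
}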


@@ -2,18 +2,27 @@
 pub use log as log_impl;
 
 mod env_config;
+pub mod filter;
+pub mod sink;
+
+pub use sink::{init_output_file, init_output_stdout};
 
 pub const SCOPE_DEPTH_MAX: usize = 4;
 
-pub fn init_from_env() {
+pub fn init() {
+    process_env();
+    log::set_logger(&ZLOG).expect("Logger should not be initialized twice");
+    log::set_max_level(log::LevelFilter::max());
+}
+
+pub fn process_env() {
     let Ok(env_config) = std::env::var("ZED_LOG").or_else(|_| std::env::var("RUST_LOG")) else {
         return;
     };
     match env_config::parse(&env_config) {
         Ok(filter) => {
-            scope_map::init_env_filter(filter);
-            scope_map::refresh();
-            // TODO: set max level once removing `env_logger` and `simple_log` crates
+            filter::init_env_filter(filter);
+            filter::refresh();
         }
         Err(err) => {
             eprintln!("Failed to parse log filter: {}", err);
@@ -21,25 +30,43 @@ pub fn init_from_env() {
     }
 }
 
-/// because we are currently just wrapping the `log` crate in `zlog`,
-/// we need to work around the fact that the `log` crate only provides a
-/// single global level filter. In order to have more precise control until
-/// we no longer wrap `log`, we bump up the priority of log level so that it
-/// will be logged, even if the actual level is lower
-/// This is fine for now, as we use a `info` level filter by default in releases,
-/// which hopefully won't result in confusion like `warn` or `error` levels might.
-pub fn min_printed_log_level(level: log_impl::Level) -> log_impl::Level {
-    // this logic is defined based on the logic used in the `log` crate,
-    // which checks that a logs level is <= both of these values,
-    // so we take the minimum of the two values to ensure that check passes
-    let level_min_static = log_impl::STATIC_MAX_LEVEL;
-    let level_min_dynamic = log_impl::max_level();
-    if level <= level_min_static && level <= level_min_dynamic {
-        return level;
-    }
-    return log_impl::LevelFilter::min(level_min_static, level_min_dynamic)
-        .to_level()
-        .unwrap_or(level);
-}
+static ZLOG: Zlog = Zlog {};
+
+pub struct Zlog {}
+
+impl log::Log for Zlog {
+    fn enabled(&self, metadata: &log::Metadata) -> bool {
+        filter::is_possibly_enabled_level(metadata.level())
+    }
+
+    fn log(&self, record: &log::Record) {
+        if !self.enabled(record.metadata()) {
+            return;
+        }
+        let scope = match record.module_path_static() {
+            Some(module_path) => {
+                // TODO: better module name -> scope translation
+                let crate_name = private::extract_crate_name_from_module_path(module_path);
+                private::scope_new(&[crate_name])
+            }
+            // TODO: when do we hit this
+            None => private::scope_new(&["*unknown*"]),
+        };
+        let level = record.metadata().level();
+        if !filter::is_scope_enabled(&scope, level) {
+            return;
+        }
+        sink::submit(sink::Record {
+            scope,
+            level,
+            message: record.args(),
+        });
+    }
+
+    fn flush(&self) {
+        // todo: necessary?
+        sink::flush();
+    }
+}
 
 #[macro_export]
@@ -47,9 +74,13 @@ macro_rules! log {
     ($logger:expr, $level:expr, $($arg:tt)+) => {
         let level = $level;
         let logger = $logger;
-        let (enabled, level) = $crate::scope_map::is_scope_enabled(&logger.scope, level);
+        let enabled = $crate::filter::is_scope_enabled(&logger.scope, level);
         if enabled {
-            $crate::log_impl::log!(level, "[{}]: {}", &logger.fmt_scope(), format!($($arg)+));
+            $crate::sink::submit($crate::sink::Record {
+                scope: logger.scope,
+                level,
+                message: &format_args!($($arg)+),
+            });
         }
     }
 }
@@ -205,27 +236,14 @@ pub mod private {
 pub type Scope = [&'static str; SCOPE_DEPTH_MAX];
 pub type ScopeAlloc = [String; SCOPE_DEPTH_MAX];
 
-const SCOPE_STRING_SEP: &'static str = ".";
+const SCOPE_STRING_SEP_STR: &'static str = ".";
+const SCOPE_STRING_SEP_CHAR: char = '.';
 
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub struct Logger {
     pub scope: Scope,
 }
 
-impl Logger {
-    pub fn fmt_scope(&self) -> String {
-        let mut last = 0;
-        for s in self.scope {
-            if s.is_empty() {
-                break;
-            }
-            last += 1;
-        }
-        return self.scope[0..last].join(SCOPE_STRING_SEP);
-    }
-}
-
 pub struct Timer {
     pub logger: Logger,
     pub start_time: std::time::Instant,
@@ -288,543 +306,6 @@ impl Timer {
     }
 }
pub mod scope_map {
use std::{
collections::{HashMap, VecDeque},
hash::{DefaultHasher, Hasher},
sync::{
OnceLock, RwLock,
atomic::{AtomicU64, Ordering},
},
usize,
};
use super::*;
static ENV_FILTER: OnceLock<env_config::EnvFilter> = OnceLock::new();
static SCOPE_MAP: RwLock<Option<ScopeMap>> = RwLock::new(None);
static SCOPE_MAP_HASH: AtomicU64 = AtomicU64::new(0);
pub fn init_env_filter(filter: env_config::EnvFilter) {
if ENV_FILTER.set(filter).is_err() {
panic!("Environment filter cannot be initialized twice");
}
}
pub fn is_scope_enabled(scope: &Scope, level: log_impl::Level) -> (bool, log_impl::Level) {
let level_min = min_printed_log_level(level);
if level <= level_min {
// [FAST PATH]
// if the message is at or below the minimum printed log level
// (where error < warn < info etc) then always enable
return (true, level);
}
let Ok(map) = SCOPE_MAP.read() else {
// on failure, default to enabled detection done by `log` crate
return (true, level);
};
let Some(map) = map.as_ref() else {
// on failure, default to enabled detection done by `log` crate
return (true, level);
};
if map.is_empty() {
// if no scopes are enabled, default to enabled detection done by `log` crate
return (true, level);
}
let enabled_status = map.is_enabled(&scope, level);
match enabled_status {
EnabledStatus::NotConfigured => {
// if this scope isn't configured, default to enabled detection done by `log` crate
return (true, level);
}
EnabledStatus::Enabled => {
// if this scope is enabled, enable logging
// note: bumping level to min level that will be printed
// to work around log crate limitations
return (true, level_min);
}
EnabledStatus::Disabled => {
// if the configured level is lower than the requested level, disable logging
// note: err = 0, warn = 1, etc.
return (false, level);
}
}
}
fn hash_scope_map_settings(map: &HashMap<String, String>) -> u64 {
let mut hasher = DefaultHasher::new();
let mut items = map.iter().collect::<Vec<_>>();
items.sort();
for (key, value) in items {
Hasher::write(&mut hasher, key.as_bytes());
Hasher::write(&mut hasher, value.as_bytes());
}
return hasher.finish();
}
pub(crate) fn refresh() {
refresh_from_settings(&HashMap::default());
}
pub fn refresh_from_settings(settings: &HashMap<String, String>) {
let hash_old = SCOPE_MAP_HASH.load(Ordering::Acquire);
let hash_new = hash_scope_map_settings(settings);
if hash_old == hash_new && hash_old != 0 {
return;
}
let env_config = ENV_FILTER.get();
let map_new = ScopeMap::new_from_settings_and_env(settings, env_config);
if let Ok(_) = SCOPE_MAP_HASH.compare_exchange(
hash_old,
hash_new,
Ordering::Release,
Ordering::Relaxed,
) {
let mut map = SCOPE_MAP.write().unwrap_or_else(|err| {
SCOPE_MAP.clear_poison();
err.into_inner()
});
*map = Some(map_new);
}
}
fn level_from_level_str(level_str: &String) -> Option<log_impl::Level> {
let level = match level_str.to_ascii_lowercase().as_str() {
"" => log_impl::Level::Trace,
"trace" => log_impl::Level::Trace,
"debug" => log_impl::Level::Debug,
"info" => log_impl::Level::Info,
"warn" => log_impl::Level::Warn,
"error" => log_impl::Level::Error,
"off" | "disable" | "no" | "none" | "disabled" => {
crate::warn!(
"Invalid log level \"{level_str}\", set to error to disable non-error logging. Defaulting to error"
);
log_impl::Level::Error
}
_ => {
crate::warn!("Invalid log level \"{level_str}\", ignoring");
return None;
}
};
return Some(level);
}
fn scope_alloc_from_scope_str(scope_str: &String) -> Option<ScopeAlloc> {
let mut scope_buf = [""; SCOPE_DEPTH_MAX];
let mut index = 0;
let mut scope_iter = scope_str.split(SCOPE_STRING_SEP);
while index < SCOPE_DEPTH_MAX {
let Some(scope) = scope_iter.next() else {
break;
};
if scope == "" {
continue;
}
scope_buf[index] = scope;
index += 1;
}
if index == 0 {
return None;
}
if let Some(_) = scope_iter.next() {
crate::warn!(
"Invalid scope key, too many nested scopes: '{scope_str}'. Max depth is {SCOPE_DEPTH_MAX}",
);
return None;
}
let scope = scope_buf.map(|s| s.to_string());
return Some(scope);
}
pub struct ScopeMap {
entries: Vec<ScopeMapEntry>,
root_count: usize,
}
pub struct ScopeMapEntry {
scope: String,
enabled: Option<log_impl::Level>,
descendants: std::ops::Range<usize>,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum EnabledStatus {
Enabled,
Disabled,
NotConfigured,
}
impl ScopeMap {
pub fn new_from_settings_and_env(
items_input_map: &HashMap<String, String>,
env_config: Option<&env_config::EnvFilter>,
) -> Self {
let mut items = Vec::with_capacity(
items_input_map.len() + env_config.map_or(0, |c| c.directive_names.len()),
);
if let Some(env_filter) = env_config {
// TODO: parse on load instead of every reload
items.extend(
env_filter
.directive_names
.iter()
.zip(env_filter.directive_levels.iter())
.filter_map(|(scope, level_filter)| {
if items_input_map.get(scope).is_some() {
return None;
}
let scope = scope_alloc_from_scope_str(scope)?;
// TODO: use level filters instead of scopes in scope map
let level = level_filter.to_level()?;
Some((scope, level))
}),
);
}
items.extend(
items_input_map
.into_iter()
.filter_map(|(scope_str, level_str)| {
let scope = scope_alloc_from_scope_str(&scope_str)?;
let level = level_from_level_str(&level_str)?;
return Some((scope, level));
}),
);
items.sort_by(|a, b| a.0.cmp(&b.0));
let mut this = Self {
entries: Vec::with_capacity(items.len() * SCOPE_DEPTH_MAX),
root_count: 0,
};
let items_count = items.len();
struct ProcessQueueEntry {
parent_index: usize,
depth: usize,
items_range: std::ops::Range<usize>,
}
let mut process_queue = VecDeque::new();
process_queue.push_back(ProcessQueueEntry {
parent_index: usize::MAX,
depth: 0,
items_range: 0..items_count,
});
let empty_range = 0..0;
while let Some(process_entry) = process_queue.pop_front() {
let ProcessQueueEntry {
items_range,
depth,
parent_index,
} = process_entry;
let mut cursor = items_range.start;
let res_entries_start = this.entries.len();
while cursor < items_range.end {
let sub_items_start = cursor;
cursor += 1;
let scope_name = &items[sub_items_start].0[depth];
while cursor < items_range.end && &items[cursor].0[depth] == scope_name {
cursor += 1;
}
let sub_items_end = cursor;
if scope_name == "" {
assert_eq!(sub_items_start + 1, sub_items_end);
assert_ne!(depth, 0);
assert_ne!(parent_index, usize::MAX);
assert!(this.entries[parent_index].enabled.is_none());
this.entries[parent_index].enabled = Some(items[sub_items_start].1);
continue;
}
let is_valid_scope = scope_name != "";
let is_last = depth + 1 == SCOPE_DEPTH_MAX || !is_valid_scope;
let mut enabled = None;
if is_last {
assert_eq!(
sub_items_start + 1,
sub_items_end,
"Expected one item: got: {:?}",
&items[items_range.clone()]
);
enabled = Some(items[sub_items_start].1);
} else {
let entry_index = this.entries.len();
process_queue.push_back(ProcessQueueEntry {
items_range: sub_items_start..sub_items_end,
parent_index: entry_index,
depth: depth + 1,
});
}
this.entries.push(ScopeMapEntry {
scope: scope_name.to_owned(),
enabled,
descendants: empty_range.clone(),
});
}
let res_entries_end = this.entries.len();
if parent_index != usize::MAX {
this.entries[parent_index].descendants = res_entries_start..res_entries_end;
} else {
this.root_count = res_entries_end;
}
}
return this;
}
pub fn is_empty(&self) -> bool {
self.entries.is_empty()
}
pub fn is_enabled<S>(
&self,
scope: &[S; SCOPE_DEPTH_MAX],
level: log_impl::Level,
) -> EnabledStatus
where
S: AsRef<str>,
{
let mut enabled = None;
let mut cur_range = &self.entries[0..self.root_count];
let mut depth = 0;
'search: while !cur_range.is_empty()
&& depth < SCOPE_DEPTH_MAX
&& scope[depth].as_ref() != ""
{
for entry in cur_range {
if entry.scope == scope[depth].as_ref() {
// note:
enabled = entry.enabled.or(enabled);
cur_range = &self.entries[entry.descendants.clone()];
depth += 1;
continue 'search;
}
}
break 'search;
}
return enabled.map_or(EnabledStatus::NotConfigured, |level_enabled| {
if level <= level_enabled {
EnabledStatus::Enabled
} else {
EnabledStatus::Disabled
}
});
}
}
#[cfg(test)]
mod tests {
use crate::private::scope_new;
use super::*;
fn scope_map_from_keys(kv: &[(&str, &str)]) -> ScopeMap {
let hash_map: HashMap<String, String> = kv
.iter()
.map(|(k, v)| (k.to_string(), v.to_string()))
.collect();
ScopeMap::new_from_settings_and_env(&hash_map, None)
}
#[test]
fn test_initialization() {
let map = scope_map_from_keys(&[("a.b.c.d", "trace")]);
assert_eq!(map.root_count, 1);
assert_eq!(map.entries.len(), 4);
let map = scope_map_from_keys(&[]);
assert_eq!(map.root_count, 0);
assert_eq!(map.entries.len(), 0);
let map = scope_map_from_keys(&[("", "trace")]);
assert_eq!(map.root_count, 0);
assert_eq!(map.entries.len(), 0);
let map = scope_map_from_keys(&[("foo..bar", "trace")]);
assert_eq!(map.root_count, 1);
assert_eq!(map.entries.len(), 2);
let map = scope_map_from_keys(&[
("a.b.c.d", "trace"),
("e.f.g.h", "debug"),
("i.j.k.l", "info"),
("m.n.o.p", "warn"),
("q.r.s.t", "error"),
]);
assert_eq!(map.root_count, 5);
assert_eq!(map.entries.len(), 20);
assert_eq!(map.entries[0].scope, "a");
assert_eq!(map.entries[1].scope, "e");
assert_eq!(map.entries[2].scope, "i");
assert_eq!(map.entries[3].scope, "m");
assert_eq!(map.entries[4].scope, "q");
}
fn scope_from_scope_str(scope_str: &'static str) -> Scope {
let mut scope_buf = [""; SCOPE_DEPTH_MAX];
let mut index = 0;
let mut scope_iter = scope_str.split(SCOPE_STRING_SEP);
while index < SCOPE_DEPTH_MAX {
let Some(scope) = scope_iter.next() else {
break;
};
if scope == "" {
continue;
}
scope_buf[index] = scope;
index += 1;
}
assert_ne!(index, 0);
assert!(scope_iter.next().is_none());
return scope_buf;
}
#[test]
fn test_is_enabled() {
let map = scope_map_from_keys(&[
("a.b.c.d", "trace"),
("e.f.g.h", "debug"),
("i.j.k.l", "info"),
("m.n.o.p", "warn"),
("q.r.s.t", "error"),
]);
use log_impl::Level;
assert_eq!(
map.is_enabled(&scope_from_scope_str("a.b.c.d"), Level::Trace),
EnabledStatus::Enabled
);
assert_eq!(
map.is_enabled(&scope_from_scope_str("a.b.c.d"), Level::Debug),
EnabledStatus::Enabled
);
assert_eq!(
map.is_enabled(&scope_from_scope_str("e.f.g.h"), Level::Debug),
EnabledStatus::Enabled
);
assert_eq!(
map.is_enabled(&scope_from_scope_str("e.f.g.h"), Level::Info),
EnabledStatus::Enabled
);
assert_eq!(
map.is_enabled(&scope_from_scope_str("e.f.g.h"), Level::Trace),
EnabledStatus::Disabled
);
assert_eq!(
map.is_enabled(&scope_from_scope_str("i.j.k.l"), Level::Info),
EnabledStatus::Enabled
);
assert_eq!(
map.is_enabled(&scope_from_scope_str("i.j.k.l"), Level::Warn),
EnabledStatus::Enabled
);
assert_eq!(
map.is_enabled(&scope_from_scope_str("i.j.k.l"), Level::Debug),
EnabledStatus::Disabled
);
assert_eq!(
map.is_enabled(&scope_from_scope_str("m.n.o.p"), Level::Warn),
EnabledStatus::Enabled
);
assert_eq!(
map.is_enabled(&scope_from_scope_str("m.n.o.p"), Level::Error),
EnabledStatus::Enabled
);
assert_eq!(
map.is_enabled(&scope_from_scope_str("m.n.o.p"), Level::Info),
EnabledStatus::Disabled
);
assert_eq!(
map.is_enabled(&scope_from_scope_str("q.r.s.t"), Level::Error),
EnabledStatus::Enabled
);
assert_eq!(
map.is_enabled(&scope_from_scope_str("q.r.s.t"), Level::Warn),
EnabledStatus::Disabled
);
}
fn scope_map_from_keys_and_env(
kv: &[(&str, &str)],
env: &env_config::EnvFilter,
) -> ScopeMap {
let hash_map: HashMap<String, String> = kv
.iter()
.map(|(k, v)| (k.to_string(), v.to_string()))
.collect();
ScopeMap::new_from_settings_and_env(&hash_map, Some(env))
}
#[test]
fn test_initialization_with_env() {
let env_filter = env_config::parse("a.b=debug,u=error").unwrap();
let map = scope_map_from_keys_and_env(&[], &env_filter);
assert_eq!(map.root_count, 2);
assert_eq!(map.entries.len(), 3);
assert_eq!(
map.is_enabled(&scope_new(&["a"]), log_impl::Level::Debug),
EnabledStatus::NotConfigured
);
assert_eq!(
map.is_enabled(&scope_new(&["a", "b"]), log_impl::Level::Debug),
EnabledStatus::Enabled
);
assert_eq!(
map.is_enabled(&scope_new(&["a", "b", "c"]), log_impl::Level::Trace),
EnabledStatus::Disabled
);
let env_filter = env_config::parse("a.b=debug,e.f.g.h=trace,u=error").unwrap();
let map = scope_map_from_keys_and_env(
&[
("a.b.c.d", "trace"),
("e.f.g.h", "debug"),
("i.j.k.l", "info"),
("m.n.o.p", "warn"),
("q.r.s.t", "error"),
],
&env_filter,
);
assert_eq!(map.root_count, 6);
assert_eq!(map.entries.len(), 21);
assert_eq!(map.entries[0].scope, "a");
assert_eq!(map.entries[1].scope, "e");
assert_eq!(map.entries[2].scope, "i");
assert_eq!(map.entries[3].scope, "m");
assert_eq!(map.entries[4].scope, "q");
assert_eq!(map.entries[5].scope, "u");
assert_eq!(
map.is_enabled(&scope_new(&["a", "b", "c", "d"]), log_impl::Level::Trace),
EnabledStatus::Enabled
);
assert_eq!(
map.is_enabled(&scope_new(&["a", "b", "c"]), log_impl::Level::Trace),
EnabledStatus::Disabled
);
assert_eq!(
map.is_enabled(&scope_new(&["u", "v"]), log_impl::Level::Warn),
EnabledStatus::Disabled
);
// settings override env
assert_eq!(
map.is_enabled(&scope_new(&["e", "f", "g", "h"]), log_impl::Level::Trace),
EnabledStatus::Disabled,
);
}
}
}
 #[cfg(test)]
 mod tests {
     use super::*;


@@ -10,7 +10,7 @@ pub fn init(cx: &mut App) {
     cx.observe_global::<SettingsStore>(|cx| {
         let zlog_settings = ZlogSettings::get_global(cx);
-        zlog::scope_map::refresh_from_settings(&zlog_settings.scopes);
+        zlog::filter::refresh_from_settings(&zlog_settings.scopes);
     })
     .detach();
 }