diff --git a/contributors.txt b/contributors.txt
index 927b19a0..8631af4c 100644
--- a/contributors.txt
+++ b/contributors.txt
@@ -1,11 +1,11 @@
 Hayabusa was possible thanks to the following people (in alphabetical order):
 Akira Nishikawa (@nishikawaakira): Previous lead developer, core hayabusa rule support, etc...
-DustInDark(@hitenkoku): Core developer, project management, sigma count implementation, rule creation, countless feature additions and fixes, etc…
+DustInDark(@hitenkoku): Core developer, project management, sigma count implementation, rule creation, countless feature additions and fixes, etc…
 Garigariganzy (@garigariganzy31): Developer, event ID statistics implementation, etc...
 ItiB (@itiB_S144) : Core developer, sigmac hayabusa backend, rule creation, etc...
 James Takai / hachiyone(@hach1yon): Current lead developer, tokio multi-threading, sigma aggregation logic, sigmac backend, rule creation, etc…
-Kazuminn (@k2warugaki): Developer
+Kazuminn (@k47_um1n): Developer
 Yusuke Matsui (@apt773): AD hacking working group leader, rule testing, documentation, research, support, etc...
 Zach Mathis (@yamatosecurity, Yamato Security Founder): Project leader, tool and concept design, rule creation and tuning, etc…
 
@@ -14,9 +14,9 @@ Hayabusa would not have been possible without first creating RustyBlue, so we wo
 Zach Mathis (@yamatosecurity, Yamato Security Founder): Project Leader
 Nishikawa Akira (@nishikawaakira): Lead Developer
-kazuminn (@k2warugaki): Core Developer
+kazuminn (@k47_um1n): Core Developer
 itiB (@itiB_S144): Core Developer
-James Takai / hachiyone (@hach1yon): Developer
+James Takai / hachiyone (@hach1yon): Core Developer
 DustInDark (@hitenkoku): Core Developer
 garigariganzy (@garigariganzy31): Developer
 7itoh (@yNitocrypto22): Developer
diff --git a/src/afterfact.rs b/src/afterfact.rs
index 5cb9fcf0..753c8633 100644
--- a/src/afterfact.rs
+++ b/src/afterfact.rs
@@ -42,6 +42,7 @@ pub fn after_fact() {
         .ok();
         process::exit(1);
     };
+    let mut displayflag = false;
     let mut target: Box = if let Some(csv_path) =
         configs::CONFIG.read().unwrap().args.value_of("output")
     {
@@ -77,8 +78,13 @@ fn emit_csv(writer: &mut W, displayflag: bool) -> io::Result<
     } else {
         wtr = csv::WriterBuilder::new().from_writer(writer);
     }
+    let messages = print::MESSAGES.lock().unwrap();
-    let mut detect_count = 0;
+    // levelの区分が"Critical","High","Medium","Low","Informational","Undefined"の6つであるため
+    let mut total_detect_counts_by_level: Vec = vec![0; 6];
+    let mut unique_detect_counts_by_level: Vec = vec![0; 6];
+    let mut detected_rule_files: Vec = Vec::new();
+
     for (time, detect_infos) in messages.iter() {
         for detect_info in detect_infos {
             if displayflag {
@@ -103,17 +109,61 @@ fn emit_csv(writer: &mut W, displayflag: bool) -> io::Result<
                     details: &detect_info.detail,
                 })?;
             }
+            let level_suffix = *configs::LEVELMAP
+                .get(&detect_info.level.to_uppercase())
+                .unwrap_or(&0) as usize;
+            if !detected_rule_files.contains(&detect_info.rulepath) {
+                detected_rule_files.push(detect_info.rulepath.clone());
+                unique_detect_counts_by_level[level_suffix] += 1;
+            }
+            total_detect_counts_by_level[level_suffix] += 1;
         }
-        detect_count += detect_infos.len();
     }
     println!("");
     wtr.flush()?;
     println!("");
-    println!("Total events: {:?}", detect_count);
+    _print_unique_results(
+        total_detect_counts_by_level,
+        "Total".to_string(),
+        "detections".to_string(),
+    );
+    _print_unique_results(
+        unique_detect_counts_by_level,
+        "Unique".to_string(),
+        "rules".to_string(),
+    );
     Ok(())
 }
+///
与えられたユニークな検知数と全体の検知数の情報(レベル別と総計)を元に結果文を標準出力に表示する関数 +fn _print_unique_results(mut counts_by_level: Vec, head_word: String, tail_word: String) { + let levels = Vec::from([ + "Critical", + "High", + "Medium", + "Low", + "Informational", + "Undefined", + ]); + + // configsの登録順番と表示をさせたいlevelの順番が逆であるため + counts_by_level.reverse(); + + // 全体の集計(levelの記載がないためformatの第二引数は空の文字列) + println!( + "{} {}:{}", + head_word, + tail_word, + counts_by_level.iter().sum::() + ); + for (i, level_name) in levels.iter().enumerate() { + println!( + "{} {} {}:{}", + head_word, level_name, tail_word, counts_by_level[i] + ); + } +} fn format_time(time: &DateTime) -> String { if configs::CONFIG.read().unwrap().args.is_present("utc") { format_rfc(time) diff --git a/src/detections/detection.rs b/src/detections/detection.rs index 103297d9..d63a73e8 100644 --- a/src/detections/detection.rs +++ b/src/detections/detection.rs @@ -1,6 +1,5 @@ extern crate csv; -use crate::detections::configs; use crate::detections::print::AlertMessage; use crate::detections::print::MESSAGES; use crate::detections::rule; @@ -132,7 +131,7 @@ impl Detection { return self; } - pub fn add_aggcondtion_msg(&self) { + pub fn add_aggcondition_msg(&self) { for rule in &self.rules { if !rule.has_agg_condition() { continue; @@ -145,46 +144,11 @@ impl Detection { } } - pub fn print_unique_results(&self) { - let rules = &self.rules; - let levellabel = Vec::from([ - "Critical", - "High", - "Medium", - "Low", - "Informational", - "Undefined", - ]); - // levclcounts is [(Undefined), (Informational), (Low),(Medium),(High),(Critical)] - let mut levelcounts = Vec::from([0, 0, 0, 0, 0, 0]); - for rule in rules.into_iter() { - if rule.check_exist_countdata() { - let suffix = configs::LEVELMAP - .get( - &rule.yaml["level"] - .as_str() - .unwrap_or("") - .to_owned() - .to_uppercase(), - ) - .unwrap_or(&0); - levelcounts[*suffix as usize] += 1; - } - } - let mut total_unique = 0; - levelcounts.reverse(); - for (i, value) in levelcounts.iter().enumerate() { - println!("{} alerts: {}", levellabel[i], value); - total_unique += value; - } - println!("Unique alerts detected: {}", total_unique); - } - // 複数のイベントレコードに対して、ルールを1個実行します。 fn execute_rule(mut rule: RuleNode, records: Arc>) -> RuleNode { let agg_condition = rule.has_agg_condition(); for record_info in records.as_ref() { - let result = rule.select(&record_info.evtx_filepath, &record_info); + let result = rule.select(&record_info); if !result { continue; } @@ -219,34 +183,63 @@ impl Detection { fn insert_agg_message(rule: &RuleNode, agg_result: AggResult) { let output = Detection::create_count_output(rule, &agg_result); MESSAGES.lock().unwrap().insert_message( - agg_result.filepath, - rule.rulepath.to_string(), + "-".to_owned(), + rule.rulepath.to_owned(), agg_result.start_timedate, - rule.yaml["level"].as_str().unwrap_or("").to_string(), - "-".to_string(), - "-".to_string(), - rule.yaml["title"].as_str().unwrap_or("").to_string(), - output.to_string(), + rule.yaml["level"].as_str().unwrap_or("").to_owned(), + "-".to_owned(), + "-".to_owned(), + rule.yaml["title"].as_str().unwrap_or("").to_owned(), + output.to_owned(), ) } ///aggregation conditionのcount部分の検知出力文の文字列を返す関数 fn create_count_output(rule: &RuleNode, agg_result: &AggResult) -> String { - let mut ret: String = "count(".to_owned(); - let key: Vec<&str> = agg_result.key.split("_").collect(); - if key.len() >= 1 { - ret.push_str(key[0]); + // 条件式部分の出力 + let mut ret: String = "[condition] ".to_owned(); + let agg_condition_raw_str: Vec<&str> = 
rule.yaml["detection"]["condition"] + .as_str() + .unwrap() + .split("|") + .collect(); + // この関数が呼び出されている段階で既にaggregation conditionは存在する前提なのでunwrap前の確認は行わない + let agg_condition = rule.get_agg_condition().unwrap(); + let exist_timeframe = rule.yaml["detection"]["timeframe"] + .as_str() + .unwrap_or("") + .to_string() + != ""; + // この関数が呼び出されている段階で既にaggregation conditionは存在する前提なのでagg_conditionの配列の長さは2となる + ret.push_str(agg_condition_raw_str[1].trim()); + if exist_timeframe { + ret.push_str(" in timeframe"); } - ret.push_str(&") "); - if key.len() >= 2 { - ret.push_str("by "); - ret.push_str(key[1]); + + ret.push_str(&format!(" [result] count:{}", agg_result.data)); + if agg_condition._field_name.is_some() { + ret.push_str(&format!( + " {}:{}", + agg_condition._field_name.as_ref().unwrap(), + agg_result.field_values.join("/") + )); } - ret.push_str(&format!( - "{} in {}.", - agg_result.condition_op_num, - rule.yaml["timeframe"].as_str().unwrap_or(""), - )); + + if agg_condition._by_field_name.is_some() { + ret.push_str(&format!( + " {}:{}", + agg_condition._by_field_name.as_ref().unwrap(), + agg_result.key + )); + } + + if exist_timeframe { + ret.push_str(&format!( + " timeframe:{}", + rule.yaml["detection"]["timeframe"].as_str().unwrap() + )); + } + return ret; } pub fn print_rule_load_info( @@ -266,10 +259,196 @@ impl Detection { } } -#[test] -fn test_parse_rule_files() { - let level = "informational"; - let opt_rule_path = Some("./test_files/rules/level_yaml"); - let cole = Detection::parse_rule_files(level.to_owned(), opt_rule_path, &filter::exclude_ids()); - assert_eq!(5, cole.len()); +#[cfg(test)] +mod tests { + + use crate::detections::detection::Detection; + use crate::detections::rule::create_rule; + use crate::detections::rule::AggResult; + use crate::filter; + use chrono::{TimeZone, Utc}; + use yaml_rust::YamlLoader; + + #[test] + fn test_parse_rule_files() { + let level = "informational"; + let opt_rule_path = Some("./test_files/rules/level_yaml"); + let cole = + Detection::parse_rule_files(level.to_owned(), opt_rule_path, &filter::exclude_ids()); + assert_eq!(5, cole.len()); + } + + #[test] + fn test_output_aggregation_output_with_output() { + let default_time = Utc.ymd(1977, 1, 1).and_hms(0, 0, 0); + let agg_result: AggResult = + AggResult::new(2, "_".to_string(), vec![], default_time, ">= 1".to_string()); + let rule_str = r#" + enabled: true + detection: + selection1: + Channel: 'System' + selection2: + EventID: 7040 + selection3: + param1: 'Windows Event Log' + condition: selection1 and selection2 and selection3 | count() >= 1 + output: testdata + "#; + let mut rule_yaml = YamlLoader::load_from_str(rule_str).unwrap().into_iter(); + let test = rule_yaml.next().unwrap(); + let mut rule_node = create_rule("testpath".to_string(), test); + rule_node.init().ok(); + let expected_output = "[condition] count() >= 1 [result] count:2"; + assert_eq!( + Detection::create_count_output(&rule_node, &agg_result), + expected_output + ); + } + + #[test] + fn test_output_aggregation_output_no_filed_by() { + let default_time = Utc.ymd(1977, 1, 1).and_hms(0, 0, 0); + let agg_result: AggResult = + AggResult::new(2, "_".to_string(), vec![], default_time, ">= 1".to_string()); + let rule_str = r#" + enabled: true + detection: + selection1: + Channel: 'System' + selection2: + EventID: 7040 + selection3: + param1: 'Windows Event Log' + condition: selection1 and selection2 and selection3 | count() >= 1 + "#; + let mut rule_yaml = YamlLoader::load_from_str(rule_str).unwrap().into_iter(); + let test = 
rule_yaml.next().unwrap(); + let mut rule_node = create_rule("testpath".to_string(), test); + rule_node.init().ok(); + let expected_output = "[condition] count() >= 1 [result] count:2"; + assert_eq!( + Detection::create_count_output(&rule_node, &agg_result), + expected_output + ); + } + + #[test] + fn test_output_aggregation_output_with_timeframe() { + let default_time = Utc.ymd(1977, 1, 1).and_hms(0, 0, 0); + let agg_result: AggResult = + AggResult::new(2, "_".to_string(), vec![], default_time, ">= 1".to_string()); + let rule_str = r#" + enabled: true + detection: + selection1: + Channel: 'System' + selection2: + EventID: 7040 + selection3: + param1: 'Windows Event Log' + condition: selection1 and selection2 and selection3 | count() >= 1 + timeframe: 15m + "#; + let mut rule_yaml = YamlLoader::load_from_str(rule_str).unwrap().into_iter(); + let test = rule_yaml.next().unwrap(); + let mut rule_node = create_rule("testpath".to_string(), test); + rule_node.init().ok(); + let expected_output = + "[condition] count() >= 1 in timeframe [result] count:2 timeframe:15m"; + assert_eq!( + Detection::create_count_output(&rule_node, &agg_result), + expected_output + ); + } + + #[test] + fn test_output_aggregation_output_with_field() { + let default_time = Utc.ymd(1977, 1, 1).and_hms(0, 0, 0); + let agg_result: AggResult = AggResult::new( + 2, + "_".to_string(), + vec!["7040".to_owned(), "9999".to_owned()], + default_time, + ">= 1".to_string(), + ); + let rule_str = r#" + enabled: true + detection: + selection1: + Channel: 'System' + selection2: + param1: 'Windows Event Log' + condition: selection1 and selection2 | count(EventID) >= 1 + "#; + let mut rule_yaml = YamlLoader::load_from_str(rule_str).unwrap().into_iter(); + let test = rule_yaml.next().unwrap(); + let mut rule_node = create_rule("testpath".to_string(), test); + rule_node.init().ok(); + let expected_output = "[condition] count(EventID) >= 1 [result] count:2 EventID:7040/9999"; + assert_eq!( + Detection::create_count_output(&rule_node, &agg_result), + expected_output + ); + } + + #[test] + fn test_output_aggregation_output_with_field_by() { + let default_time = Utc.ymd(1977, 1, 1).and_hms(0, 0, 0); + let agg_result: AggResult = AggResult::new( + 2, + "lsass.exe".to_string(), + vec!["0000".to_owned(), "1111".to_owned()], + default_time, + ">= 1".to_string(), + ); + let rule_str = r#" + enabled: true + detection: + selection1: + Channel: 'System' + selection2: + param1: 'Windows Event Log' + condition: selection1 and selection2 | count(EventID) by process >= 1 + "#; + let mut rule_yaml = YamlLoader::load_from_str(rule_str).unwrap().into_iter(); + let test = rule_yaml.next().unwrap(); + let mut rule_node = create_rule("testpath".to_string(), test); + rule_node.init().ok(); + let expected_output = "[condition] count(EventID) by process >= 1 [result] count:2 EventID:0000/1111 process:lsass.exe"; + assert_eq!( + Detection::create_count_output(&rule_node, &agg_result), + expected_output + ); + } + #[test] + fn test_output_aggregation_output_with_by() { + let default_time = Utc.ymd(1977, 1, 1).and_hms(0, 0, 0); + let agg_result: AggResult = AggResult::new( + 2, + "lsass.exe".to_string(), + vec![], + default_time, + ">= 1".to_string(), + ); + let rule_str = r#" + enabled: true + detection: + selection1: + Channel: 'System' + selection2: + param1: 'Windows Event Log' + condition: selection1 and selection2 | count() by process >= 1 + "#; + let mut rule_yaml = YamlLoader::load_from_str(rule_str).unwrap().into_iter(); + let test = 
rule_yaml.next().unwrap(); + let mut rule_node = create_rule("testpath".to_string(), test); + rule_node.init().ok(); + let expected_output = + "[condition] count() by process >= 1 [result] count:2 process:lsass.exe"; + assert_eq!( + Detection::create_count_output(&rule_node, &agg_result), + expected_output + ); + } } diff --git a/src/detections/rule/condition_parser.rs b/src/detections/rule/condition_parser.rs index 3f37ed60..984a9fca 100644 --- a/src/detections/rule/condition_parser.rs +++ b/src/detections/rule/condition_parser.rs @@ -538,10 +538,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!( - rule_node.select(&"testpath".to_owned(), &recinfo), - expect_select - ); + assert_eq!(rule_node.select(&recinfo), expect_select); } Err(_rec) => { assert!(false, "Failed to parse json record."); @@ -584,7 +581,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), true); + assert_eq!(rule_node.select(&recinfo), true); } Err(_rec) => { assert!(false, "Failed to parse json record."); @@ -628,7 +625,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), false); + assert_eq!(rule_node.select(&recinfo), false); } Err(_rec) => { assert!(false, "Failed to parse json record."); diff --git a/src/detections/rule/count.rs b/src/detections/rule/count.rs index d4152518..c675589a 100644 --- a/src/detections/rule/count.rs +++ b/src/detections/rule/count.rs @@ -4,22 +4,33 @@ use crate::detections::rule::AggregationParseInfo; use crate::detections::rule::Message; use crate::detections::rule::RuleNode; use chrono::{DateTime, TimeZone, Utc}; +use hashbrown::HashMap; use serde_json::Value; -use std::collections::HashMap; use std::num::ParseIntError; +use std::path::Path; use crate::detections::rule::aggregation_parser::AggregationConditionToken; use crate::detections::utils; /// 検知された際にカウント情報を投入する関数 -pub fn count(rule: &mut RuleNode, filepath: &String, record: &Value) { +pub fn count(rule: &mut RuleNode, record: &Value) { let key = create_count_key(&rule, record); + let field_name: String = match rule.get_agg_condition() { + None => String::default(), + Some(aggcondition) => aggcondition + ._field_name + .as_ref() + .unwrap_or(&String::default()) + .to_owned(), + }; + let field_value = + get_alias_value_in_record(rule, &field_name, record, false).unwrap_or(String::default()); let default_time = Utc.ymd(1977, 1, 1).and_hms(0, 0, 0); countup( rule, - filepath, - &key, + key, + field_value, Message::get_event_time(record).unwrap_or(default_time), ); } @@ -27,74 +38,84 @@ pub fn count(rule: &mut RuleNode, filepath: &String, record: &Value) { ///count byの条件に合致する検知済みレコードの数を増やすための関数 pub fn countup( rule: &mut RuleNode, - filepath: &String, - key: &str, + key: String, + field_value: String, record_time_value: DateTime, ) { - rule.countdata - .entry(filepath.to_string()) - .or_insert(HashMap::new()); - let value_map = rule.countdata.get_mut(filepath).unwrap(); - value_map.entry(key.to_string()).or_insert(Vec::new()); - let mut prev_value = value_map[key].clone(); - prev_value.push(record_time_value); - value_map.insert(key.to_string(), 
prev_value); + let value_map = rule.countdata.entry(key).or_insert(Vec::new()); + value_map.push(AggRecordTimeInfo { + field_record_value: field_value, + record_time: record_time_value, + }); } -/// countでgroupbyなどの情報を区分するためのハッシュマップのキーを作成する関数 +/// 与えられたエイリアスから対象レコード内の値を取得してダブルクオーテーションを外す関数。 +/// ダブルクオーテーションを外す理由は結果表示の際に余計なダブルクオーテーションが入るのを防ぐため +/// is_by_aliasはこの関数を呼び出す際はcountのbyの値もしくはfieldの値のどちらかであるためboolとした +fn get_alias_value_in_record( + rule: &RuleNode, + alias: &String, + record: &Value, + is_by_alias: bool, +) -> Option { + if alias == "" { + return None; + } + match utils::get_event_value(alias, record) { + Some(value) => { + return Some(value.to_string().replace("\"", "")); + } + None => { + AlertMessage::alert( + &mut std::io::stderr().lock(), + match is_by_alias { + true => format!( + "count by clause alias value not found in count process. rule file:{} EventID:{}", + Path::new(&rule.rulepath) + .file_name() + .unwrap() + .to_str() + .unwrap(), + utils::get_event_value(&utils::get_event_id_key(), record).unwrap() + ), + false => format!( + "count field clause alias value not found in count process. rule file:{} EventID:{}", + Path::new(&rule.rulepath) + .file_name() + .unwrap() + .to_str() + .unwrap(), + utils::get_event_value(&utils::get_event_id_key(), record).unwrap() + ), + }, + ) + .ok(); + return None; + } + }; +} + +/// countでgroupbyなどの情報を区分するためのハッシュマップのキーを作成する関数。 +/// 以下の場合は空文字を返却 +/// groupbyの指定がない、groubpbyで指定したエイリアスがレコードに存在しない場合は_のみとする。空文字ではキーを指定してデータを取得することができなかった pub fn create_count_key(rule: &RuleNode, record: &Value) -> String { - if rule.detection.aggregation_condition.as_ref().is_none() { + let agg_condition = rule.get_agg_condition().unwrap(); + if agg_condition._by_field_name.is_some() { + let by_field_key = agg_condition._by_field_name.as_ref().unwrap(); + return get_alias_value_in_record(rule, by_field_key, record, true) + .unwrap_or("_".to_string()); + } else { return "_".to_string(); } - let aggcondition = rule.detection.aggregation_condition.as_ref().unwrap(); - // recordでaliasが登録されている前提とする - let mut key = "".to_string(); - if aggcondition._field_name.is_some() { - let field_value = aggcondition._field_name.as_ref().unwrap(); - match utils::get_event_value(field_value, record) { - Some(value) => { - key.push_str(&value.to_string().replace("\"", "")); - } - None => { - AlertMessage::alert( - &mut std::io::stderr().lock(), - format!("field_value alias not found.value:{}", field_value), - ) - .ok(); - } - }; - } - key.push_str("_"); - if aggcondition._by_field_name.is_some() { - let by_field_value = aggcondition._by_field_name.as_ref().unwrap(); - match utils::get_event_value(by_field_value, record) { - Some(value) => { - key.push_str(&value.to_string().replace("\"", "")); - } - None => { - AlertMessage::alert( - &mut std::io::stderr().lock(), - format!("by_field_value alias not found.value:{}", by_field_value), - ) - .ok(); - } - } - } - return key; } ///現状のレコードの状態から条件式に一致しているかを判定する関数 -pub fn aggregation_condition_select(rule: &RuleNode, filepath: &String) -> Vec { +pub fn aggregation_condition_select(rule: &RuleNode) -> Vec { // recordでaliasが登録されている前提とする - let value_map = rule.countdata.get(filepath).unwrap(); + let value_map = &rule.countdata; let mut ret = Vec::new(); for (key, value) in value_map { - ret.append(&mut judge_timeframe( - &rule, - &filepath.to_string(), - value, - &key.to_string(), - )); + ret.append(&mut judge_timeframe(&rule, &value, &key.to_string())); } return ret; } @@ -129,6 +150,13 @@ pub fn get_str_agg_eq(rule: &RuleNode) -> String { 
return ret; } +#[derive(Clone, Debug)] +/// countの括弧内の情報とレコードの情報を所持する構造体 +pub struct AggRecordTimeInfo { + pub field_record_value: String, + pub record_time: DateTime, +} + #[derive(Debug)] /// timeframeに設定された情報。SIGMAルール上timeframeで複数の単位(日、時、分、秒)が複合で記載されているルールがなかったためタイプと数値のみを格納する構造体 pub struct TimeFrameInfo { @@ -239,78 +267,183 @@ pub fn select_aggcon(cnt: i32, aggcondition: &AggregationParseInfo) -> bool { } } +/// condtionの分岐によって同じ型を返すif-letのジェネリクス +fn _if_condition_fn_caller S, S, U: FnMut() -> S>( + condition: bool, + mut process_true: T, + mut process_false: U, +) -> S { + if condition { + process_true() + } else { + process_false() + } +} + /// count済みデータ内でタイムフレーム内に存在するselectの条件を満たすレコードが、timeframe単位でcountの条件を満たしているAggResultを配列として返却する関数 pub fn judge_timeframe( rule: &RuleNode, - filepath: &String, - time_datas: &Vec>, + time_datas: &Vec, key: &String, ) -> Vec { let mut ret: Vec = Vec::new(); let mut time_data = time_datas.clone(); - time_data.sort(); + // 番兵 + let stop_time = Utc.ymd(9999, 12, 31).and_hms(23, 59, 59); let aggcondition = rule.detection.aggregation_condition.as_ref().unwrap(); + let exist_field = aggcondition._field_name.is_some(); + let mut start_point = 0; - // 最初はcountの条件として記載されている分のレコードを取得するためのindex指定 - let mut check_point = start_point + aggcondition._cmp_num - 1; // timeframeで指定された基準の値を秒数として保持 let judge_sec_frame = get_sec_timeframe(&rule.detection.timeframe); - loop { - // 基準となるレコードもしくはcountを最低限満たす対象のレコードのindexが配列の領域を超えていた場合 - if start_point as usize >= time_data.len() || check_point as usize >= time_data.len() { - // 最終のレコードを対象として時刻を確認する - let check_point_date = time_data[time_data.len() - 1]; - let diff = check_point_date.timestamp() - time_data[start_point as usize].timestamp(); - // 対象のレコード数を基準となるindexから計算 - let mut count_set_cnt = time_data.len() - (start_point as usize); - if judge_sec_frame.is_some() && diff > judge_sec_frame.unwrap() { - //すでにcountを満たしている状態で1つずつdiffを確認している場合は適正な個数指定となり、もともとcountの条件が残りデータ個数より多い場合は-1したことによってcountの判定でもfalseになるため - count_set_cnt -= count_set_cnt - 1; - } + let mut loaded_field_value: HashMap = HashMap::new(); - // timeframe内に入っている場合があるため判定を行う - let judge = select_aggcon(count_set_cnt as i32, &aggcondition); - if judge { - ret.push(AggResult::new( - filepath.to_string(), - count_set_cnt as i32, - key.to_string(), - time_data[start_point as usize], - get_str_agg_eq(rule), - )); - } - break; + let mut stop_time_datas: Vec = (1..=aggcondition._cmp_num) + .map(|_a| AggRecordTimeInfo { + record_time: stop_time, + field_record_value: "".to_string(), + }) + .collect(); + + time_data.append(&mut stop_time_datas); + time_data.sort_by(|a, b| a.record_time.cmp(&b.record_time)); + + // 次のチェックポイントのindexを取得する関数 + let get_next_checkpoint = |cal_point| { + if cal_point + aggcondition._cmp_num - 1 > (time_data.len() - 1) as i32 { + (time_data.len() - 1) as i32 + } else { + cal_point + aggcondition._cmp_num - 1 } - // 基準となるレコードと時刻比較を行う対象のレコード時刻情報を取得する - let check_point_date = time_data[check_point as usize]; - let diff = check_point_date.timestamp() - time_data[start_point as usize].timestamp(); - // timeframeで指定した情報と比較して時刻差がtimeframeの枠を超えていた場合(timeframeの属性を記載していない場合はこの処理を行わない) - if judge_sec_frame.is_some() && diff > judge_sec_frame.unwrap() { - let count_set_cnt = check_point - start_point; - let judge = select_aggcon(count_set_cnt, &aggcondition); - // timeframe内の対象のレコード数がcountの条件を満たさなかった場合、基準となるレコードを1つずらし、countの判定基準分のindexを設定して、次のレコードから始まるtimeframeの判定を行う - if !judge { - start_point += 1; - check_point = start_point + aggcondition._cmp_num 
- 1; + }; + // 最初はcountの条件として記載されている分のレコードを取得するためのindex指定 + let mut check_point = get_next_checkpoint(start_point); + + *loaded_field_value + .entry(time_data[0].field_record_value.to_string()) + .or_insert(0) += 1; + + while time_data[start_point as usize].record_time != stop_time + && check_point < time_data.len() as i32 + { + // 基準となるレコードと時刻比較を行う対象のレコード時刻情報を取得する + let check_point_date = &time_data[check_point as usize]; + let diff = check_point_date.record_time.timestamp() + - time_data[start_point as usize].record_time.timestamp(); + // timeframeで指定した情報と比較して時刻差がtimeframeの枠を超えていた場合 + if judge_sec_frame.is_some() && diff > judge_sec_frame.unwrap() { + // 検査対象データが1個しかない状態でaggregation conditionの条件が1であるときにデータ個数が0になってしまう問題への対応 + let count_set_cnt = check_point - start_point; + // timeframe内に入っている場合があるため判定を行う + let result_set_cnt: i32 = _if_condition_fn_caller( + exist_field, + || { + time_data[(start_point as usize + 1)..(check_point as usize)] + .iter() + .for_each(|timedata| { + *loaded_field_value + .entry(timedata.field_record_value.to_string()) + .or_insert(0) += 1; + }); + loaded_field_value.len() as i32 + }, + || count_set_cnt as i32, + ); + // timeframe内の対象のレコード数がcountの条件を満たさなかった場合、基準となるレコードを1つずらし、countの判定基準分のindexを設定して、次のレコードから始まるtimeframeの判定を行う + if !select_aggcon(result_set_cnt, &aggcondition) { + _if_condition_fn_caller( + exist_field && time_data[start_point as usize].record_time != stop_time, + || { + let counter = loaded_field_value + .entry( + time_data[start_point as usize] + .field_record_value + .to_string(), + ) + .or_insert(1); + *counter -= 1; + if *counter == 0 as u128 { + loaded_field_value + .remove(&time_data[start_point as usize].field_record_value); + } + }, + || {}, + ); + start_point += 1; + check_point = get_next_checkpoint(start_point); continue; } + let field_values: Vec = loaded_field_value + .keys() + .filter(|key| **key != "") + .map(|key| key.to_string()) + .collect(); //timeframe内の対象のレコード数がcountの条件を満たした場合は返却用の変数に結果を投入する ret.push(AggResult::new( - filepath.to_string(), - count_set_cnt, + result_set_cnt, key.to_string(), - time_data[start_point as usize], + field_values, + time_data[start_point as usize].record_time, get_str_agg_eq(rule), )); // timeframe投入内の対象レコード数がcountの条件を満たした場合は、すでに判定済みのtimeframe内では同様に検知を行うことになり、過検知となってしまうため、今回timeframe内と判定された最後のレコードの次のレコードを次の基準として参照するようにindexを設定する start_point = check_point; - check_point = start_point + aggcondition._cmp_num - 1; + check_point = get_next_checkpoint(start_point); + loaded_field_value = HashMap::new(); + *loaded_field_value + .entry(time_data[0].field_record_value.to_string()) + .or_insert(0) += 1; } else { - // timeframeで指定した情報と比較して。時刻差がtimeframeの枠を超えていない場合は次のレコード時刻情報を参照して、timeframe内であるかを判定するため + // 条件の基準が1の時に最初の要素を2回読み込む事を防止するため + _if_condition_fn_caller( + check_point_date.record_time != stop_time && check_point != 0, + || { + *loaded_field_value + .entry(check_point_date.field_record_value.to_string()) + .or_insert(0) += 1; + () + }, + || {}, + ); + // timeframeで指定した情報と比較して、時刻差がtimeframeの枠を超えていない場合は次のレコード時刻情報を参照して、timeframe内であるかを判定するため check_point += 1; } } + + // timeframeがないルールの場合の判定(フィールドの読み込みはwhile内で実施済み) + + if judge_sec_frame.is_none() { + if exist_field && select_aggcon(loaded_field_value.keys().len() as i32, &aggcondition) { + let field_values: Vec = loaded_field_value + .keys() + .filter(|key| **key != "") + .map(|key| key.to_string()) + .collect(); + //timeframe内の対象のレコード数がcountの条件を満たした場合は返却用の変数に結果を投入する + ret.push(AggResult::new( + loaded_field_value.values().map(|value| *value as 
i32).sum(), + key.to_string(), + field_values, + time_data[start_point as usize].record_time, + get_str_agg_eq(rule), + )); + } else { + if select_aggcon( + *loaded_field_value.get("").unwrap_or(&0) as i32, + &aggcondition, + ) { + //timeframe内の対象のレコード数がcountの条件を満たした場合は返却用の変数に結果を投入する + ret.push(AggResult::new( + *loaded_field_value.get("").unwrap_or(&0) as i32, + key.to_string(), + vec![], + time_data[start_point as usize].record_time, + get_str_agg_eq(rule), + )); + } + } + } return ret; } @@ -376,14 +509,13 @@ mod tests { condition: selection1 and selection2 and selection3 | count() >= 1 output: 'Service name : %param1%¥nMessage : Event Log Service Stopped¥nResults: Selective event log manipulation may follow this event.' "#; - let default_time = Utc.ymd(1977, 1, 1).and_hms(0, 0, 0); let mut expected_count = HashMap::new(); expected_count.insert("_".to_owned(), 2); let expected_agg_result: Vec = vec![AggResult::new( - "testpath".to_string(), 2, "_".to_string(), - default_time, + vec![], + Utc.ymd(1977, 1, 1).and_hms(0, 0, 0), ">= 1".to_string(), )]; check_count( @@ -429,23 +561,21 @@ mod tests { timeframe: 15m output: 'Service name : %param1%¥nMessage : Event Log Service Stopped¥nResults: Selective event log manipulation may follow this event.' "#; - let default_time = Utc.ymd(1977, 1, 1).and_hms(0, 0, 0); - let record_time = Utc.ymd(1996, 2, 27).and_hms(1, 5, 1); let mut expected_count = HashMap::new(); expected_count.insert("_".to_owned(), 2); let mut expected_agg_result: Vec = Vec::new(); expected_agg_result.push(AggResult::new( - "testpath".to_string(), 1, "_".to_string(), - default_time, + vec![], + Utc.ymd(1977, 1, 1).and_hms(0, 0, 0), ">= 1".to_string(), )); expected_agg_result.push(AggResult::new( - "testpath".to_string(), 1, "_".to_string(), - record_time, + vec![], + Utc.ymd(1996, 2, 27).and_hms(1, 5, 1), ">= 1".to_string(), )); check_count( @@ -471,14 +601,13 @@ mod tests { condition: selection1 and selection2 and selection3 | count(Channel) >= 1 output: 'Service name : %param1%¥nMessage : Event Log Service Stopped¥nResults: Selective event log manipulation may follow this event.' "#; - let default_time = Utc.ymd(1977, 1, 1).and_hms(0, 0, 0); let mut expected_count = HashMap::new(); - expected_count.insert("System_".to_owned(), 1); + expected_count.insert("_".to_owned(), 1); let expected_agg_result = AggResult::new( - "testpath".to_string(), 1, - "System_".to_string(), - default_time, + "_".to_string(), + vec!["System".to_owned()], + Utc.ymd(1977, 1, 1).and_hms(0, 0, 0), ">= 1".to_string(), ); check_count( @@ -520,24 +649,22 @@ mod tests { output: 'Service name : %param1%¥nMessage : Event Log Service Stopped¥nResults: Selective event log manipulation may follow this event.' 
"#; - let default_time = Utc.ymd(1977, 1, 1).and_hms(0, 0, 0); - let record_time = Utc.ymd(1996, 2, 27).and_hms(1, 5, 1); let mut expected_count = HashMap::new(); - expected_count.insert("7040_System".to_owned(), 1); - expected_count.insert("9999_Test".to_owned(), 1); + expected_count.insert("System".to_owned(), 1); + expected_count.insert("Test".to_owned(), 1); let mut expected_agg_result: Vec = Vec::new(); expected_agg_result.push(AggResult::new( - "testpath".to_string(), 1, - "7040_System".to_owned(), - default_time, + "System".to_owned(), + vec!["7040".to_owned()], + Utc.ymd(1977, 1, 1).and_hms(0, 0, 0), ">= 1".to_string(), )); expected_agg_result.push(AggResult::new( - "testpath".to_string(), 1, - "9999_Test".to_owned(), - record_time, + "Test".to_owned(), + vec!["9999".to_owned()], + Utc.ymd(1996, 2, 27).and_hms(1, 5, 1), ">= 1".to_string(), )); check_count( @@ -579,24 +706,22 @@ mod tests { timeframe: 1h output: 'Service name : %param1%¥nMessage : Event Log Service Stopped¥nResults: Selective event log manipulation may follow this event.' "#; - let default_time = Utc.ymd(1977, 1, 1).and_hms(0, 0, 0); - let record_time = Utc.ymd(1977, 1, 1).and_hms(0, 5, 0); let mut expected_count = HashMap::new(); - expected_count.insert("7040_Windows Event Log".to_owned(), 1); - expected_count.insert("9999_Test".to_owned(), 1); + expected_count.insert("Windows Event Log".to_owned(), 1); + expected_count.insert("Test".to_owned(), 1); let mut expected_agg_result: Vec = Vec::new(); expected_agg_result.push(AggResult::new( - "testpath".to_string(), 1, - "7040_Windows Event Log".to_owned(), - default_time, + "Windows Event Log".to_owned(), + vec!["7040".to_owned()], + Utc.ymd(1977, 1, 1).and_hms(0, 0, 0), ">= 1".to_string(), )); expected_agg_result.push(AggResult::new( - "testpath".to_string(), 1, - "9999_Test".to_owned(), - record_time, + "Test".to_owned(), + vec!["9999".to_owned()], + Utc.ymd(1977, 1, 1).and_hms(0, 5, 0), ">= 1".to_string(), )); check_count( @@ -638,14 +763,14 @@ mod tests { let test = rule_yaml.next().unwrap(); let mut rule_node = create_rule("testpath".to_string(), test); let init_result = rule_node.init(); - assert_eq!(init_result.is_ok(), true); + assert!(init_result.is_ok()); let target = vec![SIMPLE_RECORD_STR, record_str]; for record in target { match serde_json::from_str(record) { Ok(rec) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(rec, "testpath".to_owned(), &keys); - let _result = rule_node.select(&"testpath".to_string(), &recinfo); + let _result = rule_node.select(&recinfo); } Err(_rec) => { assert!(false, "failed to parse json record."); @@ -654,13 +779,7 @@ mod tests { } //countupの関数が機能しているかを確認 assert_eq!( - *&rule_node - .countdata - .get("testpath") - .unwrap() - .get(&"7040_".to_owned()) - .unwrap() - .len() as i32, + *&rule_node.countdata.get(&"_".to_owned()).unwrap().len() as i32, 2 ); let judge_result = rule_node.judge_satisfy_aggcondition(); @@ -673,7 +792,7 @@ mod tests { { "Event": { "System": { - "EventID": 7040, + "EventID": 9999, "Channel": "System", "TimeCreated_attributes": { "SystemTime": "1977-01-01T00:05:00Z" @@ -698,15 +817,14 @@ mod tests { output: 'Service name : %param1%¥nMessage : Event Log Service Stopped¥nResults: Selective event log manipulation may follow this event.' 
"#; - let default_time = Utc.ymd(1977, 1, 1).and_hms(0, 0, 0); let mut expected_count = HashMap::new(); - expected_count.insert("7040_System".to_owned(), 2); + expected_count.insert("System".to_owned(), 2); let mut expected_agg_result: Vec = Vec::new(); expected_agg_result.push(AggResult::new( - "testpath".to_string(), 2, - "7040_System".to_owned(), - default_time, + "System".to_owned(), + vec!["7040".to_owned(), "9999".to_owned()], + Utc.ymd(1977, 1, 1).and_hms(0, 0, 0), ">= 2".to_string(), )); check_count( @@ -716,6 +834,371 @@ mod tests { expected_agg_result, ); } + + #[test] + /// countで括弧内の記載、byの記載両方がありtimeframe内に存在する場合にruleでcountの検知ができることを確認する(countの括弧内の項目が異なる場合) + fn test_count_exist_field_and_by_with_timeframe_other_field_value() { + let record_str: &str = r#" + { + "Event": { + "System": { + "EventID": 9999, + "Channel": "System", + "TimeCreated_attributes": { + "SystemTime": "1977-01-01T00:30:00Z" + } + }, + "EventData": { + "param1": "Windows Event Log", + "param2": "auto start" + } + }, + "Event_attributes": { + "xmlns": "http://schemas.microsoft.com/win/2004/08/events/event" + } + }"#; + let rule_str = r#" + enabled: true + detection: + selection1: + param1: 'Windows Event Log' + condition: selection1 | count(EventID) by Channel >= 1 + timeframe: 1h + output: 'Service name : %param1%¥nMessage : Event Log Service Stopped¥nResults: Selective event log manipulation may follow this event.' + "#; + + let default_time = Utc.ymd(1977, 1, 1).and_hms(0, 0, 0); + let mut expected_count = HashMap::new(); + expected_count.insert("System".to_owned(), 2); + let mut expected_agg_result: Vec = Vec::new(); + expected_agg_result.push(AggResult::new( + 2, + "System".to_owned(), + vec!["7040".to_owned(), "9999".to_owned()], + default_time, + ">= 1".to_string(), + )); + check_count( + rule_str, + vec![SIMPLE_RECORD_STR, record_str], + expected_count, + expected_agg_result, + ); + } + + // timeframeの検査 + // timeframe=2hで、パイプ以降はcount(EventID) >= 3とする。 + // + // このとき先頭の3行だと検知しないが、2行目から4行目は検知するはず + // このように先頭行ではなく、途中から数えて検知するパターンをチェックする。 + // 0:30 EventID=1 + // 1:30 EventID=1 + // 2:30 EventID=2 + // 3:30 EventID=3 + #[test] + fn test_count_timeframe() { + let record_str1: &str = r#" + { + "Event": { + "System": { + "EventID": 1, + "TimeCreated_attributes": { + "SystemTime": "1977-01-09T00:30:00Z" + } + }, + "EventData": { + "param1": "Windows Event Log" + } + } + }"#; + + let record_str2: &str = r#" + { + "Event": { + "System": { + "EventID": 1, + "TimeCreated_attributes": { + "SystemTime": "1977-01-09T01:30:00Z" + } + }, + "EventData": { + "param1": "Windows Event Log" + } + } + }"#; + + let record_str3: &str = r#" + { + "Event": { + "System": { + "EventID": 2, + "TimeCreated_attributes": { + "SystemTime": "1977-01-09T02:30:00Z" + } + }, + "EventData": { + "param1": "Windows Event Log" + } + } + }"#; + + let record_str4: &str = r#" + { + "Event": { + "System": { + "EventID": 3, + "TimeCreated_attributes": { + "SystemTime": "1977-01-09T03:30:00Z" + } + }, + "EventData": { + "param1": "Windows Event Log" + } + } + }"#; + + let rule_str = r#" + enabled: true + detection: + selection1: + param1: 'Windows Event Log' + condition: selection1 | count(EventID) >= 3 + timeframe: 2h + output: 'Service name : %param1%¥nMessage : Event Log Service Stopped¥nResults: Selective event log manipulation may follow this event.' 
+ "#; + + let default_time = Utc.ymd(1977, 1, 9).and_hms(1, 30, 0); + let mut expected_count = HashMap::new(); + expected_count.insert("_".to_owned(), 4); + let mut expected_agg_result: Vec = Vec::new(); + expected_agg_result.push(AggResult::new( + 3, + "_".to_owned(), + vec!["1".to_owned(), "2".to_owned(), "3".to_owned()], + default_time, + ">= 3".to_string(), + )); + check_count( + rule_str, + vec![record_str1, record_str2, record_str3, record_str4], + expected_count, + expected_agg_result, + ); + } + + // timeframeの検査 + // timeframe=2hで、パイプ以降はcount(EventID) >= 3とする。 + // + // このパターンをチェック + // 0:30 EventID=1 + // 1:30 EventID=1 + // 2:30 EventID=2 + // 3:30 EventID=2 + // 4:30 EventID=3 + // 5:30 EventID=4 + // 19:00 EventID=1 + // 20:00 EventID=1 + // 21:00 EventID=3 + // 22:00 EventID=4 + #[test] + fn test_count_timeframe2() { + let record_str1: &str = r#" + { + "Event": { + "System": { + "EventID": 1, + "TimeCreated_attributes": { + "SystemTime": "1977-01-09T00:30:00Z" + } + }, + "EventData": { + "param1": "Windows Event Log" + } + } + }"#; + + let record_str2: &str = r#" + { + "Event": { + "System": { + "EventID": 1, + "TimeCreated_attributes": { + "SystemTime": "1977-01-09T01:30:00Z" + } + }, + "EventData": { + "param1": "Windows Event Log" + } + } + }"#; + + let record_str3: &str = r#" + { + "Event": { + "System": { + "EventID": 2, + "TimeCreated_attributes": { + "SystemTime": "1977-01-09T02:30:00Z" + } + }, + "EventData": { + "param1": "Windows Event Log" + } + } + }"#; + + let record_str4: &str = r#" + { + "Event": { + "System": { + "EventID": 2, + "TimeCreated_attributes": { + "SystemTime": "1977-01-09T03:30:00Z" + } + }, + "EventData": { + "param1": "Windows Event Log" + } + } + }"#; + + let record_str5: &str = r#" + { + "Event": { + "System": { + "EventID": 3, + "TimeCreated_attributes": { + "SystemTime": "1977-01-09T04:30:00Z" + } + }, + "EventData": { + "param1": "Windows Event Log" + } + } + }"#; + + let record_str6: &str = r#" + { + "Event": { + "System": { + "EventID": 4, + "TimeCreated_attributes": { + "SystemTime": "1977-01-09T05:30:00Z" + } + }, + "EventData": { + "param1": "Windows Event Log" + } + } + }"#; + + let record_str7: &str = r#" + { + "Event": { + "System": { + "EventID": 1, + "TimeCreated_attributes": { + "SystemTime": "1977-01-09T19:00:00Z" + } + }, + "EventData": { + "param1": "Windows Event Log" + } + } + }"#; + + let record_str8: &str = r#" + { + "Event": { + "System": { + "EventID": 1, + "TimeCreated_attributes": { + "SystemTime": "1977-01-09T20:00:00Z" + } + }, + "EventData": { + "param1": "Windows Event Log" + } + } + }"#; + + let record_str9: &str = r#" + { + "Event": { + "System": { + "EventID": 3, + "TimeCreated_attributes": { + "SystemTime": "1977-01-09T21:00:00Z" + } + }, + "EventData": { + "param1": "Windows Event Log" + } + } + }"#; + + let record_str10: &str = r#" + { + "Event": { + "System": { + "EventID": 4, + "TimeCreated_attributes": { + "SystemTime": "1977-01-09T22:00:00Z" + } + }, + "EventData": { + "param1": "Windows Event Log" + } + } + }"#; + + let rule_str = r#" + enabled: true + detection: + selection1: + param1: 'Windows Event Log' + condition: selection1 | count(EventID) >= 3 + timeframe: 2h + output: 'Service name : %param1%¥nMessage : Event Log Service Stopped¥nResults: Selective event log manipulation may follow this event.' 
+ "#; + + let mut expected_count = HashMap::new(); + expected_count.insert("_".to_owned(), 10); + let mut expected_agg_result: Vec = Vec::new(); + expected_agg_result.push(AggResult::new( + 3, + "_".to_owned(), + vec!["2".to_owned(), "3".to_owned(), "4".to_owned()], + Utc.ymd(1977, 1, 9).and_hms(3, 30, 0), + ">= 3".to_string(), + )); + + expected_agg_result.push(AggResult::new( + 3, + "_".to_owned(), + vec!["1".to_owned(), "3".to_owned(), "4".to_owned()], + Utc.ymd(1977, 1, 9).and_hms(20, 00, 0), + ">= 3".to_string(), + )); + check_count( + rule_str, + vec![ + record_str1, + record_str2, + record_str3, + record_str4, + record_str5, + record_str6, + record_str7, + record_str8, + record_str9, + record_str10, + ], + expected_count, + expected_agg_result, + ); + } + /// countで対象の数値確認を行うためのテスト用関数 fn check_count( rule_str: &str, @@ -735,7 +1218,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - let result = &rule_node.select(&"testpath".to_owned(), &recinfo); + let result = &rule_node.select(&recinfo); assert_eq!(result, &true); } Err(_rec) => { @@ -744,27 +1227,23 @@ mod tests { } } let agg_results = &rule_node.judge_satisfy_aggcondition(); - let mut expect_filepath = vec![]; + assert_eq!(agg_results.len(), expect_agg_results.len()); + let mut expect_data = vec![]; let mut expect_key = vec![]; + let mut expect_field_values = vec![]; let mut expect_start_timedate = vec![]; let mut expect_condition_op_num = vec![]; for expect_agg in expect_agg_results { let expect_count = expected_counts.get(&expect_agg.key).unwrap_or(&-1); //countupの関数が機能しているかを確認 assert_eq!( - *&rule_node - .countdata - .get("testpath") - .unwrap() - .get(&expect_agg.key) - .unwrap() - .len() as i32, + *&rule_node.countdata.get(&expect_agg.key).unwrap().len() as i32, *expect_count ); - expect_filepath.push(expect_agg.filepath); expect_data.push(expect_agg.data); expect_key.push(expect_agg.key); + expect_field_values.push(expect_agg.field_values); expect_start_timedate.push(expect_agg.start_timedate); expect_condition_op_num.push(expect_agg.condition_op_num); } @@ -773,9 +1252,14 @@ mod tests { let index = expect_start_timedate .binary_search(&agg_result.start_timedate) .unwrap(); - assert_eq!(agg_result.filepath, expect_filepath[index]); assert_eq!(agg_result.data, expect_data[index]); assert_eq!(agg_result.key, expect_key[index]); + assert!(agg_result.field_values.len() == expect_field_values[index].len()); + for expect_field_value in &expect_field_values[index] { + // テストによってはtimeframeの値と各fieldの値で配列の順番が想定したものと変化してしまう可能性があるため配列の長さを確認したうえで期待した各要素が存在するかを確認する。 + // field`要素の順番については以降の処理で関連しない + assert!(agg_result.field_values.contains(&expect_field_value)); + } assert_eq!(agg_result.condition_op_num, expect_condition_op_num[index]); } } diff --git a/src/detections/rule/matchers.rs b/src/detections/rule/matchers.rs index f5800c14..aacf3672 100644 --- a/src/detections/rule/matchers.rs +++ b/src/detections/rule/matchers.rs @@ -707,7 +707,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), false); + assert_eq!(rule_node.select(&recinfo), false); } Err(_) => { assert!(false, "failed to parse json record."); @@ -737,7 +737,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = 
utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), false); + assert_eq!(rule_node.select(&recinfo), false); } Err(_) => { assert!(false, "Failed to parse json record."); @@ -767,7 +767,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), true); + assert_eq!(rule_node.select(&recinfo), true); } Err(_) => { assert!(false, "failed to parse json record."); @@ -798,7 +798,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), false); + assert_eq!(rule_node.select(&recinfo), false); } Err(_) => { assert!(false, "Failed to parse json record."); @@ -829,7 +829,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), false); + assert_eq!(rule_node.select(&recinfo), false); } Err(_) => { assert!(false, "Failed to parse json record."); @@ -859,7 +859,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), true); + assert_eq!(rule_node.select(&recinfo), true); } Err(_) => { assert!(false, "Failed to parse json record."); @@ -889,7 +889,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), false); + assert_eq!(rule_node.select(&recinfo), false); } Err(_) => { assert!(false, "Failed to parse json record."); @@ -920,7 +920,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), false); + assert_eq!(rule_node.select(&recinfo), false); } Err(_) => { assert!(false, "Failed to parse json record."); @@ -951,7 +951,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), true); + assert_eq!(rule_node.select(&recinfo), true); } Err(_) => { assert!(false, "Failed to parse json record."); @@ -982,7 +982,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), true); + assert_eq!(rule_node.select(&recinfo), true); } Err(_) => { assert!(false, "Failed to parse json record."); @@ -1013,7 +1013,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), true); + assert_eq!(rule_node.select(&recinfo), true); } Err(_) => { assert!(false, "Failed to parse json 
record."); @@ -1044,7 +1044,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), false); + assert_eq!(rule_node.select(&recinfo), false); } Err(_) => { assert!(false, "Failed to parse json record."); @@ -1074,7 +1074,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), true); + assert_eq!(rule_node.select(&recinfo), true); } Err(_) => { assert!(false, "Failed to parse json record."); @@ -1108,7 +1108,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), false); + assert_eq!(rule_node.select(&recinfo), false); } Err(_) => { assert!(false, "Failed to parse json record."); @@ -1142,7 +1142,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), false); + assert_eq!(rule_node.select(&recinfo), false); } Err(_) => { assert!(false, "Failed to parse json record."); @@ -1175,7 +1175,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), false); + assert_eq!(rule_node.select(&recinfo), false); } Err(_) => { assert!(false, "Failed to parse json record."); @@ -1217,7 +1217,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), true); + assert_eq!(rule_node.select(&recinfo), true); } Err(_rec) => { assert!(false, "Failed to parse json record."); @@ -1259,7 +1259,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), false); + assert_eq!(rule_node.select(&recinfo), false); } Err(_rec) => { assert!(false, "Failed to parse json record."); @@ -1301,7 +1301,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), true); + assert_eq!(rule_node.select(&recinfo), true); } Err(_rec) => { assert!(false, "Failed to parse json record."); @@ -1343,7 +1343,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), false); + assert_eq!(rule_node.select(&recinfo), false); } Err(_rec) => { assert!(false, "Failed to parse json record."); @@ -1385,7 +1385,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - 
assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), true); + assert_eq!(rule_node.select(&recinfo), true); } Err(_rec) => { assert!(false, "Failed to parse json record."); @@ -1427,7 +1427,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), false); + assert_eq!(rule_node.select(&recinfo), false); } Err(_rec) => { assert!(false, "Failed to parse json record."); @@ -1457,7 +1457,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), true); + assert_eq!(rule_node.select(&recinfo), true); } Err(_) => { assert!(false, "Failed to parse json record."); @@ -1487,7 +1487,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), false); + assert_eq!(rule_node.select(&recinfo), false); } Err(_) => { assert!(false, "Failed to parse json record."); @@ -1517,7 +1517,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), true); + assert_eq!(rule_node.select(&recinfo), true); } Err(_) => { assert!(false, "Failed to parse json record."); @@ -1605,7 +1605,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), true); + assert_eq!(rule_node.select(&recinfo), true); } Err(_) => { assert!(false, "Failed to parse json record."); @@ -1635,7 +1635,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), false); + assert_eq!(rule_node.select(&recinfo), false); } Err(_) => { assert!(false, "Failed to parse json record."); @@ -1667,7 +1667,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), true); + assert_eq!(rule_node.select(&recinfo), true); } Err(_) => { assert!(false, "Failed to parse json record."); @@ -1699,7 +1699,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), false); + assert_eq!(rule_node.select(&recinfo), false); } Err(_) => { assert!(false, "Failed to parse json record."); diff --git a/src/detections/rule/mod.rs b/src/detections/rule/mod.rs index 0b50e470..ebea89fd 100644 --- a/src/detections/rule/mod.rs +++ b/src/detections/rule/mod.rs @@ -15,7 +15,7 @@ use self::aggregation_parser::AggregationParseInfo; mod condition_parser; mod count; -use self::count::TimeFrameInfo; +use self::count::{AggRecordTimeInfo, TimeFrameInfo}; use super::detection::EvtxRecordInfo; @@ -28,7 
+28,7 @@ pub struct RuleNode { pub rulepath: String, pub yaml: Yaml, detection: DetectionNode, - countdata: HashMap>>>, + countdata: HashMap>, } impl Debug for RuleNode { @@ -66,10 +66,10 @@ impl RuleNode { } } - pub fn select(&mut self, filepath: &String, event_record: &EvtxRecordInfo) -> bool { + pub fn select(&mut self, event_record: &EvtxRecordInfo) -> bool { let result = self.detection.select(event_record); - if result { - count::count(self, filepath, &event_record.record); + if result && self.has_agg_condition() { + count::count(self, &event_record.record); } return result; } @@ -83,14 +83,23 @@ impl RuleNode { if !self.has_agg_condition() { return ret; } - for filepath in self.countdata.keys() { - ret.append(&mut count::aggregation_condition_select(&self, &filepath)); - } + ret.append(&mut count::aggregation_condition_select(&self)); return ret; } pub fn check_exist_countdata(&self) -> bool { self.countdata.len() > 0 } + /// ルール内のAggregationParseInfo(Aggregation Condition)を取得する関数 + pub fn get_agg_condition(&self) -> Option<&AggregationParseInfo> { + match self.detection.aggregation_condition.as_ref() { + None => { + return None; + } + Some(agg_parse_info) => { + return Some(agg_parse_info); + } + } + } } // RuleNodeのdetectionに定義されているキーの一覧を取得する。 @@ -293,12 +302,12 @@ impl DetectionNode { #[derive(Debug)] /// countなどのaggregationの結果を出力する構造体 pub struct AggResult { - /// evtx file path - pub filepath: String, /// countなどの値 pub data: i32, - /// (countの括弧内の記載)_(count byで指定された条件)で設定されたキー + /// count byで指定された条件のレコード内での値 pub key: String, + /// countの括弧内指定された項目の検知されたレコード内での値の配列。括弧内で指定がなかった場合は長さ0の配列となる + pub field_values: Vec, ///検知したブロックの最初のレコードの時間 pub start_timedate: DateTime, ///条件式の情報 @@ -307,16 +316,16 @@ pub struct AggResult { impl AggResult { pub fn new( - filepath: String, data: i32, key: String, + field_values: Vec, start_timedate: DateTime, condition_op_num: String, ) -> AggResult { return AggResult { - filepath: filepath, data: data, key: key, + field_values: field_values, start_timedate: start_timedate, condition_op_num: condition_op_num, }; @@ -361,7 +370,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), true); + assert_eq!(rule_node.select(&recinfo), true); } Err(_) => { assert!(false, "Failed to parse json record."); @@ -391,7 +400,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), false); + assert_eq!(rule_node.select(&recinfo), false); } Err(_) => { assert!(false, "Failed to parse json record."); @@ -421,7 +430,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), false); + assert_eq!(rule_node.select(&recinfo), false); } Err(_) => { assert!(false, "Failed to parse json record."); @@ -504,7 +513,7 @@ mod tests { Ok(record) => { let keys = detections::rule::get_detection_keys(&rule_node); let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys); - assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), true); + assert_eq!(rule_node.select(&recinfo), true); } Err(_) => { assert!(false, "Failed to parse json 
@@ -361,7 +370,7 @@ mod tests {
             Ok(record) => {
                 let keys = detections::rule::get_detection_keys(&rule_node);
                 let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys);
-                assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), true);
+                assert_eq!(rule_node.select(&recinfo), true);
             }
             Err(_) => {
                 assert!(false, "Failed to parse json record.");
@@ -391,7 +400,7 @@ mod tests {
             Ok(record) => {
                 let keys = detections::rule::get_detection_keys(&rule_node);
                 let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys);
-                assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), false);
+                assert_eq!(rule_node.select(&recinfo), false);
             }
             Err(_) => {
                 assert!(false, "Failed to parse json record.");
@@ -421,7 +430,7 @@ mod tests {
             Ok(record) => {
                 let keys = detections::rule::get_detection_keys(&rule_node);
                 let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys);
-                assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), false);
+                assert_eq!(rule_node.select(&recinfo), false);
             }
             Err(_) => {
                 assert!(false, "Failed to parse json record.");
@@ -504,7 +513,7 @@ mod tests {
             Ok(record) => {
                 let keys = detections::rule::get_detection_keys(&rule_node);
                 let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys);
-                assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), true);
+                assert_eq!(rule_node.select(&recinfo), true);
             }
             Err(_) => {
                 assert!(false, "Failed to parse json record.");
@@ -563,7 +572,7 @@ mod tests {
             Ok(record) => {
                 let keys = detections::rule::get_detection_keys(&rule_node);
                 let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys);
-                assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), false);
+                assert_eq!(rule_node.select(&recinfo), false);
             }
             Err(_) => {
                 assert!(false, "Failed to parse json record.");
@@ -629,7 +638,7 @@ mod tests {
             Ok(record) => {
                 let keys = detections::rule::get_detection_keys(&rule_node);
                 let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys);
-                assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), true);
+                assert_eq!(rule_node.select(&recinfo), true);
             }
             Err(_) => {
                 assert!(false, "Failed to parse json record.");
@@ -673,7 +682,7 @@ mod tests {
             Ok(record) => {
                 let keys = detections::rule::get_detection_keys(&rule_node);
                 let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys);
-                assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), true);
+                assert_eq!(rule_node.select(&recinfo), true);
             }
             Err(_) => {
                 assert!(false, "Failed to parse json record.");
@@ -718,7 +727,7 @@ mod tests {
             Ok(record) => {
                 let keys = detections::rule::get_detection_keys(&rule_node);
                 let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys);
-                assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), false);
+                assert_eq!(rule_node.select(&recinfo), false);
             }
             Err(_) => {
                 assert!(false, "Failed to parse json record.");
@@ -782,7 +791,7 @@ mod tests {
             Ok(record) => {
                 let keys = detections::rule::get_detection_keys(&rule_node);
                 let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys);
-                assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), true);
+                assert_eq!(rule_node.select(&recinfo), true);
             }
             Err(_) => {
                 assert!(false, "Failed to parse json record.");
@@ -846,7 +855,7 @@ mod tests {
             Ok(record) => {
                 let keys = detections::rule::get_detection_keys(&rule_node);
                 let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys);
-                assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), false);
+                assert_eq!(rule_node.select(&recinfo), false);
             }
             Err(_) => {
                 assert!(false, "Failed to parse json record.");
@@ -892,7 +901,7 @@ mod tests {
             Ok(record) => {
                 let keys = detections::rule::get_detection_keys(&rule_node);
                 let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys);
-                assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), true);
+                assert_eq!(rule_node.select(&recinfo), true);
             }
             Err(_rec) => {
                 assert!(false, "Failed to parse json record.");
@@ -950,17 +959,11 @@ mod tests {
             Ok(record) => {
                 let keys = detections::rule::get_detection_keys(&rule_node);
                 let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys);
-                let result = rule_node.select(&"testpath".to_string(), &recinfo);
+                let result = rule_node.select(&recinfo);
                 assert_eq!(rule_node.detection.aggregation_condition.is_some(), true);
                 assert_eq!(result, true);
                 assert_eq!(
-                    *&rule_node
-                        .countdata
-                        .get("testpath")
-                        .unwrap()
-                        .get(key)
-                        .unwrap()
-                        .len() as i32,
+                    *&rule_node.countdata.get(key).unwrap().len() as i32,
                     expect_count
                 );
             }
diff --git a/src/detections/rule/selectionnodes.rs b/src/detections/rule/selectionnodes.rs
index c33a4db0..4d88bedd 100644
--- a/src/detections/rule/selectionnodes.rs
+++ b/src/detections/rule/selectionnodes.rs
@@ -432,7 +432,7 @@ mod tests {
             Ok(record) => {
                 let keys = detections::rule::get_detection_keys(&rule_node);
                 let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys);
-                assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), true);
+                assert_eq!(rule_node.select(&recinfo), true);
             }
             Err(_) => {
                 assert!(false, "Failed to parse json record.");
@@ -465,7 +465,7 @@ mod tests {
             Ok(record) => {
                 let keys = detections::rule::get_detection_keys(&rule_node);
                 let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys);
-                assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), false);
+                assert_eq!(rule_node.select(&recinfo), false);
             }
             Err(_) => {
                 assert!(false, "Failed to parse json record.");
@@ -497,7 +497,7 @@ mod tests {
             Ok(record) => {
                 let keys = detections::rule::get_detection_keys(&rule_node);
                 let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys);
-                assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), true);
+                assert_eq!(rule_node.select(&recinfo), true);
             }
             Err(_) => {
                 assert!(false, "Failed to parse json record.");
@@ -529,7 +529,7 @@ mod tests {
             Ok(record) => {
                 let keys = detections::rule::get_detection_keys(&rule_node);
                 let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys);
-                assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), true);
+                assert_eq!(rule_node.select(&recinfo), true);
             }
             Err(_) => {
                 assert!(false, "Failed to parse json record.");
@@ -561,7 +561,7 @@ mod tests {
             Ok(record) => {
                 let keys = detections::rule::get_detection_keys(&rule_node);
                 let recinfo = utils::create_rec_info(record, "testpath".to_owned(), &keys);
-                assert_eq!(rule_node.select(&"testpath".to_owned(), &recinfo), false);
+                assert_eq!(rule_node.select(&recinfo), false);
             }
             Err(_) => {
                 assert!(false, "Failed to parse json record.");
diff --git a/src/main.rs b/src/main.rs
index 425d0a9d..50abee7d 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -171,7 +171,6 @@ impl App {
             pb.inc();
         }
         after_fact();
-        detection.print_unique_results();
     }
 
     // Analyzes one Windows event log file.
@@ -237,7 +236,7 @@ impl App {
             detection = detection.start(&self.rt, records_per_detect);
         }
 
-        detection.add_aggcondtion_msg();
+        detection.add_aggcondition_msg();
         tl.tm_stats_dsp_msg();
 
         return detection;
diff --git a/src/timeline/timeline.rs b/src/timeline/timeline.rs
index 75603540..890212af 100644
--- a/src/timeline/timeline.rs
+++ b/src/timeline/timeline.rs
@@ -11,7 +11,7 @@ pub struct Timeline {
 impl Timeline {
     pub fn new() -> Timeline {
         let totalcnt = 0;
-        let filepath = "".to_owned();
+        let filepath = String::default();
         let starttm = "".to_string();
         let endtm = "".to_string();
         let statslst = HashMap::new();
diff --git a/src/yaml.rs b/src/yaml.rs
index 4326d9f8..4411afb2 100644
--- a/src/yaml.rs
+++ b/src/yaml.rs
@@ -191,7 +191,7 @@ mod tests {
         };
         let _ = &yaml.read_dir(
             "test_files/rules/yaml/".to_string(),
-            &"".to_owned(),
+            &String::default(),
             &exclude_ids,
         );
         assert_ne!(yaml.files.len(), 0);