[Bug] Fix UTF-8 Encoding for Rule File Operations (#5684)
* Updated kubernetes.audit.requestObject.spec.containers.image field type from text to keyword * [Bug] Fix UTF-8 Encoding for Rule File Operations
This commit is contained in:
@@ -105,7 +105,7 @@ class TOMLActionConnector:
|
||||
if path.suffix != ".toml":
|
||||
# If it doesn't, add one
|
||||
path = path.with_suffix(".toml")
|
||||
with path.open("w") as f:
|
||||
with path.open("w", encoding="utf-8") as f:
|
||||
contents_dict = self.contents.to_dict()
|
||||
# Sort the dictionary so that 'metadata' is at the top
|
||||
sorted_dict = dict(sorted(contents_dict.items(), key=lambda item: item[0] != "metadata"))
|
||||
|
||||
@@ -221,7 +221,7 @@ class TOMLException:
|
||||
if path.suffix != ".toml":
|
||||
# If it doesn't, add one
|
||||
path = path.with_suffix(".toml")
|
||||
with path.open("w") as f:
|
||||
with path.open("w", encoding="utf-8") as f:
|
||||
contents_dict = self.contents.to_dict()
|
||||
# Sort the dictionary so that 'metadata' is at the top
|
||||
sorted_dict = dict(sorted(contents_dict.items(), key=lambda item: item[0] != "metadata"))
|
||||
|
||||
@@ -308,7 +308,7 @@ def toml_write(rule_contents: dict[str, Any], out_file_path: Path | None = None)
|
||||
|
||||
f = None
|
||||
if out_file_path:
|
||||
f = out_file_path.open("w")
|
||||
f = out_file_path.open("w", encoding="utf-8")
|
||||
|
||||
try:
|
||||
for data in ("metadata", "transform", "rule"):
|
||||
|
||||
@@ -242,9 +242,9 @@ class RawRuleCollection(BaseCollection[DictRule]):
|
||||
# use pytoml instead of toml because of annoying bugs
|
||||
# https://github.com/uiri/toml/issues/152
|
||||
# might also be worth looking at https://github.com/sdispater/tomlkit
|
||||
raw_dict = pytoml.loads(path.read_text()) # type: ignore[reportUnknownMemberType]
|
||||
raw_dict = pytoml.loads(path.read_text(encoding="utf-8")) # type: ignore[reportUnknownMemberType]
|
||||
elif path.suffix == ".json":
|
||||
raw_dict = json.loads(path.read_text())
|
||||
raw_dict = json.loads(path.read_text(encoding="utf-8"))
|
||||
elif path.suffix == ".ndjson":
|
||||
raise ValueError("ndjson is not supported in RawRuleCollection. Break out the rules individually.")
|
||||
else:
|
||||
|
||||
@@ -338,7 +338,7 @@ def rulename_to_filename(name: str, tactic_name: str | None = None, ext: str = "
|
||||
def load_rule_contents(rule_file: Path, single_only: bool = False) -> list[Any]:
|
||||
"""Load a rule file from multiple formats."""
|
||||
extension = rule_file.suffix
|
||||
raw_text = rule_file.read_text()
|
||||
raw_text = rule_file.read_text(encoding="utf-8")
|
||||
|
||||
if extension in (".ndjson", ".jsonl"):
|
||||
# kibana exported rule object is ndjson with the export metadata on the last line
|
||||
|
||||
+1
-1
@@ -1,6 +1,6 @@
|
||||
[project]
|
||||
name = "detection_rules"
|
||||
version = "1.5.39"
|
||||
version = "1.5.40"
|
||||
description = "Detection Rules is the home for rules used by Elastic Security. This repository is used for the development, maintenance, testing, validation, and release of rules for Elastic Security’s Detection Engine."
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.12"
|
||||
|
||||
Reference in New Issue
Block a user