[metadata]
creation_date = "2026/04/24"
integration = ["auditd_manager"]
maturity = "production"
updated_date = "2026/04/24"

[rule]
author = ["Elastic"]
description = """
Detects Auditd opened-file reads on sensitive root and cluster paths (Kubernetes token mounts, kubelet and admin
kubeconfig, PKI material, shadow, root SSH keys, root cloud CLI and Docker config) when the process looks like common
copy or scripting utilities or the binary runs from temp or run staging. User home paths are excluded so file watches
stay explicit and aligned with auditd.
"""
false_positives = [
    """
    Backup, configuration management, and image scanners may open the same paths from scripted utilities; baseline
    trusted agents and narrow exclusions by process executable hash or parent chain.
    """,
    """
    Administrators reading kubeconfig or cloud profiles during migration can match; correlate with change tickets and
    bastion sessions.
    """,
    """
    Credential reads under non-root home trees are intentionally excluded; clone the rule with explicit per-user
    file.path values and optional process.executable prefixes if you must cover interactive accounts with matching
    audit -w lines for those paths.
    """,
]
from = "now-9m"
index = ["logs-auditd_manager.auditd-*"]
language = "kuery"
license = "Elastic License v2"
name = "Sensitive Identity File Open by Suspicious Process via Auditd"
note = """## Triage and analysis

### Investigating Sensitive Identity File Open by Suspicious Process via Auditd

Review which file.path matched, the process name and executable, parent command line, and the Linux user or audit
identity. Pivot on the same host for adjacent opens, network egress, or privilege changes. Compare against known
maintenance windows and automation identities.

### Possible investigation steps

- Confirm whether the workload is a Kubernetes node, jump host, or developer machine and whether the actor should read
  the matched path at all.
- For Kubernetes token paths, map the process to a container or host PID namespace and inspect pod security context and
  projected volumes.
- For cloud credential JSON or shared credentials files, check cloud audit logs for API or token activity shortly after
  the open timestamp.
- Capture file hash and process binary hash where possible for incident evidence.

### False positive analysis

- Legitimate kubelet or control plane components may touch admin.conf or PKI material on control plane nodes; scope the
  rule to worker roles if noisy.
- CI users running tests from /tmp with cat against a copied kubeconfig can match; tune process or user allowlists.

### Response and remediation

- If malicious, isolate the host, rotate exposed keys and tokens, invalidate cloud sessions, and review RBAC and file
  permissions on shared credential stores.
"""
references = [
    "https://attack.mitre.org/techniques/T1552/001/",
    "https://attack.mitre.org/techniques/T1552/007/",
]
risk_score = 73
rule_id = "e4c5d6e7-f8a9-4012-b3c4-d5e6f7a80912"
setup = """## Setup

This rule expects the Elastic Agent Auditd Manager integration on Linux, with audit rules that emit file open events
for the paths you care about. Use Fleet to install and configure Auditd Manager, then paste custom rules into the
integration so opens are audited before they reach Elasticsearch.

### Step 1: Add Auditd Manager in Fleet

1. In Kibana, open Management, then Integrations.
2. Search for Auditd Manager and open the integration card.
3. Click Add Auditd Manager, assign a name, and add the integration to the Elastic Agent policy that runs on your
   Linux hosts (nodes, jump boxes, or developer workstations as applicable).
4. Save and deploy the policy so agents enroll or update.

### Step 2: Paste audit rules into Auditd Manager

1. Edit the same Auditd Manager integration policy.
2. Open the Audit rules (or Auditd rule files) section used for free-form audit.rules content.
3. Paste the block below into the audit rules text box, then save the integration policy again so agents reload rules.

The permission mask uses r (read) together with w (write) and a (attribute change) so auditd emits events on read
opens such as cat or head, which align with opened-file in the detection query. Write and attribute bits still catch
modifications. If your site policy prefers read-only watches, you may narrow to -p r at the cost of missing write-side
telemetry on the same paths. Watches placed on directories (the AWS CLI and SSO caches and the gcloud
legacy_credentials directory) also report opens of files beneath them; the detection query matches those children
with trailing wildcards.

```
## Kubernetes and node identity material
-w /var/run/secrets/kubernetes.io/serviceaccount/token -p rwa -k elastic_sensitive_identity
-w /var/run/secrets/kubernetes.io/serviceaccount/ca.crt -p rwa -k elastic_sensitive_identity
-w /var/run/secrets/eks.amazonaws.com/serviceaccount/token -p rwa -k elastic_sensitive_identity
-w /var/run/secrets/azure/tokens/azure-identity-token -p rwa -k elastic_sensitive_identity
-w /var/run/secrets/tokens/azure-identity-token -p rwa -k elastic_sensitive_identity
-w /var/lib/kubelet/kubeconfig -p rwa -k elastic_sensitive_identity
-w /etc/kubernetes/admin.conf -p rwa -k elastic_sensitive_identity
-w /etc/kubernetes/pki/ca.key -p rwa -k elastic_sensitive_identity
-w /etc/kubernetes/pki/apiserver-kubelet-client.key -p rwa -k elastic_sensitive_identity
-w /var/lib/kubelet/pki/kubelet-client-current.pem -p rwa -k elastic_sensitive_identity
-w /etc/rancher/k3s/k3s.yaml -p rwa -k elastic_sensitive_identity

## Host credential stores (root only)
-w /etc/shadow -p rwa -k elastic_sensitive_identity
-w /root/.ssh/id_rsa -p rwa -k elastic_sensitive_identity
-w /root/.ssh/id_ed25519 -p rwa -k elastic_sensitive_identity
-w /root/.ssh/id_ecdsa -p rwa -k elastic_sensitive_identity
-w /root/.aws/credentials -p rwa -k elastic_sensitive_identity
-w /root/.aws/config -p rwa -k elastic_sensitive_identity
-w /root/.aws/cli/cache -p rwa -k elastic_sensitive_identity
-w /root/.aws/sso/cache -p rwa -k elastic_sensitive_identity
-w /root/.azure/accessTokens.json -p rwa -k elastic_sensitive_identity
-w /root/.azure/azureProfile.json -p rwa -k elastic_sensitive_identity
-w /root/.azure/msal_token_cache.json -p rwa -k elastic_sensitive_identity
-w /root/.azure/msal_http_cache.bin -p rwa -k elastic_sensitive_identity
-w /root/.config/gcloud/application_default_credentials.json -p rwa -k elastic_sensitive_identity
-w /root/.config/gcloud/credentials.db -p rwa -k elastic_sensitive_identity
-w /root/.config/gcloud/access_tokens.db -p rwa -k elastic_sensitive_identity
-w /root/.config/gcloud/legacy_credentials -p rwa -k elastic_sensitive_identity
-w /root/.kube/config -p rwa -k elastic_sensitive_identity
-w /root/.docker/config.json -p rwa -k elastic_sensitive_identity
```

### Step 3: Reload and verify

1. Confirm auditd is active on the host and that auditctl -l (or equivalent) lists the new rules without syntax errors.
2. Generate a harmless test open in a lab (for example cat of a non-production token file you control) and confirm
   documents land in logs-auditd_manager.auditd-* with event.category file and event.action opened-file (or the closest
   normalized action your stack maps for open syscalls).
3. If event.action differs in your environment, adjust the rule query to include the mapped value while keeping the
   same path and process logic.

Further background: https://docs.elastic.co/integrations/auditd_manager
"""
severity = "high"
tags = [
    "Domain: Endpoint",
    "Domain: Identity",
    "OS: Linux",
    "Use Case: Threat Detection",
    "Tactic: Credential Access",
    "Data Source: Auditd Manager",
    "Resources: Investigation Guide",
]
timestamp_override = "event.ingested"
type = "query"
query = '''
host.os.type:"linux" and
data_stream.dataset:"auditd_manager.auditd" and
event.category:"file" and
event.action:"opened-file" and
(
  process.name:(
    cp or mv or ln or cat or head or tail or
    base64 or xxd or od or
    curl or wget or
    tar or zip or gzip or scp or rsync or
    python* or perl* or ruby* or node or bun or php* or lua* or
    tee or dd or
    nc or ncat or netcat or socat or
    openssl or ssh or sftp or
    busybox or jq or yq or
    strings or xargs or sed or awk or grep or find or
    .*
  ) or
  process.executable:(/tmp/* or /var/tmp/* or /dev/shm/* or /run/*) or
  (process.name:(sh or bash or zsh or dash or fish or ksh) and process.args:("-c" or "-i"))
) and
file.path:(
  "/var/run/secrets/kubernetes.io/serviceaccount/token" or
  "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" or
  "/var/run/secrets/eks.amazonaws.com/serviceaccount/token" or
  "/var/run/secrets/azure/tokens/azure-identity-token" or
  "/var/run/secrets/tokens/azure-identity-token" or
  "/var/lib/kubelet/kubeconfig" or
  "/etc/kubernetes/admin.conf" or
  "/etc/kubernetes/pki/ca.key" or
  "/etc/kubernetes/pki/apiserver-kubelet-client.key" or
  "/var/lib/kubelet/pki/kubelet-client-current.pem" or
  "/etc/rancher/k3s/k3s.yaml" or
  "/etc/shadow" or
  "/root/.ssh/id_rsa" or
  "/root/.ssh/id_ed25519" or
  "/root/.ssh/id_ecdsa" or
  "/root/.aws/credentials" or
  "/root/.aws/config" or
  /root/.aws/cli/cache/* or
  /root/.aws/sso/cache/* or
  "/root/.azure/accessTokens.json" or
  "/root/.azure/azureProfile.json" or
  "/root/.azure/msal_token_cache.json" or
  "/root/.azure/msal_http_cache.bin" or
  "/root/.config/gcloud/application_default_credentials.json" or
  "/root/.config/gcloud/credentials.db" or
  "/root/.config/gcloud/access_tokens.db" or
  /root/.config/gcloud/legacy_credentials/* or
  "/root/.kube/config" or
  "/root/.docker/config.json"
)
'''

[[rule.threat]]
framework = "MITRE ATT&CK"

[[rule.threat.technique]]
id = "T1552"
name = "Unsecured Credentials"
reference = "https://attack.mitre.org/techniques/T1552/"

[[rule.threat.technique.subtechnique]]
id = "T1552.001"
name = "Credentials In Files"
reference = "https://attack.mitre.org/techniques/T1552/001/"

[[rule.threat.technique.subtechnique]]
id = "T1552.007"
name = "Container API"
reference = "https://attack.mitre.org/techniques/T1552/007/"

[rule.threat.tactic]
id = "TA0006"
name = "Credential Access"
reference = "https://attack.mitre.org/tactics/TA0006/"