# Elastic prebuilt detection rule: "Splunk External Alerts" (promotion rule).
[metadata]
creation_date = "2025/07/31"
integration = ["splunk"]
maturity = "production"
# Promotion rules re-emit third-party alerts as Elastic detection alerts.
promotion = true
updated_date = "2025/10/17"

[rule]
author = ["Elastic"]
description = """
Generates a detection alert for each Splunk alert written to the configured indices. Enabling this rule allows you to
immediately begin investigating Splunk alerts in the app.
"""
# Short look-back window: alerts are promoted, not correlated, so only
# recently ingested documents need to be scanned each interval.
from = "now-2m"
index = ["logs-splunk.alert-*"]
interval = "1m"
language = "kuery"
license = "Elastic License v2"
# Raised ceiling so bursts of upstream Splunk alerts are not truncated.
max_signals = 1000
name = "Splunk External Alerts"
note = """## Triage and analysis

### Investigating Splunk External Alerts

Splunk monitors and analyzes data, often used in security environments to track and respond to potential threats. The rule identifies such manipulations by flagging alerts enabling timely investigation and response.

### Possible investigation steps

- Examine the specific indices where the alert was written to identify any unusual or unauthorized activity.
- Cross-reference the alert with recent changes or activities in the Splunk environment to determine if the alert could be a result of legitimate administrative actions.
- Investigate the source and context of the alert to identify any patterns or anomalies that could indicate manipulation or false positives.
- Check for any related alerts or logs that might provide additional context or evidence of adversarial behavior.
- Consult the Splunk investigation guide and resources tagged in the alert for specific guidance on handling similar threats.

### False positive analysis

- Alerts triggered by routine Splunk maintenance activities can be false positives. To manage these, identify and document regular maintenance schedules and create exceptions for alerts generated during these times.
- Frequent alerts from specific indices that are known to contain non-threatening data can be excluded by adjusting the rule to ignore these indices, ensuring only relevant alerts are investigated.
- Alerts generated by automated scripts or tools that interact with Splunk for legitimate purposes can be false positives. Review and whitelist these scripts or tools to prevent unnecessary alerts.
- If certain user actions consistently trigger alerts but are verified as non-malicious, consider creating user-specific exceptions to reduce noise and focus on genuine threats.
- Regularly review and update the list of exceptions to ensure they remain relevant and do not inadvertently exclude new or evolving threats.

### Response and remediation

- Immediately isolate affected systems to prevent further manipulation of Splunk alerts and potential spread of malicious activity.
- Review and validate the integrity of the Splunk alert indices to ensure no unauthorized changes have been made.
- Restore any compromised Splunk alert configurations from a known good backup to ensure accurate monitoring and alerting.
- Conduct a thorough audit of user access and permissions within Splunk to identify and revoke any unauthorized access.
- Escalate the incident to the security operations center (SOC) for further analysis and to determine if additional systems or data have been affected.
- Implement enhanced monitoring on Splunk indices to detect any future unauthorized changes or suspicious activities.
- Document the incident details and response actions taken for future reference and to improve incident response procedures.
"""
|
|
references = ["https://docs.elastic.co/en/integrations/splunk"]
|
|
risk_score = 47
|
|
rule_id = "d3b6222f-537e-4b84-956a-3ebae2dcf811"
|
|
rule_name_override = "splunk.alert.source"
|
|
setup = """## Setup
|
|
|
|
### Splunk Alert Integration
|
|
This rule is designed to capture alert events generated by the Splunk integration and promote them as Elastic detection alerts.
|
|
|
|
To capture Splunk alerts, install and configure the Splunk integration to ingest alert events into the `logs-splunk.alert-*` index pattern.
|
|
|
|
If this rule is enabled alongside the External Alerts promotion rule (UUID: eb079c62-4481-4d6e-9643-3ca499df7aaa), you may receive duplicate alerts for the same Splunk events. Consider adding a rule exception for the External Alert rule to exclude data_stream.dataset:splunk.alert to avoid receiving duplicate alerts.
|
|
|
|
### Additional notes
|
|
|
|
For information on troubleshooting the maximum alerts warning please refer to this [guide](https://www.elastic.co/guide/en/security/current/alerts-ui-monitor.html#troubleshoot-max-alerts).
|
|
"""
|
|
severity = "medium"
|
|
tags = [
|
|
"Data Source: Splunk",
|
|
"Use Case: Threat Detection",
|
|
"Resources: Investigation Guide",
|
|
"Promotion: External Alerts",
|
|
]
|
|
timestamp_override = "event.ingested"
|
|
type = "query"
|
|
|
|
query = '''
|
|
event.kind: alert and data_stream.dataset: splunk.alert
|
|
'''
|
|
|
|
|
|
# Override the default risk_score (47) with the source event's own
# event.risk_score when present. The empty "value" is the conventional
# form for score-override mappings in prebuilt rules.
[[rule.risk_score_mapping]]
field = "event.risk_score"
operator = "equals"
value = ""
# Map the numeric event.severity of the source Splunk alert onto the
# Elastic alert severity, overriding the default "medium".
[[rule.severity_mapping]]
field = "event.severity"
operator = "equals"
severity = "low"
value = "21"

[[rule.severity_mapping]]
field = "event.severity"
operator = "equals"
severity = "medium"
value = "47"

[[rule.severity_mapping]]
field = "event.severity"
operator = "equals"
severity = "high"
value = "73"

[[rule.severity_mapping]]
field = "event.severity"
operator = "equals"
severity = "critical"
value = "99"