clean IP subnet

This commit is contained in:
vunx2
2020-03-18 16:49:44 +07:00
parent 1df5620a14
commit e228d42b97
3 changed files with 204 additions and 195 deletions
+26 -2
View File
@@ -2,6 +2,7 @@ title: CarbonBlack field mapping
order: 20
backends:
- carbonblack
- cb
fieldmappings:
AccountName: username
CommandLine: cmdline
@@ -15,14 +16,34 @@ fieldmappings:
Image: process_name
ImageLoaded: modload
ImagePath: path
NewProcessName: process_name
#NewProcessName: process_name
#ParentCommandLine: NONE??
ParentProcessName: parent_name
ParentImage: parent_name
Path: path
ProcessCommandLine: cmdline
ProcessName: process_name
Signature: digsig_result
#Signature: digsig_result
SourceIp: ipaddr
DestinationAddress: ipaddr
DestinationPort: ipport
DestPort: ipport
TargetObject: regmod
TargetFilename: filemod
TargetFileName: filemod
Targetfilename: filemod
SourceImage: parent_name
TargetImage: childproc_name
NewProcessName: childproc_name
Product: product_name
Signature: digsig_publisher
CallTrace: modload
DestinationHostname: domain
User: username
StartModule: modload
Company: company_name
Description: file_desc
FileVersion: file_version
@@ -72,3 +93,6 @@ fieldmappings:
excludedfields:
- EventID
- Robot2
- TargetObject
- CallTrace
- Imphash
+177 -191
View File
@@ -1,201 +1,166 @@
# Output backends for sigmac
# Copyright 2016-2018 Thomas Patzke, Florian Roth, Roey
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
# from netaddr import *
import sigma
from .base import SingleTextQueryBackend
from .mixins import MultiRuleOutputMixin
from sigma.parser.modifiers.base import SigmaTypeModifier
import requests
# import argparse
import urllib3
import json
import os
from .. eventdict import event
urllib3.disable_warnings()
import os, ssl
if (not os.environ.get('PYTHONHTTPSVERIFY', '') and
getattr(ssl, '_create_unverified_context', None)):
ssl._create_default_https_context = ssl._create_unverified_context
ssl._create_default_https_context = ssl._create_unverified_context
# parser = argparse.ArgumentParser()
# parser.add_argument("--eshost", help="Elasticsearch host", type=str, required=True)
# parser.add_argument("--esport", help="Elasticsearch port", type=str, required=True)
# parser.add_argument("--ruledir", help="sigma rule directory path to convert", type=str, required=True)
# parser.add_argument("--index", help="Elasticsearch index name egs: \"winlogbeat-*\"", type=str, required=True)
# parser.add_argument("--email", help="email address to send mail alert", type=str, required=True)
# parser.add_argument("--outdir", help="output directory to create elastalert rules", type=str, required=True)
# parser.add_argument("--sigmac", help="Sigmac location", default="../tools/sigmac", type=str)
# parser.add_argument("--realerttime", help="Realert time (optional value, default 5 minutes)", type=str, default=5)
# parser.add_argument("--debug", help="Show debug output", type=bool, default=False)
# args = parser.parse_args()
class CarbonBlackBackend(SingleTextQueryBackend):
"""Converts Sigma rule into Carbon Black Query Language (SPL)."""
from fnmatch import fnmatch
from sigma.backends.base import SingleTextQueryBackend
from sigma.backends.exceptions import NotSupportedError
from sigma.parser.modifiers.type import SigmaRegularExpressionModifier
from sigma.parser.condition import ConditionOR, ConditionAND, NodeSubexpression
from sigma.parser.modifiers.base import SigmaTypeModifier
class CarbonBlackWildcardHandlingMixin:
    """
    Mixin that decides whether a search value should be treated as a keyword
    match, based on the presence of unescaped wildcards, and exposes optional
    backend configuration for keyword handling.
    """
    # options = SingleTextQueryBackend.options + (
    #     ("keyword_field", None, "Keyword sub-field name", None),
    #     ("keyword_blacklist", None, "Fields that don't have a keyword subfield (wildcards * and ? allowed)", None)
    # )

    # Matches a '*' or '?' that is either unescaped or preceded by an escaped backslash.
    reContainsWildcard = re.compile("(?:(?<!\\\\)|\\\\\\\\)[*?]").search

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.matchKeyword = True
        try:
            fields = self.keyword_blacklist.split(",")
        except AttributeError:
            # no blacklist configured on this backend instance
            fields = list()
        self.blacklist = fields

    def containsWildcard(self, value):
        """Report whether *value* (a string) contains an unescaped wildcard.

        Returns the regex match result (truthy/None) for strings, False for
        any other type.
        """
        if type(value) != str:
            return False
        return self.reContainsWildcard(value)
class CarbonBlackQueryBackend(CarbonBlackWildcardHandlingMixin, SingleTextQueryBackend):
"""Converts Sigma rule into CarbonBlack query string. Only searches, no aggregations."""
identifier = "carbonblack"
active = True
index_field = "index"
# \ -> \\
# \* -> \*
# \\* -> \\*
reEscape = re.compile('("|(?<!\\\\)\\\\(?![*?\\\\]))')
reClear = None
andToken = " and "
#reEscape = re.compile("([\s+\\-=!(){}\\[\\]^\"~:/]|(?<!\\\\)\\\\(?![*?\\\\])|\\\\u|&&|\\|\\|)")
reEscape = re.compile("([\s\s+()\"])")
reClear = re.compile("[<>]")
andToken = " AND "
orToken = " OR "
notToken = "-"
notToken = " -"
subExpression = "(%s)"
listExpression = "%s"
listSeparator = " "
valueExpression = "%s"
nullExpression = "- %s=\"*\""
notNullExpression = "%s=\"*\""
listSeparator = " OR "
valueExpression = '%s'
typedValueExpression = {
SigmaRegularExpressionModifier: "/%s/"
}
nullExpression = "NOT _exists_:%s"
notNullExpression = "_exists_:%s"
mapExpression = "%s:%s"
mapListsSpecialHandling = True
mapListValueExpression = "%s IN %s"
mapListsSpecialHandling = False
def generateMapItemListNode(self, key, value):
if(key == "EventID"):
return ("( OR ".join(['%s:%s )' % (self.generateEventKey(item), self.generateEventValue(item)) for item in value if self.generateEventKey(item)!= '']))
def __init__(self, *args, **kwargs):
"""Initialize field mappings."""
super().__init__(*args, **kwargs)
self.category = None
self.excluded_fields = None
elif not set([type(val) for val in value]).issubset({str, int}):
raise TypeError("List values must be strings or numbers")
return "(" + (" OR ".join(['%s:%s' % (key, self.generateValueNode(item)) for item in value])) + ")"
def cleanValue(self, val):
"""Normalize a raw Sigma search value before it is emitted into the query.

NOTE(review): this chunk appears to be a flattened diff rendering, so the
original indentation (and therefore the exact nesting) is not visible;
comments below describe the statements in the order they appear.
"""
# Relax escaping for range expressions ("[1 to *]"): only parentheses are
# escaped there; otherwise whitespace, '+' and parentheses are escaped.
if("[1 to *]" in val):
self.reEscape = re.compile("([()])")
else:
self.reEscape = re.compile("([\s\s+()])")
val = val.strip()
# Base-class cleaning — presumably applies the reEscape pattern selected
# above; TODO confirm against the SingleTextQueryBackend implementation.
val = super().cleanValue(val)
if isinstance(val, str):
# Order-dependent leading-wildcard normalization: a bare leading '*' is
# removed first, then a leading backslash.
if val.startswith("*"):
val = val.replace("*", "",1)
if val.startswith("\\"):
val = val.replace("\\", "", 1)
# NOTE(review): after the strips above, these two branches can only match
# values that originally began with '**\' or '**/' — confirm intended.
if val.startswith("*\\"):
val = val.replace("*\\", "*")
if val.startswith("*/"):
val = val.replace("*/", "*")
# NOTE(review): this replace() removes every '*' in the value, not just
# the leading one — verify that is intended.
if val.startswith("*"):
val = val.replace("*", "")
# Collapse escaped/slashed trailing wildcards back to a bare '*'.
if val.endswith("\\*"):
val = val.replace("\\*", "*")
if val.endswith("/*"):
val = val.replace("/*", "*")
val = val.strip()
return val
def cleanIPRange(self, value):
    """Convert a trailing-wildcard IPv4 pattern into CIDR notation.

    '10.1.*' becomes '10.1.0.0/16'. Strings without a wildcard are returned
    unchanged; lists are cleaned in place, element by element; any other type
    passes through untouched.
    """
    if type(value) is str and '*' in value:
        # Bug fix: the old test was `value.find('*')`, which is falsy (0) for
        # a leading '*' and truthy (-1) when no '*' exists at all, so plain
        # addresses were mangled and leading wildcards were skipped.
        octets = value.count('.')
        if value[-2:] == '.*':
            value = value[:-2]
        network = value + '.0' * (4 - octets)
        # Bug fix: the prefix length is 8 bits per fixed octet (8 * octets);
        # the old `8 * (4 - sub)` only coincided with that for /16 networks.
        # NOTE(review): wildcards not in a trailing '.*' position still yield
        # odd output, matching the original behavior — consider rejecting them.
        return network + '/' + str(8 * octets)
    if type(value) is list:
        for index, item in enumerate(value):
            value[index] = self.cleanIPRange(item)
    return value
def generateValueNode(self, node):
    """Render a single search value; blank values become an empty quoted string."""
    rendered = self.valueExpression % (str(node))
    if not rendered.strip():
        # empty or whitespace-only values must still produce a searchable token
        return '""'
    # keyword fields are emitted without quoting; both branches currently
    # produce the same text
    return rendered if self.matchKeyword else "%s" % rendered
def generateMapItemNode(self, node):
fieldname, value = node
if(fieldname == "path"):
value = self.cleanValuePath(value)
else:
value = self.cleanValue(value)
print(str(value))
if(fieldname == "EventID" and (type(value) is str or type(value) is int )):
fieldname = self.generateEventKey(value)
value = self.generateEventValue(value)
transformed_fieldname = self.fieldNameMapping(fieldname, value)
if(transformed_fieldname == "ipaddr"):
value = self.cleanIPRange(value)
if(transformed_fieldname == ''):
return ''
if self.mapListsSpecialHandling == False and type(value) in (str, int, list) or self.mapListsSpecialHandling == True and type(value) in (str, int):
return self.mapExpression % (transformed_fieldname, self.generateNode(value))
elif type(value) == list:
return self.generateMapItemListNode(transformed_fieldname, value)
elif isinstance(value, SigmaTypeModifier):
return self.generateMapItemTypedNode(transformed_fieldname, value)
elif value is None:
return self.nullExpression % (transformed_fieldname, )
if fieldname.lower() in self.excluded_fields:
return
else:
raise TypeError("Backend does not support map values of type " + str(type(value)))
transformed_fieldname = self.fieldNameMapping(fieldname, value)
if(transformed_fieldname == "ipaddr"):
print("OK")
value = self.cleanIPRange(value)
if self.mapListsSpecialHandling == False and type(value) in (str, int, list) or self.mapListsSpecialHandling == True and type(value) in (str, int):
#return self.mapExpression % (transformed_fieldname, self.generateNode(value))
if isinstance(value, list):
return self.generateNode([self.mapExpression % (transformed_fieldname, self.cleanValue(item)) for item in value])
elif isinstance(value, str) or isinstance(value, int):
return self.mapExpression % (transformed_fieldname, self.generateNode(self.cleanValue(value)))
elif type(value) == list:
return self.generateMapItemListNode(transformed_fieldname, value)
elif isinstance(value, SigmaTypeModifier):
return self.generateMapItemTypedNode(transformed_fieldname, value)
elif value is None:
return self.nullExpression % (transformed_fieldname,)
else:
raise TypeError("Backend does not support map values of type " + str(type(value)))
def generateAggregation(self, agg):
    """Render a Sigma aggregation clause as an eventstats/search pipeline suffix."""
    if agg == None:
        return ""
    if agg.aggfunc == sigma.parser.condition.SigmaAggregationParser.AGGFUNC_NEAR:
        raise NotImplementedError("The 'near' aggregation operator is not yet implemented for this backend")
    grouped = agg.groupfield != None
    if agg.aggfunc_notrans == 'count':
        if agg.aggfield == None:
            # plain event count, optionally grouped
            if grouped:
                return " | eventstats count as val by %s| search val %s %s" % (agg.groupfield, agg.cond_op, agg.condition)
            return " | eventstats count as val | search val %s %s" % (agg.cond_op, agg.condition)
        # count(field) becomes a distinct count; fall through to the generic form
        agg.aggfunc_notrans = 'dc'
    if grouped:
        return " | eventstats %s(%s) as val by %s | search val %s %s" % (agg.aggfunc_notrans, agg.aggfield or "", agg.groupfield or "", agg.cond_op, agg.condition)
    return " | eventstats %s(%s) as val | search val %s %s" % (agg.aggfunc_notrans, agg.aggfield or "", agg.cond_op, agg.condition)
def cleanValue(self, value):
"""Clean and quote a generic search value (string, or list handled per element).

NOTE(review): flattened diff rendering — the original indentation/nesting is
not visible here, so the grouping of the conditionals below is inferred.
"""
new_value = value
if type(new_value) is str:
# Drop a match-anything prefix. NOTE(review): "*\/" is a three-character
# literal ('*','\','/') compared against a two-character slice, so that
# tuple entry can never match — probably "*/" was intended.
if (new_value[:2] in ("*\/","*\\")):
new_value = new_value[2:]
if (new_value[:1] == '*'):
new_value = new_value.replace("*", "", 1)
# Outside range expressions ("x to y"), glue wildcards to their terms.
if ( " to " not in new_value):
new_value = new_value.replace("* ", "*")
new_value = new_value.replace(" *", "*")
# NOTE(review): '\"' is the same string as '"', so this replace is a
# no-op — an escaped quote ('\\"') was probably intended.
new_value = new_value.replace('"', '\"')
# need tuning
# Quote values containing parens/spaces/colons (ranges excluded), then
# escape the parens; spaces are escaped only while the value is unquoted.
if (( "(" in new_value or " " in new_value or ")" in new_value or ":" in new_value) and " to " not in new_value):
if (new_value[0] != '"' and new_value[-1] != '"'):
new_value = '"' + new_value +'"'
new_value = new_value.replace("(", "\(")
new_value = new_value.replace(")", "\)")
if ('"' not in new_value):
new_value = new_value.replace(" ", "\ ")
new_value = new_value.strip()
if type(new_value) is list:
# Lists are cleaned in place, element by element.
for index, vl in enumerate(new_value):
new_value[index] = self.cleanValue(vl)
return new_value
def cleanValuePath(self, value):
    """Clean a filesystem-path search value.

    Strips a leading '*/' or '*\\' (match-anything prefix), then any bare
    leading '*', and regex-escapes values that mix wildcards with spaces.
    Lists are cleaned in place, element by element.
    """
    if type(value) is str:
        # double backslash convention
        # Bug fix: "*\/" was a three-character literal ('*','\','/') compared
        # against a two-character slice, so the '*/' prefix was never
        # stripped; use the intended two-character prefixes.
        if value[:2] in ("*/", "*\\"):
            value = value[2:]
        if value[:1] == '*':
            value = value.replace("*", "", 1)
        # need tuning (per original author): wildcard+space values are
        # regex-escaped wholesale
        if "*" in value and " " in value:
            value = re.escape(value)
        return value.strip()
    if type(value) is list:
        # Bug fix: recurse with the path-aware cleaner; the original called
        # the generic cleanValue() here, losing path-specific handling.
        for index, item in enumerate(value):
            value[index] = self.cleanValuePath(item)
    return value
def generateEventKey(self, value):
    """Return the mapped Carbon Black field name for a known EventID, else ''."""
    try:
        return event[value][0]
    except KeyError:
        return ''
def generateEventValue(self, value):
    """Return the mapped Carbon Black field value for a known EventID, else ''."""
    try:
        return event[value][1]
    except KeyError:
        return ''
def cleanIPRange(self, value):
    """Expand a trailing-wildcard IPv4 pattern into a '[min to max]' range query.

    '10.1.*' becomes '[10.1.0.0 to 10.1.255.255]'. Strings without a wildcard
    are returned unchanged; lists are cleaned in place, element by element;
    any other type passes through untouched.
    """
    if type(value) is list:
        # Bug fix: the old early-return tested `'*' not in value` against the
        # list itself, so any list lacking a literal '*' element was returned
        # untouched and its members never expanded.
        for index, item in enumerate(value):
            value[index] = self.cleanIPRange(item)
        return value
    if type(value) is not str or '*' not in value:
        return value
    octets = value.count('.')
    if value[-2:] == '.*':
        value = value[:-2]
    min_ip = value + '.0' * (4 - octets)
    max_ip = value + '.255' * (4 - octets)
    # NOTE(review): wildcards not in a trailing '.*' position still yield odd
    # output, matching the original behavior — consider rejecting them.
    return '[' + min_ip + ' to ' + max_ip + ']'
def generateNOTNode(self, node):
    """Negate a subexpression; yields None when the inner node generates nothing."""
    inner = super().generateNode(node.item)
    if not inner:
        return None
    return "(%s%s)" % (self.notToken, inner)
# def generateNOTNode(self, node):
# generated = self.generateNode(node.item)
# if generated is not None:
# return self.notToken + generated
# else:
# return None
def postAPI(self,result,title,desc):
url = 'https://10.14.132.35//api/v1/watchlist'
url = os.getenv("cbapi_watchlist")
body = {
"name":title,
"search_query":"q="+str(result),
@@ -203,34 +168,55 @@ class CarbonBlackBackend(SingleTextQueryBackend):
"index_type":"events"
}
header = {
"X-Auth-Token": "099c366b1e56c0bca3ae61ce1fb7435af7a5926c"
"X-Auth-Token": os.getenv("APIToken")
}
print(title)
x = requests.post(url, data =json.dumps(body), headers = header, verify=False)
print(x.text)
def generateEventKey(self, value):
    """Return the mapped field name for a known EventID; fall back to 'eventid'."""
    try:
        return event[value][0]
    except KeyError:
        return 'eventid'
def generateEventValue(self, value):
    """Return the mapped field value for a known EventID, else an empty string."""
    return event[value][1] if value in event else ''
def generate(self, sigmaparser):
"""Method is called for each sigma rule and receives the parsed rule (SigmaParser)"""
columns = list()
title = sigmaparser.parsedyaml["title"]
desc = sigmaparser.parsedyaml["description"]
# print(title)
# print("\n")
try:
self.category = sigmaparser.parsedyaml['logsource'].setdefault('category', None)
self.counted = sigmaparser.parsedyaml.get('counted', None)
self.excluded_fields = [item.lower() for item in sigmaparser.config.config.get("excludedfields", [])]
except KeyError:
self.category = None
for parsed in sigmaparser.condparsed:
query = self.generateQuery(parsed)
before = self.generateBefore(parsed)
after = self.generateAfter(parsed)
result = ""
# print(query.replace("\\\\","\\"))
if before is not None:
result = before
if query is not None:
result += query
if after is not None:
result += after
# if mapped is not None:
# result += fields
# self.postAPI(result,title,desc)
# print (title)
print (str(result))
return result
# val = "vsss admin shadow"
# escapeSubst = "\\\\\g<1>"
# print(self.reEscape.sub(escapeSubst, val))
self.postAPI(result,title,desc)
return result
# if self.category == "process_creation":
# for parsed in sigmaparser.condparsed:
# query = self.generateQuery(parsed)
# result = ""
# if query is not None:
# result += query
# return result
# else:
# raise NotSupportedError("Not supported logsource category.")
+1 -2
View File
@@ -108,7 +108,7 @@ def set_argparser():
argparser.add_argument("--backend-config", "-C", help="Configuration file (YAML format) containing options to pass to the backend")
argparser.add_argument("--defer-abort", "-d", action="store_true", help="Don't abort on parse or conversion errors, proceed with next rule. The exit code from the last error is returned")
argparser.add_argument("--ignore-backend-errors", "-I", action="store_true", help="Only return error codes for parse errors and ignore errors for rules that cause backend errors. Useful, when you want to get as much queries as possible.")
argparser.add_argument("--shoot-yourself-in-the-foot", action="store_true", help=argparse.SUPPRESS)
argparser.add_argument("--shoot-yourshootself-in-the-foot", action="store_true", help=argparse.SUPPRESS)
argparser.add_argument("--verbose", "-v", action="store_true", help="Be verbose")
argparser.add_argument("--debug", "-D", action="store_true", help="Debugging output")
argparser.add_argument("inputs", nargs="*", help="Sigma input files ('-' for stdin)")
@@ -235,7 +235,6 @@ for sigmafile in get_inputs(cmdargs.inputs, cmdargs.recurse):
parser = SigmaCollectionParser(f, sigmaconfigs, rulefilter)
results = parser.generate(backend)
for result in results:
print(result, file=out)