From 193a06d4c9f96d3a02554b80d52ded43528ecc6a Mon Sep 17 00:00:00 2001 From: BlackSnufkin Date: Sun, 3 May 2026 07:08:44 -0700 Subject: [PATCH] Parallelize static analyzers, redesign /analyze/all, tidy logging + saved-view MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - AnalysisManager: static analyzers run concurrently via a ThreadPoolExecutor; dynamic stays parallel for yara/pe_sieve/moneta/patriot, hsb runs solo after to keep its sleep-timing measurements clean. Per-tool start + finish + wall-time logged so progress is visible. - /analyze/all redesign: stat tiles (stages / alerts / elapsed), phase-banded rows, color-coded state pills, agent-down preflight marks unreachable EDR profiles SKIPPED instead of burning the timeout, done banner only links to stages that actually produced data. - file_info hero: buttons fully data-driven — Static / Dynamic / HolyGrail / per-EDR-profile only render if the corresponding saved JSON exists for the sample. - analyze_edr no longer writes a JSON for pre-execution failures (agent_unreachable / busy / error). The error still surfaces in the HTTP response; the saved-view route stops rendering fake results. - Logging: single root-level handler, compact formatter — HH:MM:SS, fixed-width colored level, dim module name with package prefixes stripped, werkzeug renamed to http and access lines reformatted to METHOD path → status. urllib3 / requests muted to WARNING. --- app/__init__.py | 184 +++++++++++++++++++++--- app/analyzers/manager.py | 97 +++++++++++-- app/blueprints/analysis.py | 25 +++- app/services/rendering.py | 20 ++- app/static/js/analyze-all/core.js | 197 +++++++++++++++++++------ app/templates/analyze_all.html | 229 +++++++++++++++++++++++++----- app/templates/file_info.html | 14 ++ 7 files changed, 652 insertions(+), 114 deletions(-) diff --git a/app/__init__.py b/app/__init__.py index 40c7000..e6708a3 100644 --- a/app/__init__.py +++ b/app/__init__.py @@ -89,35 +89,173 @@ def create_app(): return app +import re + +# Werkzeug's access-log message arrives as: +# `127.0.0.1 - - [03/May/2026 06:52:21] "GET /api/... HTTP/1.1" 200 -` +# The IP is always 127.0.0.1 in dev, the bracketed timestamp duplicates +# our own HH:MM:SS prefix, and the HTTP version is constant. Pull out +# the bits that vary and ditch the rest. +_ACCESS_LOG_RE = re.compile( + r'^\S+ - - \[[^\]]+\] "(\S+) (\S+) HTTP/[\d.]+" (\d+) (-|\d+)$' +) + + +class _WerkzeugAccessFilter(logging.Filter): + """Rewrite werkzeug HTTP access lines into `METHOD path → status`.""" + + def filter(self, record): + match = _ACCESS_LOG_RE.match(record.getMessage()) + if match: + method, path, status, _size = match.groups() + record.msg = f'{method:<6} {path} → {status}' + record.args = () + return True + + +class _CompactFormatter(logging.Formatter): + """Compact, aligned, color-aware log formatter. + + Output shape (debug mode): + HH:MM:SS DEBUG manager Running yara + HH:MM:SS INFO edr.elastic Polling Elastic for detection alerts on DESKTOP-X (...) + HH:MM:SS WARN edr_health EDR health poller tick failed + HH:MM:SS INFO http GET /api/edr/agents/status → 200 + + Width-fixed columns (5-char level, 16-char name) so timestamps and + messages line up across the whole stream regardless of which logger + emitted the record. ANSI color codes are appended AFTER width-padding + so they don't break alignment. 
+ + The original LogRecord is left untouched (the previous formatter + mutated `record.levelname` / `record.msg` in place, which breaks + re-emission through a second handler or filter chain). + """ + + LEVEL_COLORS = { + 'DEBUG': Fore.CYAN, + 'INFO': Fore.GREEN, + 'WARNING': Fore.YELLOW, + 'ERROR': Fore.RED, + 'CRITICAL': Fore.MAGENTA + Style.BRIGHT, + } + # 5-char fixed width — keeps the column aligned without losing the + # severity glance value. WARNING -> WARN, CRITICAL -> CRIT. + LEVEL_TAGS = { + 'DEBUG': 'DEBUG', + 'INFO': 'INFO ', + 'WARNING': 'WARN ', + 'ERROR': 'ERROR', + 'CRITICAL': 'CRIT ', + } + def format(self, record): + ts = self.formatTime(record, datefmt='%H:%M:%S') + + level_tag = self.LEVEL_TAGS.get(record.levelname, record.levelname[:5].ljust(5)) + level_color = self.LEVEL_COLORS.get(record.levelname, '') + level_part = f'{level_color}{level_tag}{Style.RESET_ALL}' + + # Name is dim-styled so the visual boundary to the message is + # already clear — no need to right-pad to a fixed width, which + # used to produce a lot of trailing whitespace on short names + # (`http`, `app`, `api`). Level alignment alone gives enough + # vertical structure for scanning. + name = self._compact_name(record.name) + name_part = f'{Style.DIM}{name}{Style.RESET_ALL}' + + message = record.getMessage() + line = f'{ts} {level_part} {name_part} {message}' + + # Mirror stdlib behaviour for exceptions / stack info. + if record.exc_info: + line = f'{line}\n{self.formatException(record.exc_info)}' + if record.stack_info: + line = f'{line}\n{self.formatStack(record.stack_info)}' + return line + + @staticmethod + def _compact_name(name: str) -> str: + """Trim verbose dotted module paths down to something readable + in the 16-char column. Drops the universal `app.` prefix, the + per-package `services.` / `blueprints.` / `analyzers.` prefixes, + and the `_analyzer` / `_edr_analyzer` suffix from analyzer + modules. Renames `werkzeug` → `http` since every line that + logger emits is an HTTP request.""" + if name == 'werkzeug': + return 'http' + if name.startswith('app.'): + name = name[len('app.'):] + for prefix in ('services.', 'blueprints.', 'analyzers.'): + if name.startswith(prefix): + name = name[len(prefix):] + break + # Strip the `_edr_analyzer` flavor first, then the bare `_analyzer`. + for suffix in ('_edr_analyzer', '_analyzer'): + if name.endswith(suffix): + name = name[:-len(suffix)] + break + return name + + def setup_logging(app): - """Configure logging with selective colors and avoid duplicate logs.""" + """Install a single root-level handler for the whole app. + + Configuring at the root means every module logger created via + `logging.getLogger(__name__)` (analyzers, services, edr clients, + blueprints) inherits the same format without per-module setup. Run + only in the Werkzeug reloader's child process to avoid duplicate + output when debug mode is on. + """ if os.environ.get('WERKZEUG_RUN_MAIN') != 'true': return - if app.config['DEBUG']: - log_level = logging.DEBUG + debug = bool(app.config.get('DEBUG')) + level = logging.DEBUG if debug else logging.INFO - from flask.logging import default_handler - app.logger.setLevel(log_level) + if debug: + formatter = _CompactFormatter() + else: + # Production output: timestamped, no ANSI, simple. 
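+        # A line under this format looks like (illustrative):
+        #   2026-05-03 06:52:21 INFO app.analyzers.manager: Running yara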
+ formatter = logging.Formatter( + '%(asctime)s %(levelname)s %(name)s: %(message)s', + datefmt='%Y-%m-%d %H:%M:%S', + ) - class ColoredFormatter(logging.Formatter): - LOG_COLORS = { - "DEBUG": Fore.CYAN, - "INFO": Fore.GREEN, - "WARNING": Fore.YELLOW, - "ERROR": Fore.RED, - "CRITICAL": Fore.MAGENTA + Style.BRIGHT, - } + handler = logging.StreamHandler() + handler.setFormatter(formatter) + handler.setLevel(level) - def format(self, record): - log_color = self.LOG_COLORS.get(record.levelname, "") - levelname_color = f"{log_color}{record.levelname}{Style.RESET_ALL}" - message = f"{Style.RESET_ALL}{record.msg}" - record.levelname = levelname_color - record.msg = message - return super().format(record) + root = logging.getLogger() + for h in root.handlers[:]: + root.removeHandler(h) + root.addHandler(handler) + root.setLevel(level) - formatter = ColoredFormatter('[%(asctime)s - %(name)s] [%(levelname)s] - %(message)s') - default_handler.setFormatter(formatter) + # Flask creates its own logger with a default handler; clear it so + # we don't duplicate every line. Propagation up to root carries the + # message through our formatter. + app.logger.handlers.clear() + app.logger.setLevel(level) + app.logger.propagate = True - app.logger.debug("Debug logging is enabled.") + # Quiet down high-volume third-party loggers. urllib3's connection + # pool dumps multi-line tracebacks at DEBUG every retry attempt, + # which drowns out the analyzer logs operators actually came for. + for noisy, lvl in ( + ('urllib3', logging.WARNING), + ('urllib3.connectionpool', logging.WARNING), + ('requests', logging.WARNING), + ('requests.packages.urllib3', logging.WARNING), + # Werkzeug's per-request access log stays at INFO so it shows + # in debug mode but doesn't double-log via the root handler. + ('werkzeug', logging.INFO), + ): + logging.getLogger(noisy).setLevel(lvl) + + # Compact werkzeug access lines: drop the redundant IP / bracketed + # timestamp that duplicates our own HH:MM:SS prefix. + werkzeug_logger = logging.getLogger('werkzeug') + if not any(isinstance(f, _WerkzeugAccessFilter) for f in werkzeug_logger.filters): + werkzeug_logger.addFilter(_WerkzeugAccessFilter()) + + app.logger.debug('Logging configured (debug mode)') diff --git a/app/analyzers/manager.py b/app/analyzers/manager.py index 237159a..72e254f 100644 --- a/app/analyzers/manager.py +++ b/app/analyzers/manager.py @@ -5,6 +5,7 @@ import subprocess import time import psutil import json +from concurrent.futures import ThreadPoolExecutor, as_completed from typing import Dict, Type, Optional, Tuple from abc import ABC, abstractmethod @@ -47,6 +48,14 @@ class AnalysisManager: 'rededr': RedEdrAnalyzer } + # Analyzers that must run serially AFTER the parallel batch finishes. + # HSB (Hunt-Sleeping-Beacons) measures the target's sleep / thread + # timing — concurrent inspection by PE-Sieve / Moneta / Patriot + # opens handles, walks VAD, and can briefly suspend threads, which + # would distort the timing pattern HSB observes. Run it solo at the + # end so its measurements are clean. 
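+    # Any future timing-sensitive tool can be added to this set; the
+    # partitioning in _run_analyzers reads it generically.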
+ _SERIAL_DYNAMIC_ANALYZERS = frozenset({'hsb'}) + def __init__(self, config: dict, logger: Optional[logging.Logger] = None): self.logger = logger or logging.getLogger(__name__) self.logger.debug("Initializing AnalysisManager") @@ -90,6 +99,25 @@ class AnalysisManager: self.logger.debug("Analyzer initialization completed") def _run_analyzers(self, analyzers: Dict[str, BaseAnalyzer], target, analysis_type: str) -> dict: + """Run a group of analyzers and return their findings keyed by name. + + Static analyzers all run in parallel — they're independent + subprocesses operating on the same on-disk file with their own + output dirs / stdout. Wall time drops from sum(tools) to + max(tools). + + Dynamic analyzers split into two groups: parallel-safe (yara, + pe_sieve, moneta, patriot — read-only IOC scanners) and serial + (anything in `_SERIAL_DYNAMIC_ANALYZERS`, currently just hsb, + whose sleep-timing measurements are perturbed by concurrent + process inspection from the others). The serial group runs AFTER + the parallel batch completes so HSB sees a quiescent target. + + Each analyzer is wrapped so a single failure can't bring down + the rest of the group — the failed entry gets a + `{status: 'error', error: ...}` envelope and the others keep + running. + """ results = {} if not analyzers: self.logger.warning(f"No {analysis_type} analyzers are enabled") @@ -100,16 +128,67 @@ class AnalysisManager: if not self._validate_dynamic_target(target): return {'status': 'error', 'error': 'Process does not exist or is not running'} - self.logger.debug(f"Running {len(analyzers)} {analysis_type} analyzers") - for name, analyzer in analyzers.items(): - try: - self.logger.debug(f"Running {name}") - analyzer.analyze(target) - results[name] = analyzer.get_results() - except Exception as e: - self.logger.error(f"Error in {name}: {str(e)}") - results[name] = {'status': 'error', 'error': str(e)} + # Partition into parallel + serial groups. Static is fully + # parallel; dynamic respects the _SERIAL_DYNAMIC_ANALYZERS set. + if analysis_type == 'dynamic': + parallel = {n: a for n, a in analyzers.items() if n not in self._SERIAL_DYNAMIC_ANALYZERS} + serial = {n: a for n, a in analyzers.items() if n in self._SERIAL_DYNAMIC_ANALYZERS} + else: + parallel = dict(analyzers) + serial = {} + self.logger.debug( + f"Running {analysis_type} analyzers — parallel: {list(parallel)}, " + f"serial: {list(serial)}" + ) + + if parallel: + results.update(self._run_in_parallel(parallel, target)) + for name, analyzer in serial.items(): + results[name] = self._run_one(name, analyzer, target) + + return results + + def _run_one(self, name: str, analyzer: BaseAnalyzer, target) -> dict: + """Run one analyzer, catching exceptions so a single failure + doesn't take down the rest of the batch. Logs start + completion + with per-tool wall time so the operator can see progress in the + debug log.""" + self.logger.debug(f"Running {name}") + t0 = time.monotonic() + try: + analyzer.analyze(target) + result = analyzer.get_results() + except Exception as e: + self.logger.error(f"Error in {name}: {str(e)}") + result = {'status': 'error', 'error': str(e)} + elapsed = time.monotonic() - t0 + self.logger.debug(f"{name} finished in {elapsed:.2f}s") + return result + + def _run_in_parallel(self, analyzers: Dict[str, BaseAnalyzer], target) -> dict: + """Drive `analyzers` concurrently via a thread pool sized to the + batch. 
All analyzers shell out to subprocesses, so the GIL doesn't + bottleneck wall time — this gets us roughly max(per-tool wall + time) instead of sum(per-tool wall time).""" + results: Dict[str, dict] = {} + max_workers = min(len(analyzers), 8) or 1 + t0 = time.monotonic() + with ThreadPoolExecutor(max_workers=max_workers, thread_name_prefix='analyzer') as pool: + futures = {pool.submit(self._run_one, name, a, target): name + for name, a in analyzers.items()} + for fut in as_completed(futures): + name = futures[fut] + try: + results[name] = fut.result() + except Exception as e: + # _run_one already swallows exceptions; this is the + # belt-and-braces path for anything that escapes it. + self.logger.error(f"Error in {name}: {str(e)}") + results[name] = {'status': 'error', 'error': str(e)} + self.logger.debug( + f"Parallel batch finished in {time.monotonic() - t0:.2f}s: {list(results)}" + ) return results def _validate_dynamic_target(self, target) -> bool: diff --git a/app/blueprints/analysis.py b/app/blueprints/analysis.py index dfbb3c6..69e50fe 100644 --- a/app/blueprints/analysis.py +++ b/app/blueprints/analysis.py @@ -276,13 +276,28 @@ def analyze_edr(profile, target): app.logger.error(f"EDR dispatch failed: {e}", exc_info=True) return jsonify({'status': 'error', 'error': str(e)}), 500 - # Persist the Phase 1 snapshot. If Phase 2 is in flight, the background - # thread will overwrite this file when it completes; the frontend polls - # the GET endpoint to pick up the final state. - deps.helpers.save_analysis_results(results, result_path, results_filename) + status = results.get('status', 'completed') + + # Persist the Phase 1 snapshot ONLY when something real actually + # happened on the agent. Pre-execution transport failures + # (agent_unreachable, busy, error) leave us with an empty error + # envelope that has no execution / alerts / hostname — saving it + # would just clutter the saved-view route later with a fake + # "result" that's really just the error message. The dispatch + # error is still surfaced to the caller via the HTTP response. + PRE_EXEC_FAILURES = {'agent_unreachable', 'busy', 'error'} + if status not in PRE_EXEC_FAILURES: + # If Phase 2 is in flight, the background thread will overwrite + # this file when it completes; the frontend polls the GET + # endpoint to pick up the final state. + deps.helpers.save_analysis_results(results, result_path, results_filename) + else: + app.logger.debug( + f"Skipping save for EDR profile={profile} target={target}: " + f"status={status} (pre-execution failure)" + ) payload = {'edr': results} - status = results.get('status', 'completed') if status in ('error', 'agent_unreachable'): return jsonify({'status': status, 'results': payload}), 502 if status == 'busy': diff --git a/app/services/rendering.py b/app/services/rendering.py index 3f0f02f..e3a8a49 100644 --- a/app/services/rendering.py +++ b/app/services/rendering.py @@ -110,8 +110,25 @@ def render_file_info(data): f"Calculated: {checksum['calculated_checksum']}" ) + # Hero buttons are dynamic — only render the analyses that actually + # have saved data on disk for THIS sample. Listing every registered + # tool regardless of whether it ever ran for this file was confusing + # (e.g. clicking "Elastic Defend" on a sample only ever dispatched to + # Fibratus, or "Static Analysis" on a freshly-uploaded driver that + # only went through HolyGrail). 
Each `*_results` field is populated + # by helpers._load_file_data based on JSON file presence; pre-exec + # failures don't write a JSON, so the presence of each file is a + # clean "this analysis actually ran for this sample" signal. deps = current_app.extensions['litterbox'] - edr_profiles = deps.edr_registry.list_profiles() if hasattr(deps, 'edr_registry') else [] + all_profiles = deps.edr_registry.list_profiles() if hasattr(deps, 'edr_registry') else [] + saved_profile_names = set((data.get('edr_results') or {}).keys()) + edr_profiles = [p for p in all_profiles if p['name'] in saved_profile_names] + + available = { + 'static': data.get('static_results') is not None, + 'dynamic': data.get('dynamic_results') is not None, + 'holygrail': data.get('byovd_results') is not None, + } logger.debug("Rendering file_info.html template") return render_template( @@ -119,6 +136,7 @@ def render_file_info(data): file_info=file_info, entropy_risk_levels={'High': 7.2, 'Medium': 6.8, 'Low': 0}, edr_profiles=edr_profiles, + available=available, ) diff --git a/app/static/js/analyze-all/core.js b/app/static/js/analyze-all/core.js index f7eaf9f..5ffb675 100644 --- a/app/static/js/analyze-all/core.js +++ b/app/static/js/analyze-all/core.js @@ -35,15 +35,72 @@ function row(stage, profile = null) { return document.querySelector(sel); } +// Map row kind → human label + lb-tag severity class for the state pill. +const STATE_LABEL = { + queued: { label: 'QUEUED', cls: 'muted' }, + running: { label: 'RUNNING', cls: 'medium' }, + done: { label: 'COMPLETED', cls: 'low' }, + failed: { label: 'FAILED', cls: 'high' }, + skipped: { label: 'SKIPPED', cls: 'muted' }, +}; + function setStatus(stage, profile, kind, detailText) { const r = row(stage, profile); if (!r) return; - const dot = r.querySelector('[data-role="dot"]'); + const dot = r.querySelector('[data-role="dot"]'); const detail = r.querySelector('[data-role="detail"]'); + const state = r.querySelector('[data-role="state"]'); if (dot) dot.className = `lb-all-dot lb-all-dot--${kind}`; if (detail && detailText != null) detail.textContent = detailText; + if (state) { + const meta = STATE_LABEL[kind] || STATE_LABEL.queued; + state.className = `lb-tag ${meta.cls} lb-all-state`; + state.textContent = meta.label; + } + r.classList.toggle('is-skipped', kind === 'skipped'); + refreshStagesCounter(); } +// Refresh the "N / total" stages counter at the top of the page based on +// how many rows are in a terminal state (done / failed / skipped). +function refreshStagesCounter() { + const counter = document.getElementById('allStagesCounter'); + if (!counter) return; + const rows = document.querySelectorAll('.lb-all-row'); + let total = rows.length; + let settled = 0; + rows.forEach(r => { + const dot = r.querySelector('[data-role="dot"]'); + if (!dot) return; + if (dot.classList.contains('lb-all-dot--done') || + dot.classList.contains('lb-all-dot--failed') || + dot.classList.contains('lb-all-dot--skipped')) { + settled += 1; + } + }); + counter.textContent = `${settled} / ${total}`; +} + +// Track total EDR alerts seen across all profiles, surface in the top tile. +let _totalAlerts = 0; +function bumpAlertCounter(n) { + _totalAlerts += n; + const el = document.getElementById('allAlertsCounter'); + if (el) { + el.textContent = String(_totalAlerts); + el.style.color = _totalAlerts > 0 ? 
'var(--lb-accent-soft)' : ''; + } +} +function setAlertCounter(n) { + _totalAlerts = n; + const el = document.getElementById('allAlertsCounter'); + if (el) { + el.textContent = String(_totalAlerts); + el.style.color = _totalAlerts > 0 ? 'var(--lb-accent-soft)' : ''; + } +} +setAlertCounter(0); + function setElapsed(stage, profile, ms) { const r = row(stage, profile); if (!r) return; @@ -55,12 +112,18 @@ function setElapsed(stage, profile, ms) { } // Top-of-page overall timer. Runs from page load until all done. -const overallEl = document.getElementById('allOverallTimer'); +// Mirrors into both the badge in the title bar AND the big tile at the +// top of the page (which gets sub-second updates via the same interval). +const overallEl = document.getElementById('allOverallTimer'); +const elapsedTileEl = document.getElementById('allElapsedDisplay'); +function fmtElapsed(ms) { + const s = Math.floor(ms / 1000); + return `${Math.floor(s / 60).toString().padStart(2, '0')}:${(s % 60).toString().padStart(2, '0')}`; +} let overallTimer = setInterval(() => { - const elapsed = Date.now() - PAGE_START; - const s = Math.floor(elapsed / 1000); - if (overallEl) overallEl.textContent = - `${Math.floor(s / 60).toString().padStart(2, '0')}:${(s % 60).toString().padStart(2, '0')}`; + const text = fmtElapsed(Date.now() - PAGE_START); + if (overallEl) overallEl.textContent = text; + if (elapsedTileEl) elapsedTileEl.textContent = text; }, 1000); // Per-row live elapsed ticker — only running rows tick. @@ -149,6 +212,7 @@ async function runEdrProfile(profile) { ); const failed = (final.status === 'partial' || final.status === 'error'); setStatus('edr', profile, failed ? 'failed' : 'done', failed ? `${final.status}: ${final.error || ''}` : detail); + if (!failed && totalAlerts > 0) bumpAlertCounter(totalAlerts); } catch (err) { setStatus('edr', profile, 'failed', `Error: ${err.message}`); } finally { @@ -198,11 +262,49 @@ async function runDynamic() { } } +// ---- Agent preflight ---------------------------------------------------- +// +// Hit /api/edr/agents/status (server-cached, ~instant) before kicking off +// the EDR profiles. Profiles whose agent isn't reachable get marked as +// "skipped" with no dispatch attempt — saves the 4-5s timeout each agent +// would otherwise burn just to fail. +async function probeReachableProfiles() { + if (!cfg.edrProfiles.length) return new Set(); + try { + const resp = await fetch('/api/edr/agents/status', { cache: 'no-store' }); + if (!resp.ok) return null; // soft-fail → caller dispatches all + const data = await resp.json(); + const byName = new Map( + (data.agents || []).map(a => [a.name, !!(a.agent && a.agent.reachable)]) + ); + const reachable = new Set(); + for (const p of cfg.edrProfiles) { + // Profiles we don't have status for get the benefit of the + // doubt — let the dispatch attempt surface the real error. + if (!byName.has(p) || byName.get(p)) reachable.add(p); + } + return reachable; + } catch { + return null; // soft-fail + } +} + // ---- Orchestration ------------------------------------------------------ async function run() { - // Static + every EDR profile fire immediately, all in parallel. + // Preflight EDR agents — skip the ones the dashboard says are down. 
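+    // probeReachableProfiles() expects a payload shaped like
+    // (keys per the parsing in that helper; values illustrative):
+    //   { "agents": [{ "name": "DefenderVM", "agent": { "reachable": false } }] }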
+ const reachable = await probeReachableProfiles(); + const edrToDispatch = []; + for (const p of cfg.edrProfiles) { + if (reachable === null || reachable.has(p)) { + edrToDispatch.push(p); + } else { + setStatus('edr', p, 'skipped', 'Agent unreachable — skipped'); + } + } + + // Static + reachable EDR profiles fire immediately, all in parallel. const staticPromise = runStatic(); - const edrPromises = cfg.edrProfiles.map(p => runEdrProfile(p)); + const edrPromises = edrToDispatch.map(p => runEdrProfile(p)); // Dynamic waits ONLY for Static — EDR is on a remote VM, no resource // contention with the local Dynamic analyzers. EDR continues in @@ -224,9 +326,10 @@ async function run() { showDoneBanner(); } -/** Once the pipeline settles, turn each row into a link to its saved - * detailed view (e.g. /results/static/, /results/dynamic/, - * /results/edr//). Failed rows stay un-linked. */ +/** Once the pipeline settles, mark each completed row as clickable so + * it links to its saved detailed view. Failed / skipped rows stay + * non-interactive. The arrow chevron is built into the template and + * the `.is-clickable` class controls its visibility (CSS). */ function linkifyCompletedRows() { document.querySelectorAll('.lb-all-row').forEach(r => { const dot = r.querySelector('[data-role="dot"]'); @@ -240,15 +343,9 @@ function linkifyCompletedRows() { null ); if (!url) return; - r.style.cursor = 'pointer'; + r.classList.add('is-clickable'); r.title = `View saved ${stage} results`; r.addEventListener('click', () => { window.location.href = url; }); - // Visual cue — drop a small chevron at the right edge. - const arrow = document.createElement('span'); - arrow.className = 'lb-muted lb-mono'; - arrow.style.cssText = 'margin-left: 8px; font-size: 13px;'; - arrow.textContent = '→'; - r.appendChild(arrow); }); } @@ -256,31 +353,49 @@ function showDoneBanner() { const banner = document.getElementById('allDoneBanner'); if (!banner) return; banner.classList.remove('hidden'); - banner.querySelector('.lb-strong').textContent = - 'Pipeline complete — click any completed row to view its detailed results.'; - banner.querySelector('.lb-muted').textContent = ''; - // Add explicit jump buttons. - const body = banner.querySelector('.lb-panel-body'); - if (body && !body.querySelector('.lb-all-jump-row')) { - const div = document.createElement('div'); - div.className = 'lb-all-jump-row'; - div.style.cssText = 'display: flex; gap: 8px; justify-content: center; margin-top: 12px; flex-wrap: wrap;'; - const links = [ - ['Static', `/results/static/${cfg.fileHash}`], - ...cfg.edrProfiles.map(p => [p, `/results/edr/${encodeURIComponent(p)}/${cfg.fileHash}`]), - ['Dynamic', `/results/dynamic/${cfg.fileHash}`], - ['File Info', `/results/info/${cfg.fileHash}`], - ]; - for (const [label, url] of links) { - const a = document.createElement('a'); - a.href = url; - a.className = 'lb-btn lb-btn-ghost'; - a.style.cssText = 'padding: 4px 12px; font-size: 12px;'; - a.textContent = label; - div.appendChild(a); + + // Populate the jump-row with one link per stage that actually + // produced saved data — skip failed / skipped EDR profiles since + // they don't have a saved view to navigate to. 
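+    // e.g. a run where only Static and Dynamic completed yields
+    // [Static] [Dynamic] [File Info] — every EDR profile link omitted
+    // (illustrative; actual links come from rowState() below).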
+ const jumpRow = banner.querySelector('.lb-all-jump-row'); + if (!jumpRow || jumpRow.children.length) return; + + const links = []; + if (rowState('static') === 'done') { + links.push(['Static', `/results/static/${cfg.fileHash}`, 'low']); + } + for (const p of cfg.edrProfiles) { + if (rowState('edr', p) === 'done') { + const profLabel = document.querySelector( + `.lb-all-row[data-stage="edr"][data-profile="${p}"] .lb-strong` + )?.textContent || p; + links.push([profLabel, `/results/edr/${encodeURIComponent(p)}/${cfg.fileHash}`, 'low']); } - body.appendChild(div); + } + if (rowState('dynamic') === 'done') { + links.push(['Dynamic', `/results/dynamic/${cfg.fileHash}`, 'low']); + } + links.push(['File Info', `/results/info/${cfg.fileHash}`, 'muted']); + + for (const [label, url, sev] of links) { + const a = document.createElement('a'); + a.href = url; + a.className = `lb-btn lb-btn-ghost lb-tag-${sev}`; + a.textContent = label; + jumpRow.appendChild(a); } } +/** Read the terminal state of a row. Returns 'done' | 'failed' | + * 'skipped' | 'queued' | 'running' based on the dot class. */ +function rowState(stage, profile = null) { + const r = row(stage, profile); + const dot = r?.querySelector('[data-role="dot"]'); + if (!dot) return 'queued'; + for (const cls of ['done', 'failed', 'skipped', 'running', 'queued']) { + if (dot.classList.contains(`lb-all-dot--${cls}`)) return cls; + } + return 'queued'; +} + document.addEventListener('DOMContentLoaded', run); diff --git a/app/templates/analyze_all.html b/app/templates/analyze_all.html index 5eed326..4653972 100644 --- a/app/templates/analyze_all.html +++ b/app/templates/analyze_all.html @@ -8,90 +8,249 @@ {% block content %} +
-  <h2>All-in-One Analysis</h2>
-  <span id="allOverallTimer" class="lb-mono">00:00</span>
+  <div class="lb-all-header">
+    <h2>All-in-One Analysis</h2>
+    <span class="lb-tag lb-mono">{{ file_hash[:12] }}…</span>
+    <span id="allOverallTimer" class="lb-mono">00:00</span>
+  </div>
 
-  <p class="lb-muted">
-    Static and every registered EDR profile fire immediately in parallel.
-    Dynamic waits only for Static to finish — EDR runs on a remote VM
-    with no contention against the local Dynamic analyzers, so it
-    keeps running in parallel with Dynamic. The page redirects to the
-    file-info view when everything settles.
-  </p>
+  <div class="lb-all-tiles">
+    <div class="lb-all-tile">
+      <span class="lb-muted">Stages</span>
+      <span id="allStagesCounter" class="lb-mono">0 / {{ 2 + (edr_profiles|length) }}</span>
+    </div>
+    <div class="lb-all-tile">
+      <span class="lb-muted">EDR Alerts</span>
+      <span id="allAlertsCounter" class="lb-mono"></span>
+    </div>
+    <div class="lb-all-tile">
+      <span class="lb-muted">Elapsed</span>
+      <span id="allElapsedDisplay" class="lb-mono">00:00</span>
+    </div>
+  </div>
+
+  <p class="lb-muted">
+    Static and every reachable EDR profile fire immediately in parallel.
+    Dynamic waits only for Static to finish — EDR runs on a remote VM
+    with no resource contention against the local Dynamic analyzers, so
+    it keeps running in parallel with Dynamic.
+  </p>
 
-  <div class="lb-all-group">
-    <div class="lb-all-group-label">Local + Remote (parallel)</div>
+  <div class="lb-all-phase">
+    <span class="lb-strong">Phase 1 · Parallel</span>
+    <span class="lb-muted">Static + every reachable EDR profile</span>
+  </div>
 
   <div class="lb-all-row" data-stage="static">
     <span class="lb-all-dot lb-all-dot--queued" data-role="dot"></span>
-    <div class="lb-strong">Static Analysis</div>
+    <div>
+      <div class="lb-strong">Static</div>
+      <div class="lb-muted">YARA · CheckPlz · Stringnalyzer</div>
+    </div>
     <div class="lb-muted" data-role="detail">Queued</div>
+    <span class="lb-tag muted lb-all-state" data-role="state">QUEUED</span>
   </div>
 
   {% for profile in edr_profiles %}
   <div class="lb-all-row" data-stage="edr" data-profile="{{ profile.name }}">
     <span class="lb-all-dot lb-all-dot--queued" data-role="dot"></span>
-    <div class="lb-strong">{{ profile.display_name }} EDR</div>
+    <div>
+      <div class="lb-strong">{{ profile.display_name }}</div>
+      <div class="lb-muted">EDR{% if profile.kind %} · {{ profile.kind }}{% endif %}</div>
+    </div>
     <div class="lb-muted" data-role="detail">Queued</div>
+    <span class="lb-tag muted lb-all-state" data-role="state">QUEUED</span>
   </div>
   {% endfor %}
-  </div>
 
-  <div class="lb-all-group">
-    <div class="lb-all-group-label">After Static</div>
+  <div class="lb-all-phase">
+    <span class="lb-strong">Phase 2 · After Static</span>
+    <span class="lb-muted">Local payload detonation + memory scanners</span>
+  </div>
 
   <div class="lb-all-row" data-stage="dynamic">
     <span class="lb-all-dot lb-all-dot--queued" data-role="dot"></span>
-    <div class="lb-strong">Dynamic Analysis</div>
+    <div>
+      <div class="lb-strong">Dynamic</div>
+      <div class="lb-muted">YARA · PE-Sieve · Moneta · Patriot · HSB · RedEdr</div>
+    </div>
     <div class="lb-muted" data-role="detail">Waiting for Static to finish</div>
+    <span class="lb-tag muted lb-all-state" data-role="state">QUEUED</span>
   </div>
-</div>