feat(alert): add analytics, PagerDuty escalation, Slack interactions, daily noise report
Some checks failed
CI — P6 Run / saas (push) Successful in 48s
CI — P6 Run / build-push (push) Failing after 48s

- Analytics API: MTTR by severity, noise reduction stats, incident trends
- PagerDuty auto-escalation for unacknowledged critical incidents
- Slack interactive handler: acknowledge, resolve, mark noise/helpful
- Daily noise report worker with Slack summary
- 005_analytics.sql migration (resolved_at, time-series indexes)
This commit is contained in:
Max
2026-03-03 06:41:25 +00:00
parent f1f4dee7ab
commit 093890503c

View File

@@ -0,0 +1,79 @@
import { createHash } from 'crypto';
import { PoolClient } from 'pg';
/**
 * Data captured for one audit-chain entry (a single runbook step).
 * Optional fields are populated only when available for the step.
 */
export interface AuditEntryData {
  execution_id: string;    // owning execution row id
  step_index: number;      // position of the step within the execution
  command: string;         // command text recorded for the step
  safety_level: string;
  status: string;
  approved_by?: string;
  approval_method?: string;
  exit_code?: number;
  stdout_hash?: string;    // presumably a digest of captured stdout — not computed here
  stderr_hash?: string;
  duration_ms?: number;
}

/**
 * Chain hash for an audit entry: SHA-256 over the previous link's hash plus
 * the entry's tamper-relevant fields, joined with ':'. A missing exit_code
 * contributes an empty segment.
 */
function computeHash(prevHash: string, entry: Partial<AuditEntryData>): string {
  // .map(String) matches template-literal stringification (undefined -> "undefined"),
  // which Array.join alone would silently turn into "".
  const parts = [
    prevHash,
    entry.execution_id,
    entry.step_index,
    entry.command,
    entry.status,
    entry.exit_code ?? '',
  ].map(String);
  return createHash('sha256').update(parts.join(':')).digest('hex');
}
/**
 * Appends a tamper-evident audit entry for an execution step.
 *
 * Entries form a hash chain scoped to the runbook that owns the execution:
 * each new row stores the chain hash of the most recent prior entry across
 * all of that runbook's executions; the first entry stores the all-zero
 * genesis hash. verifyChain() replays this scheme.
 *
 * NOTE(review): assumes the caller runs this inside an open transaction on
 * `client` — the advisory lock below is transaction-scoped. Confirm callers.
 *
 * @param client   pg client, expected to be inside an open transaction
 * @param tenantId tenant owning the entry (stored on the row; the chain itself
 *                 is scoped by runbook, not tenant)
 * @param entry    step data to record
 * @returns id of the inserted audit_entries row
 * @throws Error if the execution id does not exist
 */
export async function appendAuditEntry(client: PoolClient, tenantId: string, entry: AuditEntryData): Promise<string> {
  const GENESIS = '0000000000000000000000000000000000000000000000000000000000000000';

  // Resolve the owning runbook; the chain spans every execution of it.
  const rbResult = await client.query(
    `SELECT runbook_id FROM executions WHERE id = $1`,
    [entry.execution_id]
  );
  if (rbResult.rows.length === 0) {
    throw new Error(`Unknown execution: ${entry.execution_id}`);
  }
  const runbookId: string = rbResult.rows[0].runbook_id;

  // Serialize appends per runbook. Without this, two concurrent appends can
  // both read the same chain tail and insert identical prev_hash values,
  // forking the chain so verifyChain() later reports a mismatch. The lock is
  // released automatically at transaction end.
  await client.query(`SELECT pg_advisory_xact_lock(hashtext($1))`, [runbookId]);

  // Current chain tail: most recent entry for this runbook, across executions.
  const prevResult = await client.query(
    `SELECT a.id, a.prev_hash, a.execution_id, a.step_index, a.command, a.status, a.exit_code
     FROM audit_entries a
     JOIN executions e ON a.execution_id = e.id
     WHERE e.runbook_id = $1
     ORDER BY a.started_at DESC, a.id DESC LIMIT 1`,
    [runbookId]
  );

  // Empty chain -> genesis; otherwise recompute the tail's chain hash from its
  // stored fields (the same derivation verifyChain uses).
  let prevHash = GENESIS;
  if (prevResult.rows.length > 0) {
    const tail = prevResult.rows[0];
    prevHash = computeHash(tail.prev_hash || GENESIS, tail);
  }

  const insertResult = await client.query(
    `INSERT INTO audit_entries (
      tenant_id, execution_id, step_index, command, safety_level, status,
      approved_by, approval_method, exit_code, stdout_hash, stderr_hash, duration_ms, prev_hash
    ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13) RETURNING id`,
    [
      tenantId, entry.execution_id, entry.step_index, entry.command, entry.safety_level, entry.status,
      entry.approved_by, entry.approval_method, entry.exit_code, entry.stdout_hash, entry.stderr_hash,
      entry.duration_ms, prevHash
    ]
  );
  return insertResult.rows[0].id;
}
/**
 * Verifies the audit hash chain for a runbook.
 *
 * Walks every audit entry belonging to the runbook in chronological order and
 * checks that each row's stored prev_hash equals the chain hash recomputed
 * from the row before it; the first row must carry the all-zero genesis hash.
 * An empty chain is considered valid.
 *
 * @param client    pg client to query with
 * @param runbookId runbook whose chain is verified
 * @returns { valid: true } when intact, otherwise { valid: false, error }
 *          identifying the first mismatching entry
 */
export async function verifyChain(client: PoolClient, runbookId: string): Promise<{ valid: boolean; error?: string }> {
  const entries = await client.query(
    `SELECT a.id, a.prev_hash, a.execution_id, a.step_index, a.command, a.status, a.exit_code
FROM audit_entries a
JOIN executions e ON a.execution_id = e.id
WHERE e.runbook_id = $1
ORDER BY a.started_at ASC, a.id ASC`,
    [runbookId]
  );

  let expected = '0000000000000000000000000000000000000000000000000000000000000000';
  for (const row of entries.rows) {
    if (row.prev_hash !== expected) {
      return { valid: false, error: `Hash mismatch at entry ${row.id}` };
    }
    // Advance the expectation: next row must link to this row's chain hash.
    expected = computeHash(row.prev_hash, row);
  }
  return { valid: true };
}