From ae5ab7d4b7e6c1edd47b34c6fba3bd51dabca38a Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Sun, 10 May 2026 17:05:18 +0000
Subject: [PATCH 01/13] Add deterministic AWY scripts and workflow scaffolding
Co-authored-by: mnkiefer <8320933+mnkiefer@users.noreply.github.com>
---
.github/workflows/aw-portfolio-yield.md | 132 ++
.../workflows/shared/otel-observability.md | 18 +
scripts/aw_yield_postcompute.py | 429 ++++++
scripts/aw_yield_precompute.py | 1174 +++++++++++++++++
tests/test_aw_yield_postcompute.py | 170 +++
tests/test_aw_yield_precompute.py | 150 +++
6 files changed, 2073 insertions(+)
create mode 100644 .github/workflows/aw-portfolio-yield.md
create mode 100644 .github/workflows/shared/otel-observability.md
create mode 100644 scripts/aw_yield_postcompute.py
create mode 100644 scripts/aw_yield_precompute.py
create mode 100644 tests/test_aw_yield_postcompute.py
create mode 100644 tests/test_aw_yield_precompute.py
diff --git a/.github/workflows/aw-portfolio-yield.md b/.github/workflows/aw-portfolio-yield.md
new file mode 100644
index 00000000000..f5f59879d38
--- /dev/null
+++ b/.github/workflows/aw-portfolio-yield.md
@@ -0,0 +1,132 @@
+---
+name: Agentic Workflow Portfolio Yield
+description: Weekly portfolio analysis of agentic workflows using deterministic scoring, overlap detection, and OTel-backed evidence for governance recommendations
+on:
+ schedule: weekly on monday around 09:00
+ workflow_dispatch:
+permissions:
+ contents: read
+ actions: read
+ issues: read
+ pull-requests: read
+engine: claude
+strict: true
+timeout-minutes: 25
+network:
+ allowed: [defaults, github]
+tools:
+ bash: true
+ github:
+ mode: gh-proxy
+ toolsets: [default, actions, pull_requests]
+safe-outputs:
+ mentions: false
+ allowed-github-references: []
+ max-bot-mentions: 0
+ create-issue:
+ labels: [automation, report, observability]
+ max: 1
+ close-older-issues: true
+ expires: 30d
+imports:
+ - shared/otel-observability.md
+pre-agent-steps:
+ - name: Precompute workflow portfolio data
+ run: |
+ set -euo pipefail
+ mkdir -p /tmp/gh-aw
+ python3 scripts/aw_yield_precompute.py --workflows ".github/workflows" --out /tmp/aw-yield-precompute.json
+post-steps:
+ - name: Finalize workflow portfolio report
+ run: |
+ set -euo pipefail
+ mkdir -p /tmp/gh-aw
+ python3 scripts/aw_yield_postcompute.py --precompute /tmp/aw-yield-precompute.json --agent-output /tmp/gh-aw --out /tmp/aw-yield-final.json
+---
+# Agentic Workflow Portfolio Yield
+
+You are the semantic interpreter for the repository's agentic workflow portfolio.
+
+## Hard Rules
+
+- Treat `/tmp/aw-yield-precompute.json` as the factual source of truth.
+- OTel = facts. Python = math. Agent = interpretation.
+- Do **not** recompute raw scores, ranking, overlap values, fractions, or portfolio math from scratch.
+- Do **not** invent telemetry, economics, confidence, or success evidence.
+- Use the `otel` MCP server only for aggregated summaries when the precompute file explicitly indicates that telemetry exists but needs brief interpretation.
+- Do not request or inspect raw traces.
+- Do not perform write actions with GitHub tools.
+
+## Required Interpretation Scope
+
+Explicitly evaluate these three levels:
+
+1. **Workflow level** — is each workflow worth running?
+2. **Episode level** — do related workflow groups create value or coordination drag?
+3. **Portfolio level** — is the overall workflow ecosystem becoming more coherent and reusable, or more fragmented and noisy?
+
+## Inputs
+
+Read and rely on:
+
+- `/tmp/aw-yield-precompute.json`
+- workflow recommendation seeds already computed there
+- overlap clusters already computed there
+- organizational health signals already computed there
+- optional OTel summaries already folded into the precompute payload
+
+## Deliverables
+
+1. Write `/tmp/gh-aw/portfolio-yield-agent.json` with this shape:
+
+```json
+{
+ "executive_summary": "",
+ "recommendations": {
+ "keep": [{"path": "", "reason": ""}],
+ "revise": [{"path": "", "reason": ""}],
+ "merge": [{"path": "", "reason": ""}],
+ "instrument": [{"path": "", "reason": ""}],
+ "retire": [{"path": "", "reason": ""}]
+ },
+ "highest_value_actions": ["", "", ""],
+ "deterministic_vs_agentic_findings": [""],
+ "episode_observations": [""],
+ "retirement_candidates": [""],
+ "consolidation_opportunities": [""],
+ "instrumentation_gaps": [""],
+ "telemetry_claims": []
+}
+```
+
+2. Produce exactly one `create_issue` safe output titled:
+
+`Agentic Workflow Portfolio Yield Report — YYYY-MM-DD`
+
+3. The issue body must include these sections:
+
+- `# Agentic Workflow Portfolio Yield Report`
+- `## Executive Summary`
+- `## Portfolio Health`
+- `## Workflow Portfolio`
+- `## Overlap Clusters`
+- `## Episode-Level Observations` (only if evidence exists)
+- `## Organizational Health Signals`
+- `## Deterministic vs Agentic Findings`
+- `## Highest-Value Actions`
+- `## Retirement Candidates`
+- `## Consolidation Opportunities`
+- `## Instrumentation Gaps`
+- `## Deterministic Portfolio JSON`
+
+## Recommendation Rules
+
+- Keep = high yield, high trust, low risk, low overlap.
+- Revise = plausible usefulness but excessive cost, maintenance drag, risk, or agentic fraction.
+- Merge = overlapping workflows or clusters competing for the same niche.
+- Instrument = missing telemetry, observability, or safe evidence.
+- Retire = low yield, low trust, and high drag.
+
+## Usage
+
+This workflow runs weekly and also supports manual `workflow_dispatch` for on-demand portfolio reviews.
diff --git a/.github/workflows/shared/otel-observability.md b/.github/workflows/shared/otel-observability.md
new file mode 100644
index 00000000000..fcffc46b999
--- /dev/null
+++ b/.github/workflows/shared/otel-observability.md
@@ -0,0 +1,18 @@
+---
+env:
+ OTEL_BACKEND_URL: ${{ secrets.OTLP_ENDPOINT }}
+ OTEL_BACKEND_TOKEN: ${{ secrets.OTLP_TOKEN }}
+observability:
+ otlp:
+ endpoint:
+ url: ${{ secrets.OTLP_ENDPOINT }}
+ headers:
+ Authorization: ${{ secrets.OTLP_TOKEN }}
+mcp-servers:
+ otel:
+ command: npx
+ args: ["@your-org/otel-query-mcp"]
+ env:
+ OTEL_BACKEND_URL: ${{ env.OTEL_BACKEND_URL }}
+ OTEL_BACKEND_TOKEN: ${{ env.OTEL_BACKEND_TOKEN }}
+---
diff --git a/scripts/aw_yield_postcompute.py b/scripts/aw_yield_postcompute.py
new file mode 100644
index 00000000000..c6a8c05e332
--- /dev/null
+++ b/scripts/aw_yield_postcompute.py
@@ -0,0 +1,429 @@
+#!/usr/bin/env python3
+"""Deterministic postcompute for Agentic Workflow Portfolio Yield."""
+
+from __future__ import annotations
+
+import argparse
+import datetime as dt
+import json
+import sys
+from pathlib import Path
+from typing import Any
+
+import aw_yield_precompute as pre
+
# Map lowercase bucket names -> canonical Title-case names from the precompute
# module (e.g. "keep" -> "Keep"); used to normalize agent-provided buckets.
ALLOWED_BUCKETS = {name.lower(): name for name in pre.ALLOWED_RECOMMENDATIONS}
# Candidate filenames (checked in order) for the agent's summary JSON.
AGENT_SUMMARY_CANDIDATES = ("portfolio-yield-agent.json", "aw-portfolio-yield-agent.json")
# Hard cap, in characters, on the rendered markdown report body.
MAX_REPORT_LENGTH = 45000
+
+
class FinalizeError(ValueError):
    """Raised when postcompute inputs are malformed."""


def load_json(path: Path) -> Any:
    """Read and parse a JSON document.

    Raises FinalizeError when the file is missing or contains invalid JSON.
    """
    try:
        raw = path.read_text(encoding="utf-8")
    except FileNotFoundError as exc:
        raise FinalizeError(f"Missing JSON file: {path}") from exc
    try:
        return json.loads(raw)
    except json.JSONDecodeError as exc:
        raise FinalizeError(f"Malformed JSON in {path}: {exc}") from exc
+
+
def clamp_workflow_scores(workflow: dict[str, Any]) -> dict[str, Any]:
    """Return a copy of *workflow* with every known score bounded to [0, 1].

    Also re-derives deterministic_fraction as 1 - agentic_fraction and
    de-duplicates notes while preserving their first-seen order.
    """
    score_keys = (
        "permissions_risk",
        "agentic_fraction",
        "deterministic_fraction",
        "usefulness",
        "adoption",
        "trust",
        "cost",
        "risk",
        "maintenance_drag",
        "overlap_drag",
        "yield",
    )
    bounded = dict(workflow)
    for key in score_keys:
        bounded[key] = round(pre.clamp(workflow.get(key, 0.0)), 4)
    # deterministic_fraction is always the complement of agentic_fraction.
    bounded["deterministic_fraction"] = round(pre.clamp(1.0 - bounded["agentic_fraction"]), 4)
    # dict.fromkeys keeps the first occurrence of each note, in order.
    bounded["notes"] = list(dict.fromkeys(workflow.get("notes", [])))
    return bounded
+
+
def recommendation_buckets(seed: dict[str, Any], workflows: dict[str, dict[str, Any]]) -> dict[str, list[str]]:
    """Normalize seed recommendations into lowercase buckets of known paths.

    Unknown bucket names and workflows missing from *workflows* are dropped;
    each path appears at most once per bucket, preserving insertion order.
    """
    normalized: dict[str, list[str]] = {name: [] for name in ALLOWED_BUCKETS}
    for raw_bucket, raw_entries in (seed or {}).items():
        bucket = raw_bucket.lower()
        if bucket not in normalized:
            continue
        for raw_entry in raw_entries or []:
            if isinstance(raw_entry, dict):
                path = raw_entry["path"]
            else:
                path = str(raw_entry)
            if path in workflows and path not in normalized[bucket]:
                normalized[bucket].append(path)
    return normalized
+
+
def read_agent_summary(agent_dir: Path) -> dict[str, Any]:
    """Load the first agent summary JSON found in *agent_dir*.

    Returns {} when no candidate file exists; raises FinalizeError when a
    candidate exists but does not contain a JSON object.
    """
    for name in AGENT_SUMMARY_CANDIDATES:
        candidate = agent_dir / name
        if not candidate.exists():
            continue
        payload = load_json(candidate)
        if not isinstance(payload, dict):
            raise FinalizeError(f"Agent summary must be an object: {candidate}")
        return payload
    return {}
+
+
def normalize_agent_buckets(agent_summary: dict[str, Any], workflows: dict[str, dict[str, Any]]) -> tuple[dict[str, list[str]], list[str]]:
    """Validate the agent's recommendation buckets against known workflows.

    Accepts either {"recommendations": {...}} or a flat bucket mapping.
    Returns (buckets, notes): buckets maps each lowercase bucket name to a
    de-duplicated list of workflow paths; notes records telemetry claims that
    were ignored. Raises FinalizeError on non-list buckets, unknown workflow
    paths, or a path assigned to more than one bucket.
    """
    notes: list[str] = []
    # The agent may nest buckets under "recommendations" or put them top-level.
    raw_buckets = agent_summary.get("recommendations") if isinstance(agent_summary.get("recommendations"), dict) else agent_summary
    buckets = {bucket: [] for bucket in ALLOWED_BUCKETS}
    seen: dict[str, str] = {}
    for bucket in ALLOWED_BUCKETS:
        # Accept either the lowercase or the Title-case spelling of a bucket.
        entries = raw_buckets.get(bucket, raw_buckets.get(ALLOWED_BUCKETS[bucket], [])) if isinstance(raw_buckets, dict) else []
        if entries is None:
            entries = []
        if not isinstance(entries, list):
            raise FinalizeError(f"Recommendation bucket '{bucket}' must be a list")
        for entry in entries:
            # Entries are either {"path": ...} objects or bare path strings.
            path = entry.get("path") if isinstance(entry, dict) else str(entry)
            path = pre.normalize_text(path)
            if not path:
                continue
            if path not in workflows:
                raise FinalizeError(f"Unknown workflow in recommendations: {path}")
            other = seen.get(path)
            if other and other != bucket:
                raise FinalizeError(f"Workflow '{path}' appears in multiple recommendation buckets")
            seen[path] = bucket
            if path not in buckets[bucket]:
                buckets[bucket].append(path)
    # Telemetry claims are only accepted when they reference a metric the
    # precompute actually recorded for that workflow; anything else is noted
    # as invented evidence rather than raising.
    telemetry_claims = agent_summary.get("telemetry_claims", [])
    if isinstance(telemetry_claims, list):
        for claim in telemetry_claims:
            if not isinstance(claim, dict):
                notes.append("Ignored malformed telemetry claim from agent output.")
                continue
            path = pre.normalize_text(claim.get("path") or claim.get("workflow"))
            metric = pre.normalize_text(claim.get("metric"))
            if not path or path not in workflows or metric not in workflows[path].get("telemetry_metrics", {}):
                notes.append(f"Ignored invented telemetry claim: {path or 'unknown'}::{metric or 'unknown'}")
    return buckets, notes
+
+
def fill_missing_recommendations(current: dict[str, list[str]], seeds: dict[str, list[str]], workflows: dict[str, dict[str, Any]]) -> dict[str, list[str]]:
    """Ensure every known workflow lands in exactly one recommendation bucket.

    Unassigned workflows are filled from *seeds* first; anything still left
    defaults to "revise". NOTE: *current* is mutated in place. The returned
    mapping has each bucket de-duplicated and sorted.
    """
    assigned: set[str] = set()
    for entries in current.values():
        assigned.update(entries)
    for bucket, seed_paths in seeds.items():
        for path in seed_paths:
            if path in workflows and path not in assigned:
                current.setdefault(bucket, []).append(path)
                assigned.add(path)
    for path in workflows:
        if path not in assigned:
            current.setdefault("revise", []).append(path)
    return {bucket: sorted(dict.fromkeys(entries)) for bucket, entries in current.items()}
+
+
def recompute_overlap_drag(payload: dict[str, Any]) -> float:
    """Recompute total portfolio overlap drag from raw overlap pairs.

    Each well-formed pair contributes 2 * score^2; malformed entries and a
    missing/non-list "overlap_pairs" contribute nothing.
    """
    pairs = payload.get("overlap_pairs", [])
    if not isinstance(pairs, list):
        return 0.0
    total = 0.0
    for pair in pairs:
        if isinstance(pair, dict):
            total += 2.0 * float(pair.get("score", 0.0)) ** 2
    return round(total, 4)
+
+
def derive_evidence_quality(workflows: list[dict[str, Any]], base_quality: str) -> str:
    """Derive a portfolio evidence-quality label, never exceeding *base_quality*.

    Coverage credits 1.0 per workflow with real telemetry metrics, 0.5 for
    declared or imported observability, 0 otherwise. The derived label is
    capped at the precompute base label (unknown base labels rank lowest).
    """
    total = 0.0
    for workflow in workflows:
        if workflow.get("telemetry_metrics"):
            total += 1.0
        elif workflow.get("has_observability") or workflow.get("has_imported_observability"):
            total += 0.5
    coverage = total / max(1, len(workflows))
    derived = pre.portfolio_evidence_quality(workflows, coverage)
    rank = {"low": 0, "medium": 1, "high": 2}
    if rank[derived] <= rank.get(base_quality, 0):
        return derived
    return base_quality
+
+
def top_actions(final_payload: dict[str, Any]) -> list[str]:
    """Build up to three prioritized action strings from recommendation buckets.

    Priority order: instrument, merge, revise; retire acts as a filler when
    fewer than three actions exist.
    """
    templates = (
        ("instrument", "Instrument {} with stable OTel evidence and safe-output validation."),
        ("merge", "Consolidate overlap around {} to reduce portfolio drag."),
        ("revise", "Revise {} to shift deterministic work out of the agent path."),
    )
    actions = [
        template.format(final_payload.get(bucket, [])[0])
        for bucket, template in templates
        if final_payload.get(bucket)
    ]
    retire = final_payload.get("retire", [])
    if retire and len(actions) < 3:
        actions.append(f"Retire or quarantine {retire[0]} if trust does not improve.")
    return actions[:3]
+
+
def build_report_markdown(final_payload: dict[str, Any], precompute_payload: dict[str, Any], agent_summary: dict[str, Any], post_notes: list[str]) -> str:
    """Render the markdown issue body for the portfolio report.

    Deterministic numbers come from final_payload/precompute_payload; the
    agent_summary may supply narrative sections, each of which falls back to
    a deterministic default when absent. The result is capped at
    MAX_REPORT_LENGTH characters.
    """
    # Headline metrics table; trust is averaged over all precompute workflows.
    metrics = {
        "Portfolio yield": final_payload["portfolio_yield"],
        "Workflow count": final_payload["workflow_count"],
        "Agentic fraction": final_payload["average_agentic_fraction"],
        "Deterministic fraction": round(1.0 - final_payload["average_agentic_fraction"], 4),
        "Telemetry coverage": precompute_payload.get("portfolio_metrics", {}).get("telemetry_coverage", 0.0),
        "High-overlap clusters": len(final_payload.get("overlap_clusters", [])),
        "Estimated governance drag": final_payload.get("organizational_health_signals", {}).get("governance_drag", 0.0),
        "Estimated trust score": round(
            sum(workflow.get("trust", 0.0) for workflow in precompute_payload.get("workflows", []))
            / max(1, len(precompute_payload.get("workflows", []))),
            4,
        ),
    }
    # One table row per workflow, sorted by descending yield; the displayed
    # recommendation is the final bucket, falling back to the precompute seed.
    workflow_rows = []
    for workflow in sorted(precompute_payload.get("workflows", []), key=lambda item: item.get("yield", 0.0), reverse=True):
        recommendation = next(
            (bucket.title() for bucket in ALLOWED_BUCKETS if workflow["path"] in final_payload.get(bucket, [])),
            workflow.get("recommendation_seed", "Revise"),
        )
        note_text = "; ".join(workflow.get("notes", [])[:2]) or "-"
        workflow_rows.append(
            f"| `{workflow['path']}` | {recommendation} | {workflow['yield']:.4f} | {workflow['trust']:.4f} | {workflow['cost']:.4f} | {workflow['risk']:.4f} | {workflow['overlap_drag']:.4f} | {workflow['adoption']:.4f} | {workflow['agentic_fraction']:.4f} | {note_text} |"
        )
    overlap_lines = [
        f"- {', '.join(cluster['workflows'])} (max overlap {cluster['max_overlap']:.4f}; {cluster['reason']})"
        for cluster in final_payload.get("overlap_clusters", [])
    ] or ["- No high-overlap clusters detected."]
    episode_lines = [
        f"- {episode['episode']}: workflows={', '.join(episode['workflows'])}; coordination drag={episode['coordination_drag']:.4f}; episode yield={episode['episode_yield']:.4f}"
        for episode in precompute_payload.get("episode_metrics", [])
    ]
    org = final_payload.get("organizational_health_signals", {})
    # Agent-supplied narrative, with deterministic fallbacks per section.
    deterministic_findings = agent_summary.get("deterministic_vs_agentic_findings", [])
    if not deterministic_findings:
        # Fallback: call out up to three of the most agent-heavy workflows
        # whose agentic fraction exceeds 0.6.
        deterministic_findings = [
            f"{workflow['path']} has agentic fraction {workflow['agentic_fraction']:.4f} despite limited deterministic scaffolding."
            for workflow in sorted(precompute_payload.get("workflows", []), key=lambda item: item.get("agentic_fraction", 0.0), reverse=True)[:3]
            if workflow.get("agentic_fraction", 0.0) > 0.6
        ]
    highest_value_actions = agent_summary.get("highest_value_actions") or top_actions(final_payload)
    retirement_candidates = agent_summary.get("retirement_candidates") or final_payload.get("retire", [])
    consolidation = agent_summary.get("consolidation_opportunities") or final_payload.get("merge", [])
    instrumentation_gaps = agent_summary.get("instrumentation_gaps") or final_payload.get("instrument", [])
    executive_summary = pre.normalize_text(agent_summary.get("executive_summary"))
    if not executive_summary:
        # Deterministic summary chosen by precedence: evidence quality, then
        # fragmentation, then overall portfolio yield.
        if final_payload["evidence_quality"] == "low":
            executive_summary = "The workflow ecosystem is under-instrumented, so the portfolio signal is directionally useful but not yet strong enough for confident optimization."
        elif org.get("fragmentation", 0.0) > 0.6:
            executive_summary = "The workflow ecosystem is fragmenting: overlap drag and governance drag are eroding portfolio yield."
        elif final_payload["portfolio_yield"] > 0.12:
            executive_summary = "The workflow ecosystem is producing positive value overall, with enough trust and reuse to justify continued investment."
        else:
            executive_summary = "The workflow ecosystem is mixed: some workflows are valuable, but overlap, cost, or trust gaps are holding the portfolio back."
    # Machine-readable appendix embedded at the bottom of the issue body.
    compact_json = json.dumps(
        {
            "portfolio_yield": final_payload["portfolio_yield"],
            "workflow_count": final_payload["workflow_count"],
            "keep": final_payload.get("keep", []),
            "revise": final_payload.get("revise", []),
            "merge": final_payload.get("merge", []),
            "instrument": final_payload.get("instrument", []),
            "retire": final_payload.get("retire", []),
            "evidence_quality": final_payload["evidence_quality"],
        },
        separators=(",", ":"),
        sort_keys=True,
    )
    lines = [
        "# Agentic Workflow Portfolio Yield Report",
        "",
        "## Executive Summary",
        "",
        executive_summary,
        "",
        "## Portfolio Health",
        "",
        "| Metric | Value |",
        "|---|---:|",
    ]
    lines.extend(f"| {metric} | {value} |" for metric, value in metrics.items())
    lines.extend(
        [
            "",
            "## Workflow Portfolio",
            "",
            "| Workflow | Recommendation | Yield | Trust | Cost | Risk | Overlap | Adoption | Agentic Fraction | Notes |",
            "|---|---|---:|---:|---:|---:|---:|---:|---:|---|",
            *workflow_rows,
            "",
            "## Overlap Clusters",
            "",
            *overlap_lines,
        ]
    )
    # The episode section is omitted entirely when no episode evidence exists.
    if episode_lines:
        lines.extend(["", "## Episode-Level Observations", "", *episode_lines])
    lines.extend(
        [
            "",
            "## Organizational Health Signals",
            "",
            f"- fragmentation: {org.get('fragmentation', 0.0):.4f}",
            f"- reuse: {org.get('reuse', 0.0):.4f}",
            f"- trust concentration: {org.get('trust_concentration', 0.0):.4f}",
            f"- governance drag: {org.get('governance_drag', 0.0):.4f}",
            *[f"- {note}" for note in org.get("notes", [])],
            *[f"- {note}" for note in post_notes],
            "",
            "## Deterministic vs Agentic Findings",
            "",
            *([f"- {item}" for item in deterministic_findings] or ["- No outsized agentic misuse detected from current evidence."]),
            "",
            "## Highest-Value Actions",
            "",
            *([f"1. {item}" if index == 0 else f"{index + 1}. {item}" for index, item in enumerate(highest_value_actions[:3])] or ["1. Improve observability coverage."]),
            "",
            "## Retirement Candidates",
            "",
            *([f"- {item}" for item in retirement_candidates] or ["- No immediate retirement candidates."]),
            "",
            "## Consolidation Opportunities",
            "",
            *([f"- {item}" for item in consolidation] or ["- No consolidation opportunities identified."]),
            "",
            "## Instrumentation Gaps",
            "",
            *([f"- {item}" for item in instrumentation_gaps] or ["- No critical instrumentation gaps detected."]),
            "",
            "## Deterministic Portfolio JSON",
            "",
            "```json",
            compact_json,
            "```",
        ]
    )
    report = "\n".join(lines).strip() + "\n"
    # Hard cap so the issue body cannot exceed safe-output size limits.
    if len(report) > MAX_REPORT_LENGTH:
        report = report[: MAX_REPORT_LENGTH - 16].rstrip() + "\n\n[truncated]\n"
    return report
+
+
def finalize(precompute_payload: dict[str, Any], agent_dir: Path) -> tuple[dict[str, Any], dict[str, Any], list[str]]:
    """Merge precompute facts with the agent summary into the final payload.

    Returns (final_payload, agent_summary, post_notes). Raises FinalizeError
    on malformed precompute data or invalid agent recommendations.
    """
    workflows_raw = precompute_payload.get("workflows")
    if not isinstance(workflows_raw, list):
        raise FinalizeError("Precompute JSON must contain a workflows array")
    workflows = [clamp_workflow_scores(workflow) for workflow in workflows_raw if isinstance(workflow, dict)]
    workflow_index: dict[str, dict[str, Any]] = {}
    for workflow in workflows:
        path = workflow.get("path")
        if not path or path in workflow_index:
            raise FinalizeError(f"Duplicate or missing workflow path: {path}")
        workflow_index[path] = workflow
    # Recompute yields deterministically so tampered precompute values
    # cannot leak into the report.
    for workflow in workflows:
        workflow["yield"] = pre.compute_workflow_yield(
            workflow["usefulness"],
            workflow["adoption"],
            workflow["trust"],
            workflow["cost"],
            workflow["risk"],
            workflow["maintenance_drag"],
            workflow["overlap_drag"],
        )
    seeds = recommendation_buckets(precompute_payload.get("recommendations_seed", {}), workflow_index)
    agent_summary = read_agent_summary(agent_dir)
    post_notes: list[str] = []
    # FIX: removed a pointless `try/except FinalizeError: raise` wrapper that
    # previously surrounded this call; the exception propagates identically.
    agent_buckets, notes = normalize_agent_buckets(agent_summary, workflow_index)
    post_notes.extend(notes)
    buckets = fill_missing_recommendations(agent_buckets, seeds, workflow_index)
    overlap_drag_value = recompute_overlap_drag(precompute_payload)
    # Hoisted loop-invariant divisor (guards against an empty workflow list).
    divisor = max(1, len(workflows))
    portfolio_yield = round(sum(workflow["yield"] for workflow in workflows) / divisor - pre.LAMBDA * overlap_drag_value, 4)
    final_payload = {
        "portfolio_yield": portfolio_yield,
        "workflow_count": len(workflows),
        "portfolio_cost": round(sum(workflow["cost"] for workflow in workflows) / divisor, 4),
        "portfolio_risk": round(sum(workflow["risk"] for workflow in workflows) / divisor, 4),
        "portfolio_maintenance_drag": round(sum(workflow["maintenance_drag"] for workflow in workflows) / divisor, 4),
        "portfolio_overlap_drag": overlap_drag_value,
        "average_agentic_fraction": round(sum(workflow["agentic_fraction"] for workflow in workflows) / divisor, 4),
        "evidence_quality": derive_evidence_quality(workflows, precompute_payload.get("portfolio_metrics", {}).get("evidence_quality", "low")),
        "keep": buckets.get("keep", []),
        "revise": buckets.get("revise", []),
        "merge": buckets.get("merge", []),
        "instrument": buckets.get("instrument", []),
        "retire": buckets.get("retire", []),
        "overlap_clusters": precompute_payload.get("overlap_clusters", []),
        "organizational_health_signals": precompute_payload.get("organizational_health_signals", {}),
    }
    final_payload["report_markdown"] = build_report_markdown(final_payload, {**precompute_payload, "workflows": workflows}, agent_summary, post_notes)
    return final_payload, agent_summary, post_notes
+
+
def write_safe_output(agent_dir: Path, report_markdown: str) -> None:
    """Write the gh-aw safe-output file containing a single create_issue item.

    The title embeds today's local date; the body is the rendered report.
    """
    today = dt.date.today().isoformat()
    payload = {
        "items": [
            {
                "type": "create_issue",
                "title": f"Agentic Workflow Portfolio Yield Report — {today}",
                "body": report_markdown,
            }
        ],
        "errors": [],
    }
    serialized = json.dumps(payload, indent=2) + "\n"
    (agent_dir / "agent_output.json").write_text(serialized, encoding="utf-8")
+
+
def error_payload(message: str) -> dict[str, Any]:
    """Build a fully-populated fallback payload so downstream consumers of the
    final JSON never crash on missing keys; *message* is surfaced both in the
    top-level "error" field and the organizational-health notes."""
    empty_buckets = {bucket: [] for bucket in ("keep", "revise", "merge", "instrument", "retire")}
    return {
        "error": message,
        "portfolio_yield": 0.0,
        "workflow_count": 0,
        "portfolio_cost": 0.0,
        "portfolio_risk": 0.0,
        "portfolio_maintenance_drag": 0.0,
        "portfolio_overlap_drag": 0.0,
        "average_agentic_fraction": 0.0,
        "evidence_quality": "low",
        **empty_buckets,
        "overlap_clusters": [],
        "organizational_health_signals": {
            "fragmentation": 0.0,
            "reuse": 0.0,
            "trust_concentration": 0.0,
            "governance_drag": 0.0,
            "notes": [message],
        },
        "report_markdown": "# Agentic Workflow Portfolio Yield Report\n\n## Executive Summary\n\nPostcompute failed safely.\n",
    }
+
+
+def parse_args(argv: list[str] | None = None) -> argparse.Namespace:
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument("--precompute", required=True, help="Path to the precompute JSON")
+ parser.add_argument("--agent-output", required=True, help="Directory containing agent outputs")
+ parser.add_argument("--out", required=True, help="Output JSON path")
+ return parser.parse_args(argv)
+
+
def main(argv: list[str] | None = None) -> int:
    """CLI entry point: finalize the report or emit a safe fallback payload.

    Returns 0 on success and 1 when postcompute inputs are malformed; the
    output JSON is written in both cases.
    """
    args = parse_args(argv)
    out_path = Path(args.out)
    out_path.parent.mkdir(parents=True, exist_ok=True)
    agent_dir = Path(args.agent_output)
    agent_dir.mkdir(parents=True, exist_ok=True)
    try:
        precompute_payload = load_json(Path(args.precompute))
        if not isinstance(precompute_payload, dict):
            raise FinalizeError("Precompute JSON must be an object")
        final_payload, _agent_summary, _notes = finalize(precompute_payload, agent_dir)
        write_safe_output(agent_dir, final_payload["report_markdown"])
    except FinalizeError as exc:
        out_path.write_text(json.dumps(error_payload(str(exc)), indent=2, sort_keys=True) + "\n", encoding="utf-8")
        return 1
    out_path.write_text(json.dumps(final_payload, indent=2, sort_keys=True) + "\n", encoding="utf-8")
    return 0


if __name__ == "__main__":
    sys.exit(main())
diff --git a/scripts/aw_yield_precompute.py b/scripts/aw_yield_precompute.py
new file mode 100644
index 00000000000..37c09c2396b
--- /dev/null
+++ b/scripts/aw_yield_precompute.py
@@ -0,0 +1,1174 @@
+#!/usr/bin/env python3
+"""Deterministic precompute for Agentic Workflow Portfolio Yield."""
+
+from __future__ import annotations
+
+import argparse
+import json
+import math
+import os
+import re
+import sys
+from collections import Counter, defaultdict
+from pathlib import Path
+from typing import Any
+
# Weight applied to portfolio-level overlap drag when computing portfolio yield.
LAMBDA = 0.25
# NOTE(review): presumably the pairwise-overlap cutoff for clustering — usage
# is outside this view; confirm against the overlap-cluster builder.
OVERLAP_THRESHOLD = 0.70
# Canonical recommendation bucket names (Title case).
ALLOWED_RECOMMENDATIONS = ("Keep", "Revise", "Merge", "Instrument", "Retire")
# NOTE(review): presumably tokens ignored when tokenizing workflow text for
# overlap scoring — usage is outside this view; confirm before editing.
STOPWORDS = {
    "a",
    "about",
    "after",
    "all",
    "also",
    "an",
    "and",
    "any",
    "are",
    "as",
    "at",
    "be",
    "been",
    "before",
    "being",
    "by",
    "can",
    "do",
    "for",
    "from",
    "get",
    "github",
    "have",
    "if",
    "in",
    "into",
    "is",
    "it",
    "its",
    "job",
    "of",
    "on",
    "or",
    "repo",
    "repository",
    "that",
    "the",
    "their",
    "this",
    "to",
    "use",
    "using",
    "workflow",
    "workflows",
    "with",
    "you",
    "your",
}
# NOTE(review): presumably permission levels counted toward permissions_risk —
# usage is outside this view; confirm against the scoring code.
RISKY_PERMISSION_LEVELS = {"write", "admin"}
# NOTE(review): presumably the whitelist of telemetry metric names a workflow
# may report — usage is outside this view; confirm against telemetry ingestion.
TELEMETRY_KEYS = {
    "input_tokens",
    "output_tokens",
    "runtime_duration",
    "tool_calls",
    "retries",
    "success_rate",
    "safe_output_success",
    "workflow_invocation_count",
    "user_interaction_count",
    "reviewer_interaction_count",
    "accepted_outputs",
    "outputs_acted_upon",
    "actionable_comments",
    "pr_impact",
    "issues_resolved",
    "bugs_found",
    "manual_minutes_saved",
}
+
+
# Subclassing ValueError keeps any existing `except ValueError` callers working.
class InputError(ValueError):
    """Raised when an input document cannot be processed safely."""
+
+
def clamp(value: Any, lower: float = 0.0, upper: float = 1.0) -> float:
    """Coerce *value* to float and bound it to [lower, upper].

    Non-numeric, NaN, and infinite inputs all collapse to *lower*.
    """
    try:
        numeric = float(value)
    except (TypeError, ValueError):
        return lower
    if not math.isfinite(numeric):
        return lower
    return max(lower, min(upper, numeric))
+
+
def round_score(value: Any) -> float:
    """Clamp *value* to [0, 1] and round it to 4 decimal places."""
    clamped = clamp(value)
    return round(clamped, 4)
+
+
def normalize_text(value: Any) -> str:
    """Render *value* as a whitespace-stripped string; None becomes ""."""
    if value is None:
        return ""
    text = value if isinstance(value, str) else str(value)
    return text.strip()
+
+
def split_frontmatter(text: str) -> tuple[str, str]:
    """Split a markdown document into (frontmatter, body).

    Returns ("", text) unless the document starts with "---" and a closing
    "---" line exists; neither delimiter line is included in either part.
    """
    if not text.startswith("---"):
        return "", text
    lines = text.splitlines()
    closing = next(
        (index for index in range(1, len(lines)) if lines[index].strip() == "---"),
        None,
    )
    if closing is None:
        return "", text
    return "\n".join(lines[1:closing]), "\n".join(lines[closing + 1 :])
+
+
+def _split_inline_items(text: str) -> list[str]:
+ items: list[str] = []
+ current: list[str] = []
+ depth = 0
+ quote: str | None = None
+ for char in text:
+ if quote:
+ current.append(char)
+ if char == quote:
+ quote = None
+ continue
+ if char in {"'", '"'}:
+ quote = char
+ current.append(char)
+ continue
+ if char in "[{":
+ depth += 1
+ current.append(char)
+ continue
+ if char in "]}":
+ depth = max(0, depth - 1)
+ current.append(char)
+ continue
+ if char == "," and depth == 0:
+ items.append("".join(current).strip())
+ current = []
+ continue
+ current.append(char)
+ if current:
+ items.append("".join(current).strip())
+ return [item for item in items if item != ""]
+
+
def parse_scalar(value: str) -> Any:
    """Parse a single YAML flow value.

    Handles booleans, null spellings, inline []/{} containers, quoted
    strings, and int/float literals; anything else is returned as the
    stripped string.
    """
    value = value.strip()
    if value == "":
        return ""
    lowered = value.lower()
    if lowered == "true":
        return True
    if lowered == "false":
        return False
    if lowered in {"null", "none", "~"}:
        return None
    if value.startswith("[") and value.endswith("]"):
        inner = value[1:-1].strip()
        if not inner:
            return []
        return [parse_scalar(item) for item in _split_inline_items(inner)]
    if value.startswith("{") and value.endswith("}"):
        inner = value[1:-1].strip()
        if not inner:
            return {}
        mapping: dict[str, Any] = {}
        for item in _split_inline_items(inner):
            key, raw = split_key_value(item)
            mapping[key] = parse_scalar(raw or "")
        return mapping
    if value[0] == value[-1] and value[0] in {"'", '"'}:
        return value[1:-1]
    if re.fullmatch(r"-?\d+", value):
        try:
            return int(value)
        except ValueError:
            return value
    if re.fullmatch(r"-?\d+\.\d+", value):
        try:
            return float(value)
        except ValueError:
            return value
    return value
+
+
+def split_key_value(text: str) -> tuple[str, str | None]:
+ depth = 0
+ quote: str | None = None
+ for index, char in enumerate(text):
+ if quote:
+ if char == quote:
+ quote = None
+ continue
+ if char in {"'", '"'}:
+ quote = char
+ continue
+ if char in "[{":
+ depth += 1
+ continue
+ if char in "]}":
+ depth = max(0, depth - 1)
+ continue
+ if char == ":" and depth == 0:
+ key = text[:index].strip()
+ rest = text[index + 1 :].strip()
+ return key, rest if rest != "" else None
+ raise InputError(f"Invalid frontmatter line: {text}")
+
+
+def _next_significant(lines: list[str], start: int) -> int:
+ index = start
+ while index < len(lines):
+ stripped = lines[index].strip()
+ if stripped and not stripped.startswith("#"):
+ break
+ index += 1
+ return index
+
+
def parse_yaml_block(lines: list[str], start: int = 0, indent: int = 0) -> tuple[Any, int]:
    """Recursively parse an indentation-delimited YAML block.

    Returns (value, next_index): value is a list when the block starts with
    "- " items at this indent, otherwise a mapping; next_index is the first
    line not consumed. Supports only the restricted YAML subset used by
    workflow frontmatter (2-space child indent, "- " lists, "key: value").
    Raises InputError (via split_key_value or directly) on malformed lines.
    """
    start = _next_significant(lines, start)
    if start >= len(lines):
        return {}, start
    line = lines[start]
    current_indent = len(line) - len(line.lstrip(" "))
    if current_indent < indent:
        # Block at this indent is empty; the caller resumes at `start`.
        return {}, start
    is_list = line.lstrip().startswith("- ")
    if is_list:
        items: list[Any] = []
        index = start
        while index < len(lines):
            index = _next_significant(lines, index)
            if index >= len(lines):
                break
            raw = lines[index]
            item_indent = len(raw) - len(raw.lstrip(" "))
            if item_indent < indent:
                break
            stripped = raw[item_indent:]
            if item_indent != indent or not stripped.startswith("- "):
                break
            payload = stripped[2:].strip()
            index += 1
            if payload == "":
                # Bare "- ": the item's value is the nested block below it.
                child, index = parse_yaml_block(lines, index, indent + 2)
                items.append(child)
                continue
            if ":" in payload:
                # "- key: ..." opens an inline mapping item; following lines
                # at exactly indent + 2 add further keys to the same item.
                key, rest = split_key_value(payload)
                item: dict[str, Any] = {}
                if rest is None:
                    child, index = parse_yaml_block(lines, index, indent + 2)
                    item[key] = child
                else:
                    item[key] = parse_scalar(rest)
                while True:
                    lookahead = _next_significant(lines, index)
                    if lookahead >= len(lines):
                        break
                    next_raw = lines[lookahead]
                    next_indent = len(next_raw) - len(next_raw.lstrip(" "))
                    if next_indent < indent + 2:
                        break
                    if next_indent == indent and next_raw.lstrip().startswith("- "):
                        break
                    if next_indent > indent + 2:
                        break
                    extra_key, extra_rest = split_key_value(next_raw.strip())
                    index = lookahead + 1
                    if extra_rest is None:
                        child, index = parse_yaml_block(lines, index, indent + 4)
                        item[extra_key] = child
                    else:
                        item[extra_key] = parse_scalar(extra_rest)
                items.append(item)
                continue
            # Plain "- scalar" item.
            items.append(parse_scalar(payload))
        return items, index
    mapping: dict[str, Any] = {}
    index = start
    while index < len(lines):
        index = _next_significant(lines, index)
        if index >= len(lines):
            break
        raw = lines[index]
        current_indent = len(raw) - len(raw.lstrip(" "))
        if current_indent < indent:
            break
        if current_indent > indent:
            raise InputError(f"Unexpected indentation in frontmatter: {raw}")
        stripped = raw.strip()
        if stripped.startswith("- "):
            break
        key, rest = split_key_value(stripped)
        index += 1
        if rest is None:
            # Bare "key:" — value is the nested block, or {} when nothing
            # significant follows.
            if index < len(lines) and _next_significant(lines, index) < len(lines):
                child, index = parse_yaml_block(lines, index, indent + 2)
                mapping[key] = child
            else:
                mapping[key] = {}
        else:
            mapping[key] = parse_scalar(rest)
    return mapping, index
+
+
def parse_frontmatter_text(frontmatter: str) -> dict[str, Any]:
    """Parse frontmatter YAML text into a dict; blank input yields {}.

    Raises InputError when the top-level structure is not a mapping.
    """
    if not frontmatter.strip():
        return {}
    parsed, _end = parse_yaml_block(frontmatter.splitlines())
    if isinstance(parsed, dict):
        return parsed
    raise InputError("Workflow frontmatter must parse to an object")
+
+
def read_workflow(path: Path) -> tuple[dict[str, Any], str]:
    """Load a workflow file and return (parsed frontmatter, markdown body)."""
    content = path.read_text(encoding="utf-8")
    frontmatter_text, body = split_frontmatter(content)
    parsed = parse_frontmatter_text(frontmatter_text)
    return parsed, body
+
+
def discover_workflow_files(workflows_root: Path) -> list[Path]:
    """Find workflow markdown files under ``workflows_root``, sorted.

    Files inside any ``shared`` directory component are importable
    fragments rather than workflows and are excluded.
    """
    discovered = [
        candidate
        for candidate in workflows_root.rglob("*.md")
        if "shared" not in candidate.relative_to(workflows_root).parts
    ]
    return sorted(discovered)
+
+
def as_list(value: Any) -> list[Any]:
    """Coerce ``value`` to a list: None -> [], list -> itself, scalar -> [scalar]."""
    if isinstance(value, list):
        return value
    if value is None:
        return []
    return [value]
+
+
def normalize_import_paths(workflow_path: Path, frontmatter: dict[str, Any]) -> list[Path]:
    """Resolve local frontmatter import declarations to filesystem paths.

    Accepts plain string entries and {uses:/path:} mappings. Entries
    containing "@" are version-pinned remote imports and are skipped,
    since they cannot be resolved on the local filesystem. Only
    ``shared/``-relative, ``./``/``../``-relative, and absolute paths
    are resolvable; anything else is ignored.
    """
    imports: list[Path] = []
    for item in as_list(frontmatter.get("imports")):
        raw: str | None = None
        if isinstance(item, str):
            raw = item
        elif isinstance(item, dict):
            raw = normalize_text(item.get("uses")) or normalize_text(item.get("path"))
        # Skip empty entries and remote pinned imports (e.g. "owner/repo/file@v1").
        if not raw or "@" in raw:
            continue
        if raw.startswith("shared/"):
            imports.append(workflow_path.parent / raw)
        elif raw.startswith(("./", "../")):
            imports.append((workflow_path.parent / raw).resolve())
        elif raw.startswith("/"):
            imports.append(Path(raw))
    return imports
+
+
def has_observability_config(frontmatter: dict[str, Any]) -> bool:
    """True when the frontmatter declares an ``observability.otlp`` mapping."""
    section = frontmatter.get("observability")
    if not isinstance(section, dict):
        return False
    return isinstance(section.get("otlp"), dict)
+
+
def has_imported_observability(workflow_path: Path, frontmatter: dict[str, Any]) -> bool:
    """Check whether any resolvable import brings observability with it.

    An import counts when its own frontmatter configures observability,
    when its filename mentions "otel", or when it declares an "otel" MCP
    server. Missing or unparsable imports contribute nothing (best-effort).
    """
    for candidate in normalize_import_paths(workflow_path, frontmatter):
        if not candidate.exists():
            continue
        try:
            imported_frontmatter, _body = read_workflow(candidate)
        except Exception:
            # Best-effort: an unreadable import is simply skipped.
            continue
        if has_observability_config(imported_frontmatter):
            return True
        if "otel" in normalize_text(candidate.name).lower():
            return True
        servers = imported_frontmatter.get("mcp-servers")
        if isinstance(servers, dict) and "otel" in servers:
            return True
    return False
+
+
def get_matching_lockfile(workflow_path: Path) -> Path:
    """Return the compiled lockfile path paired with a workflow (x.md -> x.lock.yml)."""
    return workflow_path.parent / (workflow_path.stem + ".lock.yml")
+
+
def detect_lockfile_status(workflow_path: Path) -> tuple[bool, bool]:
    """Return (lockfile_exists, lockfile_stale) for a workflow file.

    Stale means the lockfile's mtime is older than the workflow's. When
    mtimes cannot be read, the lockfile is reported present but fresh.
    """
    lockfile = workflow_path.with_suffix(".lock.yml")
    if not lockfile.exists():
        return False, False
    try:
        stale = lockfile.stat().st_mtime < workflow_path.stat().st_mtime
    except OSError:
        return True, False
    return True, stale
+
+
def count_steps(value: Any) -> int:
    """Count configured steps: list length, 1 for a non-empty mapping, else 0."""
    if isinstance(value, list):
        return len(value)
    return 1 if isinstance(value, dict) and value else 0
+
+
def collect_step_text(value: Any) -> str:
    """Serialize step configuration to deterministic JSON text; falsy input yields ""."""
    if not value:
        return ""
    return json.dumps(value, sort_keys=True, ensure_ascii=False)
+
+
+def infer_timeout_minutes(value: Any) -> int | None:
+ if isinstance(value, (int, float)):
+ return int(value)
+ if isinstance(value, str):
+ match = re.search(r"\d+", value)
+ if match:
+ return int(match.group(0))
+ return None
+
+
def permissions_risk(permissions: Any) -> float:
    """Heuristic risk score for a workflow's permissions block.

    An absent/empty permissions block gets a moderate default (0.45).
    Read scopes contribute breadth risk, write/elevated levels weigh
    heaviest, and an ``id-token`` grant adds a small premium.
    """
    if not isinstance(permissions, dict) or not permissions:
        return 0.45
    read_scopes = 0
    elevated = 0
    id_token = 0
    for scope, level in permissions.items():
        normalized = normalize_text(level).lower()
        if normalized == "none":
            continue
        # "id-token" is a permission *key* (e.g. "id-token: write"), so it is
        # matched on the scope name, not on the level value — comparing the
        # level against "id-token" could never trigger.
        if normalize_text(scope).lower() == "id-token":
            id_token += 1
            continue
        if normalized == "read":
            read_scopes += 1
        elif normalized in RISKY_PERMISSION_LEVELS or normalized == "write":
            elevated += 1
    breadth = clamp(read_scopes / 6.0)
    return round_score(0.2 + breadth * 0.35 + elevated * 0.45 + id_token * 0.1)
+
+
def count_tools(frontmatter: dict[str, Any]) -> int:
    """Count declared tools plus configured MCP servers."""
    total = len(frontmatter.get("tools", {}) or {})
    servers = frontmatter.get("mcp-servers")
    if isinstance(servers, dict):
        total += len(servers)
    return total
+
+
def tokenize(text: str) -> list[str]:
    """Lowercase alphanumeric tokens, excluding stopwords and short (<3 char) words."""
    candidates = re.findall(r"[a-z0-9]+", text.lower())
    return [token for token in candidates if len(token) > 2 and token not in STOPWORDS]
+
+
def extract_trigger_tokens(on_value: Any) -> list[str]:
    """Flatten a workflow ``on:`` trigger value into intent tokens.

    Strings are tokenized directly; lists tokenize each entry; mappings
    tokenize both trigger names and their JSON-serialized configs.
    Anything else yields no tokens.
    """
    if isinstance(on_value, str):
        return tokenize(on_value)
    tokens: list[str] = []
    if isinstance(on_value, list):
        for entry in on_value:
            tokens.extend(tokenize(normalize_text(entry)))
    elif isinstance(on_value, dict):
        for trigger, config in on_value.items():
            tokens.extend(tokenize(trigger))
            tokens.extend(tokenize(json.dumps(config, sort_keys=True)))
    return tokens
+
+
def extract_headings(body: str) -> list[str]:
    """Collect markdown heading texts (any level) from ``body``."""
    pattern = re.compile(r"^#+\s+(.+)$", re.MULTILINE)
    return [heading.strip() for heading in pattern.findall(body)]
+
+
def build_intent_text(path: Path, frontmatter: dict[str, Any], body: str) -> str:
    """Assemble the searchable intent text used for workflow similarity."""
    safe_outputs = frontmatter.get("safe-outputs")
    tools = frontmatter.get("tools")
    fragments = [
        path.stem.replace("-", " "),
        normalize_text(frontmatter.get("name")),
        normalize_text(frontmatter.get("description")),
        " ".join(extract_trigger_tokens(frontmatter.get("on"))),
        " ".join(safe_outputs.keys()) if isinstance(safe_outputs, dict) else "",
        " ".join(tools.keys()) if isinstance(tools, dict) else "",
        " ".join(extract_headings(body)),
        # Cap the raw body contribution so similarity is not dominated by length.
        re.sub(r"\s+", " ", body)[:1500],
    ]
    return " ".join(fragment for fragment in fragments if fragment).strip()
+
+
def estimate_agentic_fraction(frontmatter: dict[str, Any], body: str) -> tuple[float, float]:
    """Estimate (agentic, deterministic) work fractions for a workflow.

    Deterministic weight comes from pre/post step counts plus shell-tool
    mentions in their serialized configs; agentic weight scales with the
    prompt body length and tool count. The two fractions sum to 1.0.
    """
    shell_pattern = r"\b(python3?|jq|grep|awk|sed|sort|uniq|cat|find)\b"
    pre_config = frontmatter.get("pre-agent-steps")
    post_config = frontmatter.get("post-steps")
    pre_weight = count_steps(pre_config) * 1.3 + 0.8 * len(re.findall(shell_pattern, collect_step_text(pre_config)))
    post_weight = count_steps(post_config) * 1.1 + 0.8 * len(re.findall(shell_pattern, collect_step_text(post_config)))
    agent_weight = max(0.25, len(body.split()) / 220.0 + count_tools(frontmatter) * 0.15)
    total = pre_weight + post_weight + agent_weight
    if total <= 0:
        return 0.5, 0.5
    agentic = round_score(agent_weight / total)
    return agentic, round_score(1.0 - agentic)
+
+
def score_observability(has_direct: bool, has_imported: bool, telemetry_metrics: dict[str, Any]) -> float:
    """Score observability posture from direct config, imports, and telemetry evidence."""
    total = 0.0
    if has_direct:
        total += 0.6
    if has_imported:
        total += 0.3
    if telemetry_metrics:
        total += 0.4
    return round_score(total)
+
+
def score_safe_outputs(safe_outputs: Any) -> float:
    """Score safe-output configuration richness (0 when absent or not a mapping)."""
    if not isinstance(safe_outputs, dict) or not safe_outputs:
        return 0.0
    total = 0.3
    if "create-issue" in safe_outputs:
        total += 0.3
    guard_keys = ("mentions", "allowed-github-references", "max-bot-mentions")
    if any(guard in safe_outputs for guard in guard_keys):
        total += 0.2
    # Any output channel with an explicit "max" cap earns one extra credit.
    if any(isinstance(config, dict) and config.get("max") is not None for config in safe_outputs.values()):
        total += 0.2
    return round_score(total)
+
+
def score_cost(frontmatter: dict[str, Any], body: str, telemetry_metrics: dict[str, Any], agentic_fraction: float) -> float:
    """Score expected run cost from config size, prompt size, and run telemetry."""
    timeout = infer_timeout_minutes(frontmatter.get("timeout-minutes")) or 20
    total = 0.15 + clamp(timeout / 60.0) * 0.2 + agentic_fraction * 0.25 + clamp(count_tools(frontmatter) / 8.0) * 0.15
    total += clamp(len(body) / 6000.0) * 0.1
    if telemetry_metrics:
        token_total = telemetry_metrics.get("input_tokens", 0) + telemetry_metrics.get("output_tokens", 0)
        total += clamp(token_total / 250000.0) * 0.3
        total += clamp(telemetry_metrics.get("runtime_duration", 0) / 1800.0) * 0.25
        total += clamp(telemetry_metrics.get("tool_calls", 0) / 150.0) * 0.2
        total += clamp(telemetry_metrics.get("retries", 0) / 6.0) * 0.2
    return round_score(total)
+
+
def score_trust(
    strict: bool,
    timeout_minutes: int | None,
    has_lockfile: bool,
    lockfile_stale: bool,
    safe_output_score: float,
    observability_score: float,
    telemetry_metrics: dict[str, Any],
) -> float:
    """Score trustworthiness from guardrails, build artifacts, and run telemetry."""
    total = 0.2
    if strict:
        total += 0.2
    if timeout_minutes:
        total += 0.1
    if has_lockfile and not lockfile_stale:
        total += 0.15
    total += safe_output_score * 0.2
    total += observability_score * 0.1
    if telemetry_metrics:
        total += clamp(telemetry_metrics.get("success_rate", 0.5)) * 0.35
        total += clamp(1.0 - telemetry_metrics.get("retries", 0) / 6.0) * 0.15
        total += clamp(telemetry_metrics.get("safe_output_success", 0.0)) * 0.2
    return round_score(total)
+
+
def score_usefulness(
    frontmatter: dict[str, Any],
    body: str,
    safe_output_score: float,
    telemetry_metrics: dict[str, Any],
) -> float:
    """Score delivered usefulness; telemetry evidence dominates when present."""
    total = 0.1 + safe_output_score * 0.25 + clamp(len(extract_headings(body)) / 8.0) * 0.1
    if extract_trigger_tokens(frontmatter.get("on")):
        total += 0.1
    if telemetry_metrics:
        acted = telemetry_metrics.get("outputs_acted_upon", telemetry_metrics.get("accepted_outputs", 0.0))
        total += clamp(acted) * 0.35
        total += clamp(telemetry_metrics.get("issues_resolved", 0) / 10.0) * 0.15
        total += clamp(telemetry_metrics.get("manual_minutes_saved", 0) / 180.0) * 0.2
        total += clamp(telemetry_metrics.get("actionable_comments", 0) / 10.0) * 0.1
    elif safe_output_score > 0:
        # Without telemetry, fall back to a modest structural credit.
        total += 0.15
    return round_score(total)
+
+
def score_adoption(frontmatter: dict[str, Any], telemetry_metrics: dict[str, Any]) -> float:
    """Score adoption from trigger surface, shared imports, and run telemetry."""
    total = 0.05
    triggers = frontmatter.get("on")
    if isinstance(triggers, dict):
        if "workflow_dispatch" in triggers:
            total += 0.1
        if "schedule" in triggers:
            total += 0.1
    total += clamp(len(as_list(frontmatter.get("imports"))) / 4.0) * 0.1
    if telemetry_metrics:
        total += clamp(telemetry_metrics.get("workflow_invocation_count", 0) / 50.0) * 0.45
        interactions = telemetry_metrics.get("user_interaction_count", 0) + telemetry_metrics.get("reviewer_interaction_count", 0)
        total += clamp(interactions / 25.0) * 0.2
    return round_score(total)
+
+
def score_maintenance(
    frontmatter: dict[str, Any],
    body: str,
    overlap_hint: float,
    agentic_fraction: float,
    has_precompute: bool,
    has_postcompute: bool,
) -> float:
    """Score maintenance drag from size, surface area, overlap, and missing deterministic steps."""
    size_term = clamp(len(body.splitlines()) / 250.0) * 0.2
    tools_term = clamp(count_tools(frontmatter) / 8.0) * 0.15
    imports_term = clamp(len(as_list(frontmatter.get("imports"))) / 5.0) * 0.1
    total = 0.1 + size_term + tools_term + imports_term
    total += overlap_hint * 0.2 + agentic_fraction * 0.2
    if not has_precompute:
        total += 0.1
    if not has_postcompute:
        total += 0.1
    return round_score(total)
+
+
def score_risk(
    permission_score: float,
    strict: bool,
    timeout_minutes: int | None,
    has_lockfile: bool,
    lockfile_stale: bool,
    safe_output_score: float,
    observability_score: float,
    network: Any,
    agentic_fraction: float,
) -> float:
    """Score operational risk from permissions, missing guardrails, and autonomy."""
    total = permission_score * 0.35 + agentic_fraction * 0.15
    penalties = (
        (not strict, 0.2),
        (timeout_minutes is None, 0.15),
        (not has_lockfile, 0.15),
        (lockfile_stale, 0.15),
        (safe_output_score == 0.0, 0.2),
        (observability_score < 0.4, 0.15),
        (not isinstance(network, dict) or "allowed" not in network, 0.1),
    )
    for triggered, weight in penalties:
        if triggered:
            total += weight
    return round_score(total)
+
+
def evidence_quality_for_workflow(observability_score: float, telemetry_metrics: dict[str, Any]) -> str:
    """Grade evidence backing a workflow's scores as "high", "medium", or "low"."""
    metric_count = len(telemetry_metrics)
    if observability_score >= 0.9 and metric_count >= 5:
        return "high"
    if observability_score >= 0.5 or metric_count >= 2:
        return "medium"
    return "low"
+
+
def normalize_telemetry_entry(entry: dict[str, Any]) -> dict[str, Any]:
    """Map raw telemetry fields onto canonical metric names.

    For each canonical metric the first matching alias in the entry wins;
    anything outside TELEMETRY_KEYS is dropped from the result.
    """
    aliases = {
        "runtime_duration": ("runtime_duration", "duration_seconds", "duration", "runtime_seconds"),
        "tool_calls": ("tool_calls", "tool_call_count"),
        "retries": ("retries", "retry_count"),
        "success_rate": ("success_rate",),
        "safe_output_success": ("safe_output_success", "safe_output_success_rate"),
        "workflow_invocation_count": ("workflow_invocation_count", "invocation_count", "runs"),
        "user_interaction_count": ("user_interaction_count", "user_interactions"),
        "reviewer_interaction_count": ("reviewer_interaction_count", "reviewer_interactions"),
        "input_tokens": ("input_tokens",),
        "output_tokens": ("output_tokens",),
        "accepted_outputs": ("accepted_outputs",),
        "outputs_acted_upon": ("outputs_acted_upon", "acted_upon_rate"),
        "actionable_comments": ("actionable_comments",),
        "pr_impact": ("pr_impact",),
        "issues_resolved": ("issues_resolved",),
        "bugs_found": ("bugs_found",),
        "manual_minutes_saved": ("manual_minutes_saved", "minutes_saved"),
    }
    normalized: dict[str, Any] = {}
    for canonical, candidates in aliases.items():
        for alias in candidates:
            if alias in entry:
                normalized[canonical] = entry[alias]
                break
    return {name: value for name, value in normalized.items() if name in TELEMETRY_KEYS}
+
+
def load_otel_summary(path: str | None) -> dict[str, dict[str, Any]]:
    """Load an OTel telemetry summary JSON and build a lookup index.

    Accepts several payload shapes: {"workflows": [...]}, {"workflow_metrics":
    {name: metrics}}, a bare name -> metrics mapping, or a bare list of
    entries. Returns an index keyed by every identifier each entry exposes
    (path, workflow_path, name, workflow, workflow_name) plus each
    identifier's stem. A missing path, absent file, or invalid JSON yields
    an empty index — telemetry is best-effort by design.
    """
    if not path:
        return {}
    summary_path = Path(path)
    if not summary_path.exists():
        return {}
    try:
        payload = json.loads(summary_path.read_text(encoding="utf-8"))
    except json.JSONDecodeError:
        return {}
    entries: list[dict[str, Any]] = []
    if isinstance(payload, dict):
        if isinstance(payload.get("workflows"), list):
            entries = [entry for entry in payload["workflows"] if isinstance(entry, dict)]
        elif isinstance(payload.get("workflow_metrics"), dict):
            for key, value in payload["workflow_metrics"].items():
                if isinstance(value, dict):
                    item = dict(value)
                    # The mapping key doubles as the workflow name when absent.
                    item.setdefault("name", key)
                    entries.append(item)
        else:
            # Fallback: treat the whole payload as a name -> metrics mapping.
            for key, value in payload.items():
                if isinstance(value, dict):
                    item = dict(value)
                    item.setdefault("name", key)
                    entries.append(item)
    elif isinstance(payload, list):
        entries = [entry for entry in payload if isinstance(entry, dict)]
    index: dict[str, dict[str, Any]] = {}
    for entry in entries:
        normalized = normalize_telemetry_entry(entry)
        if not normalized:
            continue
        # Index under every identifier the entry carries so later lookups can
        # match by path, filename, stem, or display name. Later entries
        # overwrite earlier ones on key collisions (last-wins).
        keys = {
            normalize_text(entry.get("path")),
            normalize_text(entry.get("workflow_path")),
            normalize_text(entry.get("name")),
            normalize_text(entry.get("workflow")),
            normalize_text(entry.get("workflow_name")),
        }
        for key in list(keys):
            if key:
                index[key] = normalized
                index[Path(key).stem] = normalized
    return index
+
+
def telemetry_for_workflow(workflow_path: Path, frontmatter: dict[str, Any], telemetry_index: dict[str, dict[str, Any]]) -> dict[str, Any]:
    """Look up telemetry for a workflow by path, filename, stem, or display name.

    Returns a copy of the first matching entry, or an empty dict.
    """
    for candidate in (
        workflow_path.as_posix(),
        workflow_path.name,
        workflow_path.stem,
        normalize_text(frontmatter.get("name")),
    ):
        if candidate in telemetry_index:
            return dict(telemetry_index[candidate])
    return {}
+
+
def build_workflow_record(workflow_path: Path, workflows_root: Path, telemetry_index: dict[str, dict[str, Any]]) -> dict[str, Any]:
    """Build the full scored record for one workflow file.

    Reads the workflow, gathers config and telemetry signals, runs every
    scoring heuristic, and returns the flat dict consumed by the portfolio
    aggregation. Overlap-dependent fields (overlap_drag, yield,
    recommendation_seed, maintenance_drag) are only seeded here and are
    finalized later once pairwise similarities are known.
    """
    frontmatter, body = read_workflow(workflow_path)
    # Path relative to the repository root (workflows_root is .github/workflows,
    # so two parents up is the repo root) — assumes that layout; TODO confirm.
    relative_path = workflow_path.relative_to(workflows_root.parent.parent).as_posix()
    has_lockfile, lockfile_stale = detect_lockfile_status(workflow_path)
    strict = bool(frontmatter.get("strict", False))
    timeout_minutes = infer_timeout_minutes(frontmatter.get("timeout-minutes"))
    safe_outputs = frontmatter.get("safe-outputs") or {}
    has_safe_outputs = isinstance(safe_outputs, dict) and bool(safe_outputs)
    telemetry_metrics = telemetry_for_workflow(workflow_path, frontmatter, telemetry_index)
    has_direct_observability = has_observability_config(frontmatter)
    has_imported = has_imported_observability(workflow_path, frontmatter)
    observability_score = score_observability(has_direct_observability, has_imported, telemetry_metrics)
    safe_output_score = score_safe_outputs(safe_outputs)
    agentic_fraction, deterministic_fraction = estimate_agentic_fraction(frontmatter, body)
    permission_score = permissions_risk(frontmatter.get("permissions"))
    # Human-readable governance gaps, surfaced alongside the numeric scores.
    notes: list[str] = []
    if not has_lockfile:
        notes.append("missing lockfile")
    elif lockfile_stale:
        notes.append("stale lockfile")
    if not strict:
        notes.append("strict mode disabled")
    if timeout_minutes is None:
        notes.append("missing timeout")
    if not has_safe_outputs:
        notes.append("missing safe outputs")
    if observability_score < 0.4:
        notes.append("missing telemetry")
    network = frontmatter.get("network")
    usefulness = score_usefulness(frontmatter, body, safe_output_score, telemetry_metrics)
    adoption = score_adoption(frontmatter, telemetry_metrics)
    trust = score_trust(strict, timeout_minutes, has_lockfile, lockfile_stale, safe_output_score, observability_score, telemetry_metrics)
    cost = score_cost(frontmatter, body, telemetry_metrics, agentic_fraction)
    # Initial maintenance score with no overlap information yet (overlap_hint=0);
    # precompute() recomputes this once pairwise overlap is known.
    maintenance_drag = score_maintenance(
        frontmatter,
        body,
        overlap_hint=0.0,
        agentic_fraction=agentic_fraction,
        has_precompute=count_steps(frontmatter.get("pre-agent-steps")) > 0,
        has_postcompute=count_steps(frontmatter.get("post-steps")) > 0,
    )
    risk = score_risk(
        permission_score,
        strict,
        timeout_minutes,
        has_lockfile,
        lockfile_stale,
        safe_output_score,
        observability_score,
        network,
        agentic_fraction,
    )
    return {
        "path": relative_path,
        "name": normalize_text(frontmatter.get("name")) or workflow_path.stem,
        "description": normalize_text(frontmatter.get("description")),
        "has_lockfile": has_lockfile,
        "lockfile_stale": lockfile_stale,
        "has_safe_outputs": has_safe_outputs,
        "has_observability": has_direct_observability,
        "has_imported_observability": has_imported,
        "strict": strict,
        "timeout_minutes": timeout_minutes,
        "permissions_risk": permission_score,
        "tool_count": count_tools(frontmatter),
        "pre_agent_steps_count": count_steps(frontmatter.get("pre-agent-steps")),
        "post_steps_count": count_steps(frontmatter.get("post-steps")),
        "agentic_fraction": agentic_fraction,
        "deterministic_fraction": deterministic_fraction,
        "usefulness": usefulness,
        "adoption": adoption,
        "trust": trust,
        "cost": cost,
        "risk": risk,
        "maintenance_drag": maintenance_drag,
        # Placeholders finalized in precompute() after similarity analysis.
        "overlap_drag": 0.0,
        "yield": 0.0,
        "intent_text": build_intent_text(workflow_path, frontmatter, body),
        "recommendation_seed": "Instrument" if observability_score < 0.4 else "Revise",
        "evidence_quality": evidence_quality_for_workflow(observability_score, telemetry_metrics),
        "notes": notes,
        "telemetry_metrics": telemetry_metrics,
    }
+
+
def compute_similarity_matrix(workflows: list[dict[str, Any]]) -> tuple[dict[tuple[str, str], float], dict[str, Counter[str]]]:
    """Compute pairwise TF-IDF cosine similarity between workflow intent texts.

    Returns (similarities keyed by ordered (left, right) path pairs,
    raw token counts per workflow path).
    """
    documents: dict[str, Counter[str]] = {}
    doc_frequency: Counter[str] = Counter()
    for workflow in workflows:
        token_counts = Counter(tokenize(workflow.get("intent_text", "")))
        documents[workflow["path"]] = token_counts
        doc_frequency.update(token_counts.keys())
    total_docs = max(1, len(workflows))
    tfidf_vectors: dict[str, dict[str, float]] = {}
    norms: dict[str, float] = {}
    for path, token_counts in documents.items():
        # Smoothed IDF with +1 so common tokens still carry some weight.
        vector = {
            token: frequency * (math.log((1.0 + total_docs) / (1.0 + doc_frequency[token])) + 1.0)
            for token, frequency in token_counts.items()
        }
        tfidf_vectors[path] = vector
        norms[path] = math.sqrt(sum(weight * weight for weight in vector.values())) or 1.0
    paths = [workflow["path"] for workflow in workflows]
    similarities: dict[tuple[str, str], float] = {}
    for position, left in enumerate(paths):
        for right in paths[position + 1 :]:
            left_vector = tfidf_vectors[left]
            right_vector = tfidf_vectors[right]
            dot = 0.0
            for token in set(left_vector).intersection(right_vector):
                dot += left_vector[token] * right_vector[token]
            similarities[(left, right)] = round(clamp(dot / (norms[left] * norms[right])), 4)
    return similarities, documents
+
+
def build_overlap_clusters(workflows: list[dict[str, Any]], similarities: dict[tuple[str, str], float], documents: dict[str, Counter[str]]) -> list[dict[str, Any]]:
    """Group workflows into connected overlap clusters above OVERLAP_THRESHOLD.

    Each cluster reports its sorted members, the strongest pairwise overlap,
    and a short reason built from the members' most common intent tokens.
    """
    neighbors: dict[str, set[str]] = defaultdict(set)
    for (left, right), similarity in similarities.items():
        if similarity >= OVERLAP_THRESHOLD:
            neighbors[left].add(right)
            neighbors[right].add(left)
    clusters: list[dict[str, Any]] = []
    visited: set[str] = set()
    for workflow in workflows:
        seed = workflow["path"]
        if seed in visited or seed not in neighbors:
            continue
        # Depth-first walk of the overlap graph to collect the whole component.
        members: list[str] = []
        pending = [seed]
        while pending:
            node = pending.pop()
            if node in visited:
                continue
            visited.add(node)
            members.append(node)
            pending.extend(sorted(neighbors[node] - visited))
        members.sort()
        if len(members) < 2:
            continue
        max_overlap = max(
            similarities.get((first, second), similarities.get((second, first), 0.0))
            for position, first in enumerate(members)
            for second in members[position + 1 :]
        )
        combined_tokens = Counter()
        for member in members:
            combined_tokens.update(documents.get(member, Counter()))
        reason = ", ".join(token for token, _ in combined_tokens.most_common(4)) or "shared operational intent"
        clusters.append({"workflows": members, "max_overlap": round(max_overlap, 4), "reason": reason})
    return clusters
+
+
def portfolio_overlap_drag(similarities: dict[tuple[str, str], float]) -> float:
    """Aggregate portfolio-wide overlap drag: 2 * sum of squared pair similarities."""
    return round(sum((value * value * 2.0 for value in similarities.values()), 0.0), 4)
+
+
def compute_workflow_yield(
    usefulness: float,
    adoption: float,
    trust: float,
    cost: float,
    risk: float,
    maintenance_drag: float,
    overlap_drag: float,
) -> float:
    """Yield = (usefulness * adoption * trust) / (1 + cost + risk + maintenance + overlap).

    Returns 0.0 when the denominator is non-positive (possible only with
    negative drag inputs).
    """
    denominator = 1.0 + cost + risk + maintenance_drag + overlap_drag
    if denominator <= 0:
        return 0.0
    numerator = usefulness * adoption * trust
    return round(numerator / denominator, 4)
+
+
def assign_recommendation(workflow: dict[str, Any], clustered_paths: set[str]) -> str:
    """Pick a governance action: Instrument, Retire, Merge, Revise, or Keep."""
    instrumented = workflow.get("has_observability") or workflow.get("has_imported_observability")
    if not instrumented:
        return "Instrument"
    heavy = workflow["risk"] > 0.55 or workflow["cost"] > 0.55 or workflow["maintenance_drag"] > 0.55
    if workflow["yield"] < 0.08 and workflow["trust"] < 0.45 and heavy:
        return "Retire"
    if workflow["overlap_drag"] >= 0.45 or workflow["path"] in clustered_paths:
        return "Merge"
    if workflow["usefulness"] >= 0.35 and (heavy or workflow["agentic_fraction"] > 0.65):
        return "Revise"
    return "Keep"
+
+
def compute_episode_metrics(workflows: list[dict[str, Any]], similarities: dict[tuple[str, str], float]) -> list[dict[str, Any]]:
    """Group related workflows into pipeline "episodes" and score coordination.

    Workflows are bucketed by intent keywords; an episode is emitted only
    when at least two members overlap enough to imply coordination cost.
    """
    buckets: dict[str, list[dict[str, Any]]] = defaultdict(list)
    for workflow in workflows:
        text = workflow.get("intent_text", "").lower()
        # Word-boundary matching avoids false hits from substrings such as
        # "pr" inside "improve" or "review" inside "preview".
        if "pull request" in text or re.search(r"\bpr\b|\breview", text):
            buckets["pr-pipeline"].append(workflow)
        elif re.search(r"\bissue|\btriage", text):
            buckets["issue-pipeline"].append(workflow)
        elif re.search(r"\brelease|\bdeploy", text):
            buckets["release-pipeline"].append(workflow)
        elif re.search(r"\bincident|\bsecurity", text):
            buckets["incident-pipeline"].append(workflow)
    episodes: list[dict[str, Any]] = []
    for label, members in buckets.items():
        if len(members) < 2:
            continue
        paths = [member["path"] for member in members]
        pair_scores = [
            similarities.get((left, right), similarities.get((right, left), 0.0))
            for index, left in enumerate(paths)
            for right in paths[index + 1 :]
        ]
        if not pair_scores:
            continue
        avg_overlap = sum(pair_scores) / len(pair_scores)
        avg_cost = sum(member["cost"] for member in members) / len(members)
        avg_yield = sum(member["yield"] for member in members) / len(members)
        coordination_drag = round_score(avg_overlap * (0.5 + avg_cost))
        episode_yield = round(max(0.0, avg_yield / (1.0 + coordination_drag)), 4)
        # Loosely-coupled buckets are noise, not episodes.
        if avg_overlap < 0.2 and coordination_drag < 0.2:
            continue
        episodes.append(
            {
                "episode": label,
                "workflows": paths,
                "coordination_drag": coordination_drag,
                "episode_yield": episode_yield,
                "evidence_quality": "medium" if avg_overlap >= 0.35 else "low",
            }
        )
    return episodes
+
+
def compute_organizational_health(workflows: list[dict[str, Any]], overlap_drag_value: float) -> dict[str, Any]:
    """Derive org-level signals: fragmentation, reuse, trust spread, governance drag."""
    if not workflows:
        return {"fragmentation": 0.0, "reuse": 0.0, "trust_concentration": 0.0, "governance_drag": 0.0, "notes": []}
    count = len(workflows)
    reuse = round_score(
        sum(1.0 for workflow in workflows if workflow.get("has_imported_observability") or workflow.get("tool_count", 0) > 0) / count
    )
    average_trust = sum(workflow["trust"] for workflow in workflows) / count
    # Gap between the most-trusted workflow and the mean.
    trust_concentration = round_score(max(workflow["trust"] for workflow in workflows) - average_trust)
    governance_drag = round_score(
        sum(workflow["risk"] + workflow["agentic_fraction"] * 0.25 for workflow in workflows) / count
    )
    fragmentation = round_score(clamp(overlap_drag_value / max(1.0, count) * 0.7 + (1.0 - reuse) * 0.3))
    notes: list[str] = []
    if fragmentation > 0.6 and average_trust < 0.5:
        notes.append("High overlap plus uneven trust suggests organizational fragmentation.")
    if reuse > 0.55 and fragmentation < 0.45 and average_trust > 0.55:
        notes.append("Shared imports and higher trust indicate improving operational coherence.")
    if governance_drag > 0.55:
        notes.append("Governance drag is elevated by broad scope, missing telemetry, or high agentic fractions.")
    return {
        "fragmentation": fragmentation,
        "reuse": reuse,
        "trust_concentration": trust_concentration,
        "governance_drag": governance_drag,
        "notes": notes,
    }
+
+
def portfolio_evidence_quality(workflows: list[dict[str, Any]], telemetry_coverage: float) -> str:
    """Grade portfolio-wide evidence quality from coverage and per-workflow grades."""
    no_low_evidence = all(workflow["evidence_quality"] != "low" for workflow in workflows)
    if telemetry_coverage >= 0.75 and no_low_evidence:
        return "high"
    return "medium" if telemetry_coverage >= 0.35 else "low"
+
+
def build_recommendation_seed(workflows: list[dict[str, Any]]) -> dict[str, list[str]]:
    """Bucket workflow paths by their lowercase seed recommendation."""
    buckets: dict[str, list[str]] = {name.lower(): [] for name in ALLOWED_RECOMMENDATIONS}
    for workflow in workflows:
        seed = workflow["recommendation_seed"].lower()
        buckets[seed].append(workflow["path"])
    return buckets
+
+
def compute_portfolio_metrics(workflows: list[dict[str, Any]], overlap_drag_value: float) -> dict[str, Any]:
    """Aggregate per-workflow scores into portfolio-level metrics.

    Portfolio yield is the mean workflow yield minus a LAMBDA-weighted
    penalty for portfolio-wide overlap drag. Coverage credits full weight
    for workflows with telemetry and half weight for instrumented ones.
    """
    if not workflows:
        return {
            "workflow_count": 0,
            "portfolio_yield": 0.0,
            "portfolio_overlap_drag": 0.0,
            "portfolio_cost": 0.0,
            "portfolio_risk": 0.0,
            "portfolio_maintenance_drag": 0.0,
            "average_agentic_fraction": 0.0,
            "average_deterministic_fraction": 0.0,
            "telemetry_coverage": 0.0,
            "evidence_quality": "low",
        }
    count = len(workflows)

    def mean_of(field: str) -> float:
        # Arithmetic mean of one numeric field across all workflows.
        return sum(workflow[field] for workflow in workflows) / count

    telemetry_coverage = sum(
        1.0 if workflow["telemetry_metrics"] else 0.5 if workflow["has_observability"] or workflow["has_imported_observability"] else 0.0
        for workflow in workflows
    ) / count
    return {
        "workflow_count": count,
        "portfolio_yield": round(mean_of("yield") - LAMBDA * overlap_drag_value, 4),
        "portfolio_overlap_drag": round(overlap_drag_value, 4),
        "portfolio_cost": round(mean_of("cost"), 4),
        "portfolio_risk": round(mean_of("risk"), 4),
        "portfolio_maintenance_drag": round(mean_of("maintenance_drag"), 4),
        "average_agentic_fraction": round(mean_of("agentic_fraction"), 4),
        "average_deterministic_fraction": round(mean_of("deterministic_fraction"), 4),
        "telemetry_coverage": round(telemetry_coverage, 4),
        "evidence_quality": portfolio_evidence_quality(workflows, telemetry_coverage),
    }
+
+
def precompute(workflows_root: Path, otel_summary_path: str | None = None) -> dict[str, Any]:
    """Produce the full deterministic analysis payload for the portfolio.

    Pipeline: load telemetry, score each workflow, compute pairwise intent
    similarity, fold overlap back into per-workflow drag and yield, cluster
    overlapping workflows, then aggregate portfolio, episode, and
    organizational metrics into one JSON-serializable dict.
    """
    telemetry_index = load_otel_summary(otel_summary_path)
    workflow_files = discover_workflow_files(workflows_root)
    workflows = [build_workflow_record(path, workflows_root, telemetry_index) for path in workflow_files]
    similarities, documents = compute_similarity_matrix(workflows)
    # Per-workflow overlap drag is the sum of squared similarities to peers.
    overlap_by_path: dict[str, float] = defaultdict(float)
    overlap_peers: dict[str, dict[str, float]] = defaultdict(dict)
    for (left, right), similarity in similarities.items():
        squared = similarity * similarity
        overlap_by_path[left] += squared
        overlap_by_path[right] += squared
        overlap_peers[left][right] = similarity
        overlap_peers[right][left] = similarity
    for workflow in workflows:
        workflow["overlap_drag"] = round_score(overlap_by_path.get(workflow["path"], 0.0))
        # NOTE(review): this recompute passes empty frontmatter/body, so the
        # size/tool/import terms of the initial maintenance score are dropped
        # and only overlap, agentic fraction, and step-presence signals
        # remain — confirm that is intentional.
        workflow["maintenance_drag"] = score_maintenance(
            {"imports": [], "tools": {}},
            "",
            overlap_hint=workflow["overlap_drag"],
            agentic_fraction=workflow["agentic_fraction"],
            has_precompute=workflow["pre_agent_steps_count"] > 0,
            has_postcompute=workflow["post_steps_count"] > 0,
        )
        workflow["yield"] = compute_workflow_yield(
            workflow["usefulness"],
            workflow["adoption"],
            workflow["trust"],
            workflow["cost"],
            workflow["risk"],
            workflow["maintenance_drag"],
            workflow["overlap_drag"],
        )
        workflow["overlap_peers"] = overlap_peers.get(workflow["path"], {})
    overlap_clusters = build_overlap_clusters(workflows, similarities, documents)
    clustered_paths = {path for cluster in overlap_clusters for path in cluster["workflows"]}
    # Recommendations depend on finalized yield/overlap, so assign them last.
    for workflow in workflows:
        workflow["recommendation_seed"] = assign_recommendation(workflow, clustered_paths)
    overlap_drag_value = portfolio_overlap_drag(similarities)
    portfolio_metrics = compute_portfolio_metrics(workflows, overlap_drag_value)
    telemetry_coverage = {
        "coverage": portfolio_metrics["telemetry_coverage"],
        "covered_workflows": [workflow["path"] for workflow in workflows if workflow["telemetry_metrics"]],
        "instrumented_without_evidence": [
            workflow["path"]
            for workflow in workflows
            if not workflow["telemetry_metrics"] and (workflow["has_observability"] or workflow["has_imported_observability"])
        ],
        "missing_workflows": [
            workflow["path"]
            for workflow in workflows
            if not workflow["telemetry_metrics"] and not workflow["has_observability"] and not workflow["has_imported_observability"]
        ],
    }
    episode_metrics = compute_episode_metrics(workflows, similarities)
    organizational_health = compute_organizational_health(workflows, overlap_drag_value)
    return {
        "portfolio_metrics": portfolio_metrics,
        "workflows": workflows,
        "overlap_clusters": overlap_clusters,
        "telemetry_coverage": telemetry_coverage,
        "episode_metrics": episode_metrics,
        "organizational_health_signals": organizational_health,
        "recommendations_seed": build_recommendation_seed(workflows),
        "overlap_pairs": [
            {"left": left, "right": right, "score": score}
            for (left, right), score in sorted(similarities.items())
        ],
    }
+
+
+def parse_args(argv: list[str] | None = None) -> argparse.Namespace:
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument("--workflows", required=True, help="Path to the .github/workflows directory")
+ parser.add_argument("--out", required=True, help="Output JSON path")
+ return parser.parse_args(argv)
+
+
def main(argv: list[str] | None = None) -> int:
    """CLI entry point: run precompute and write the JSON payload to --out."""
    args = parse_args(argv)
    payload = precompute(Path(args.workflows), os.environ.get("AWY_OTEL_SUMMARY_JSON"))
    destination = Path(args.out)
    destination.parent.mkdir(parents=True, exist_ok=True)
    destination.write_text(json.dumps(payload, indent=2, sort_keys=True) + "\n", encoding="utf-8")
    return 0
+
+
if __name__ == "__main__":
    # Propagate main()'s return code as the process exit status.
    raise SystemExit(main())
diff --git a/tests/test_aw_yield_postcompute.py b/tests/test_aw_yield_postcompute.py
new file mode 100644
index 00000000000..1ef9ed2678e
--- /dev/null
+++ b/tests/test_aw_yield_postcompute.py
@@ -0,0 +1,170 @@
+from __future__ import annotations
+
+import json
+import sys
+from pathlib import Path
+
+ROOT = Path(__file__).resolve().parents[1]
+SCRIPTS = ROOT / "scripts"
+sys.path.insert(0, str(SCRIPTS))
+
+import aw_yield_postcompute as post
+
+
+def sample_precompute() -> dict:
+    """Return a minimal two-workflow precompute payload shared by the tests below.
+
+    Workflow A deliberately carries out-of-range scores (permissions_risk 1.2,
+    cost 1.2, risk -0.2, agentic_fraction 1.4, deterministic_fraction -0.4) to
+    exercise clamping; workflow B is a well-formed record with telemetry
+    evidence (success_rate present).
+    """
+    return {
+        "portfolio_metrics": {
+            "workflow_count": 2,
+            "portfolio_yield": 0.1,
+            "portfolio_overlap_drag": 0.4,
+            "portfolio_cost": 0.3,
+            "portfolio_risk": 0.2,
+            "portfolio_maintenance_drag": 0.3,
+            "average_agentic_fraction": 0.5,
+            "average_deterministic_fraction": 0.5,
+            "telemetry_coverage": 0.2,
+            "evidence_quality": "low",
+        },
+        "workflows": [
+            {
+                "path": ".github/workflows/a.md",
+                "name": "A",
+                "description": "A",
+                "has_lockfile": True,
+                "lockfile_stale": False,
+                "has_safe_outputs": True,
+                "has_observability": False,
+                "has_imported_observability": False,
+                "strict": True,
+                "timeout_minutes": 10,
+                "permissions_risk": 1.2,
+                "tool_count": 2,
+                "pre_agent_steps_count": 1,
+                "post_steps_count": 1,
+                "agentic_fraction": 1.4,
+                "deterministic_fraction": -0.4,
+                "usefulness": 0.8,
+                "adoption": 0.6,
+                "trust": 0.7,
+                "cost": 1.2,
+                "risk": -0.2,
+                "maintenance_drag": 0.4,
+                "overlap_drag": 0.5,
+                "yield": 0.0,
+                "intent_text": "review pull request",
+                "recommendation_seed": "Instrument",
+                "evidence_quality": "low",
+                "notes": ["missing telemetry"],
+                "telemetry_metrics": {},
+            },
+            {
+                "path": ".github/workflows/b.md",
+                "name": "B",
+                "description": "B",
+                "has_lockfile": True,
+                "lockfile_stale": False,
+                "has_safe_outputs": True,
+                "has_observability": True,
+                "has_imported_observability": False,
+                "strict": True,
+                "timeout_minutes": 10,
+                "permissions_risk": 0.2,
+                "tool_count": 2,
+                "pre_agent_steps_count": 1,
+                "post_steps_count": 1,
+                "agentic_fraction": 0.4,
+                "deterministic_fraction": 0.6,
+                "usefulness": 0.7,
+                "adoption": 0.5,
+                "trust": 0.8,
+                "cost": 0.3,
+                "risk": 0.2,
+                "maintenance_drag": 0.2,
+                "overlap_drag": 0.5,
+                "yield": 0.0,
+                "intent_text": "review pull request security",
+                "recommendation_seed": "Keep",
+                "evidence_quality": "medium",
+                "notes": [],
+                "telemetry_metrics": {"success_rate": 0.9},
+            },
+        ],
+        "overlap_clusters": [{"workflows": [".github/workflows/a.md", ".github/workflows/b.md"], "max_overlap": 0.9, "reason": "review"}],
+        "episode_metrics": [],
+        "organizational_health_signals": {
+            "fragmentation": 0.6,
+            "reuse": 0.2,
+            "trust_concentration": 0.2,
+            "governance_drag": 0.7,
+            "notes": [],
+        },
+        "recommendations_seed": {
+            "keep": [".github/workflows/b.md"],
+            "revise": [],
+            "merge": [],
+            "instrument": [".github/workflows/a.md"],
+            "retire": [],
+        },
+        "overlap_pairs": [{"left": ".github/workflows/a.md", "right": ".github/workflows/b.md", "score": 0.9}],
+    }
+
+
+def test_scores_are_clamped() -> None:
+    """clamp_workflow_scores bounds out-of-range scores into [0, 1]."""
+    bounded = post.clamp_workflow_scores(sample_precompute()["workflows"][0])
+    assert bounded["permissions_risk"] == 1.0  # 1.2 clamped down
+    assert bounded["cost"] == 1.0  # 1.2 clamped down
+    assert bounded["risk"] == 0.0  # -0.2 clamped up
+    assert bounded["agentic_fraction"] == 1.0  # 1.4 clamped down
+    assert bounded["deterministic_fraction"] == 0.0  # -0.4 clamped up
+
+
+def test_recommendations_are_valid_and_mutually_exclusive(tmp_path: Path) -> None:
+    """finalize echoes the agent's keep/instrument lists, which stay disjoint."""
+    agent_dir = tmp_path / "agent"
+    agent_dir.mkdir()
+    (agent_dir / "portfolio-yield-agent.json").write_text(
+        json.dumps(
+            {
+                "recommendations": {
+                    "keep": [{"path": ".github/workflows/b.md"}],
+                    "instrument": [{"path": ".github/workflows/a.md"}],
+                }
+            }
+        ),
+        encoding="utf-8",
+    )
+    final_payload, _summary, _notes = post.finalize(sample_precompute(), agent_dir)
+    assert final_payload["keep"] == [".github/workflows/b.md"]
+    assert final_payload["instrument"] == [".github/workflows/a.md"]
+    # No workflow may appear in more than one recommendation bucket.
+    assert set(final_payload["keep"]).isdisjoint(final_payload["instrument"])
+
+
+def test_postcompute_handles_malformed_agent_output_safely(tmp_path: Path) -> None:
+    """An empty precompute payload (and missing agent dir) produces exit code 1
+    and a written error report with low evidence quality — never a crash."""
+    bad_precompute = tmp_path / "precompute.json"
+    bad_precompute.write_text("{}", encoding="utf-8")
+    out = tmp_path / "out.json"
+    # Note: the agent directory passed here does not exist on disk.
+    exit_code = post.main(["--precompute", str(bad_precompute), "--agent-output", str(tmp_path / "agent"), "--out", str(out)])
+    payload = json.loads(out.read_text(encoding="utf-8"))
+    assert exit_code == 1
+    assert "error" in payload
+    assert payload["evidence_quality"] == "low"
+
+
+def test_postcompute_does_not_allow_invented_telemetry_to_increase_confidence(tmp_path: Path) -> None:
+    """Agent-claimed telemetry and an upgraded evidence_quality are rejected:
+    the deterministic precompute evidence ("low") wins and a note flags the
+    invented telemetry claim."""
+    agent_dir = tmp_path / "agent"
+    agent_dir.mkdir()
+    (agent_dir / "portfolio-yield-agent.json").write_text(
+        json.dumps(
+            {
+                "recommendations": {
+                    "keep": [{"path": ".github/workflows/b.md"}],
+                    "instrument": [{"path": ".github/workflows/a.md"}],
+                },
+                # Workflow a.md has empty telemetry_metrics in the fixture, so
+                # this claim is fabricated from the postcompute's point of view.
+                "telemetry_claims": [{"path": ".github/workflows/a.md", "metric": "success_rate"}],
+                "evidence_quality": "high",
+            }
+        ),
+        encoding="utf-8",
+    )
+    final_payload, _summary, notes = post.finalize(sample_precompute(), agent_dir)
+    assert final_payload["evidence_quality"] == "low"
+    assert any("invented telemetry" in note.lower() for note in notes)
diff --git a/tests/test_aw_yield_precompute.py b/tests/test_aw_yield_precompute.py
new file mode 100644
index 00000000000..47af1eb6568
--- /dev/null
+++ b/tests/test_aw_yield_precompute.py
@@ -0,0 +1,150 @@
+from __future__ import annotations
+
+import os
+import sys
+from pathlib import Path
+
+ROOT = Path(__file__).resolve().parents[1]
+SCRIPTS = ROOT / "scripts"
+sys.path.insert(0, str(SCRIPTS))
+
+import aw_yield_precompute as pre
+
+
+def write_workflow(path: Path, content: str) -> None:
+    """Write a workflow file as UTF-8, creating missing parent directories."""
+    path.parent.mkdir(parents=True, exist_ok=True)
+    path.write_text(content, encoding="utf-8")
+
+
+def test_workflow_discovery_excludes_shared(tmp_path: Path) -> None:
+    """discover_workflow_files skips files under the shared/ subdirectory."""
+    workflows = tmp_path / ".github" / "workflows"
+    write_workflow(workflows / "alpha.md", "---\non: workflow_dispatch\n---\n# Alpha\n")
+    write_workflow(workflows / "shared" / "helper.md", "---\non: workflow_dispatch\n---\n# Helper\n")
+    discovered = pre.discover_workflow_files(workflows)
+    assert [path.name for path in discovered] == ["alpha.md"]
+
+
+def test_frontmatter_parsing_works() -> None:
+    """parse_frontmatter_text handles strings, booleans, integers, nested
+    mappings, and list-of-mapping import entries."""
+    frontmatter = """name: Portfolio Yield\ndescription: Example\nstrict: true\ntimeout-minutes: 15\nimports:\n - uses: shared/otel-observability.md\n with:\n mode: summary\ntools:\n github:\n mode: gh-proxy\n bash: true\nsafe-outputs:\n create-issue:\n max: 1\n"""
+    parsed = pre.parse_frontmatter_text(frontmatter)
+    assert parsed["name"] == "Portfolio Yield"
+    assert parsed["strict"] is True
+    assert parsed["timeout-minutes"] == 15
+    assert parsed["imports"][0]["uses"] == "shared/otel-observability.md"
+    assert parsed["tools"]["github"]["mode"] == "gh-proxy"
+    assert parsed["safe-outputs"]["create-issue"]["max"] == 1
+
+
+def test_imports_are_detected(tmp_path: Path) -> None:
+    """Bare-string import entries resolve to paths under the workflows root."""
+    workflows = tmp_path / ".github" / "workflows"
+    workflow = workflows / "alpha.md"
+    write_workflow(workflow, "---\nimports:\n - shared/otel-observability.md\n---\n# Alpha\n")
+    imports = pre.normalize_import_paths(workflow, pre.read_workflow(workflow)[0])
+    assert imports == [workflows / "shared" / "otel-observability.md"]
+
+
+def test_imported_observability_is_detected(tmp_path: Path) -> None:
+    """Observability declared only in an imported shared file is detected."""
+    workflows = tmp_path / ".github" / "workflows"
+    shared = workflows / "shared" / "otel-observability.md"
+    write_workflow(
+        shared,
+        "---\nobservability:\n otlp:\n endpoint:\n url: ${{ secrets.OTLP_ENDPOINT }}\n---\n",
+    )
+    # The workflow itself declares no observability, only the import.
+    workflow = workflows / "alpha.md"
+    write_workflow(workflow, "---\nimports:\n - shared/otel-observability.md\n---\n# Alpha\n")
+    frontmatter, _ = pre.read_workflow(workflow)
+    assert pre.has_imported_observability(workflow, frontmatter) is True
+
+
+def test_missing_safe_outputs_increases_risk(tmp_path: Path) -> None:
+    """Omitting the safe-outputs section raises the computed risk score."""
+    workflows = tmp_path / ".github" / "workflows"
+    # Shared frontmatter; the safe variant splices a safe-outputs section in
+    # just before the closing --- delimiter.
+    base = "---\non:\n workflow_dispatch:\npermissions:\n contents: read\nstrict: true\ntimeout-minutes: 10\n---\n# Alpha\n"
+    with_safe = workflows / "with-safe.md"
+    without_safe = workflows / "without-safe.md"
+    write_workflow(with_safe, base.replace("---\n# Alpha", "safe-outputs:\n create-issue:\n max: 1\n---\n# Alpha"))
+    write_workflow(without_safe, base)
+    risk_with = pre.build_workflow_record(with_safe, workflows, {})["risk"]
+    risk_without = pre.build_workflow_record(without_safe, workflows, {})["risk"]
+    assert risk_without > risk_with
+
+
+def test_missing_lockfile_is_detected(tmp_path: Path) -> None:
+    """A workflow without a sibling lockfile reports has_lockfile=False."""
+    workflows = tmp_path / ".github" / "workflows"
+    workflow = workflows / "alpha.md"
+    write_workflow(workflow, "---\non: workflow_dispatch\nstrict: true\n---\n# Alpha\n")
+    record = pre.build_workflow_record(workflow, workflows, {})
+    assert record["has_lockfile"] is False
+
+
+def test_stale_lockfile_is_detected_where_mtimes_allow(tmp_path: Path) -> None:
+    """A lockfile whose mtime predates its workflow's mtime is flagged stale."""
+    workflows = tmp_path / ".github" / "workflows"
+    workflow = workflows / "alpha.md"
+    lockfile = workflows / "alpha.lock.yml"
+    write_workflow(workflow, "---\non: workflow_dispatch\nstrict: true\n---\n# Alpha\n")
+    lockfile.write_text("name: alpha\n", encoding="utf-8")
+    # Force deterministic timestamps: lockfile at t=1s, workflow at t=10s,
+    # so the lockfile is older than the source it was compiled from.
+    os.utime(lockfile, (1, 1))
+    os.utime(workflow, (10, 10))
+    record = pre.build_workflow_record(workflow, workflows, {})
+    assert record["has_lockfile"] is True
+    assert record["lockfile_stale"] is True
+
+
+def test_missing_strict_mode_increases_risk(tmp_path: Path) -> None:
+    """strict: false scores strictly higher risk than strict: true."""
+    workflows = tmp_path / ".github" / "workflows"
+    strict_path = workflows / "strict.md"
+    loose_path = workflows / "loose.md"
+    write_workflow(strict_path, "---\non: workflow_dispatch\nstrict: true\ntimeout-minutes: 10\nsafe-outputs:\n create-issue:\n max: 1\n---\n# Strict\n")
+    write_workflow(loose_path, "---\non: workflow_dispatch\nstrict: false\ntimeout-minutes: 10\nsafe-outputs:\n create-issue:\n max: 1\n---\n# Loose\n")
+    assert pre.build_workflow_record(loose_path, workflows, {})["risk"] > pre.build_workflow_record(strict_path, workflows, {})["risk"]
+
+
+def test_missing_timeout_increases_risk(tmp_path: Path) -> None:
+    """Omitting timeout-minutes scores strictly higher risk than setting it."""
+    workflows = tmp_path / ".github" / "workflows"
+    timed = workflows / "timed.md"
+    untimed = workflows / "untimed.md"
+    write_workflow(timed, "---\non: workflow_dispatch\nstrict: true\ntimeout-minutes: 10\nsafe-outputs:\n create-issue:\n max: 1\n---\n# Timed\n")
+    write_workflow(untimed, "---\non: workflow_dispatch\nstrict: true\nsafe-outputs:\n create-issue:\n max: 1\n---\n# Untimed\n")
+    assert pre.build_workflow_record(untimed, workflows, {})["risk"] > pre.build_workflow_record(timed, workflows, {})["risk"]
+
+
+def test_overlap_detection_finds_similar_workflows() -> None:
+    """Near-duplicate intent text produces a pair similarity of at least 0.7."""
+    workflows = [
+        {"path": "a.md", "intent_text": "review pull request code quality security review", "agentic_fraction": 0.4},
+        {"path": "b.md", "intent_text": "review pull request security and code quality", "agentic_fraction": 0.4},
+    ]
+    similarities, _docs = pre.compute_similarity_matrix(workflows)
+    assert max(similarities.values()) >= 0.7
+
+
+def test_high_overlap_clusters_are_produced() -> None:
+    """The two similar review workflows land together in the first cluster;
+    the unrelated release-notes workflow does not prevent clustering."""
+    workflows = [
+        {"path": "a.md", "intent_text": "review pull request code quality security review", "agentic_fraction": 0.4},
+        {"path": "b.md", "intent_text": "review pull request security and code quality", "agentic_fraction": 0.4},
+        {"path": "c.md", "intent_text": "weekly release note generation", "agentic_fraction": 0.4},
+    ]
+    similarities, docs = pre.compute_similarity_matrix(workflows)
+    clusters = pre.build_overlap_clusters(workflows, similarities, docs)
+    assert clusters
+    assert {"a.md", "b.md"}.issubset(set(clusters[0]["workflows"]))
+
+
+def test_awy_formula_is_computed_correctly() -> None:
+    """Yield is the product of the first three scores over 1 plus the sum of
+    the remaining four, rounded to 4 decimal places."""
+    result = pre.compute_workflow_yield(0.6, 0.5, 0.8, 0.2, 0.1, 0.1, 0.0)
+    assert result == round((0.6 * 0.5 * 0.8) / (1 + 0.2 + 0.1 + 0.1 + 0.0), 4)
+
+
+def test_portfolio_overlap_drag_is_computed_correctly() -> None:
+    """Drag is double the sum of squared pair scores, rounded to 4 decimals."""
+    drag = pre.portfolio_overlap_drag({("a", "b"): 0.8, ("a", "c"): 0.5})
+    assert drag == round((0.8**2 + 0.5**2) * 2, 4)
+
+
+def test_agentic_fraction_is_computed_and_bounded() -> None:
+    """Both fractions lie in [0, 1] and sum to 1 (within 4-decimal rounding),
+    even for a very long prompt body ("word " * 1000)."""
+    frontmatter = {
+        "pre-agent-steps": [{"run": "python3 make_summary.py"}],
+        "post-steps": [{"run": "jq . report.json"}],
+        "tools": {"bash": True, "github": {"mode": "gh-proxy"}},
+    }
+    agentic_fraction, deterministic_fraction = pre.estimate_agentic_fraction(frontmatter, "word " * 1000)
+    assert 0.0 <= agentic_fraction <= 1.0
+    assert 0.0 <= deterministic_fraction <= 1.0
+    assert round(agentic_fraction + deterministic_fraction, 4) == 1.0
From b9ab44b2619e5aa298738dc81205c6aa1f423c15 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Sun, 10 May 2026 17:08:11 +0000
Subject: [PATCH 02/13] Compile Agentic Workflow Portfolio Yield workflow
Co-authored-by: mnkiefer <8320933+mnkiefer@users.noreply.github.com>
---
.github/workflows/aw-portfolio-yield.lock.yml | 1439 +++++++++++++++++
.github/workflows/aw-portfolio-yield.md | 3 +-
scripts/aw_yield_precompute.py | 62 +-
3 files changed, 1488 insertions(+), 16 deletions(-)
create mode 100644 .github/workflows/aw-portfolio-yield.lock.yml
diff --git a/.github/workflows/aw-portfolio-yield.lock.yml b/.github/workflows/aw-portfolio-yield.lock.yml
new file mode 100644
index 00000000000..b8800f5fe3b
--- /dev/null
+++ b/.github/workflows/aw-portfolio-yield.lock.yml
@@ -0,0 +1,1439 @@
+# gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"d36a8beb35fc7079f029faf93be7fc0e20f0546e3735b7676083df09be7bcb7b","strict":true,"agent_id":"copilot"}
+# gh-aw-manifest: {"version":1,"secrets":["COPILOT_GITHUB_TOKEN","GH_AW_GITHUB_MCP_SERVER_TOKEN","GH_AW_GITHUB_TOKEN","GITHUB_TOKEN","OTLP_ENDPOINT","OTLP_TOKEN"],"actions":[{"repo":"actions/checkout","sha":"de0fac2e4500dabe0009e67214ff5f5447ce83dd","version":"v6.0.2"},{"repo":"actions/download-artifact","sha":"3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c","version":"v8.0.1"},{"repo":"actions/github-script","sha":"3a2844b7e9c422d3c10d287c895573f7108da1b3","version":"v9.0.0"},{"repo":"actions/setup-node","sha":"48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e","version":"v6.4.0"},{"repo":"actions/upload-artifact","sha":"043fb46d1a93c77aae656e7c1c64a875d1fc6a0a","version":"v7.0.1"}],"containers":[{"image":"ghcr.io/github/gh-aw-firewall/agent:0.25.42"},{"image":"ghcr.io/github/gh-aw-firewall/api-proxy:0.25.42"},{"image":"ghcr.io/github/gh-aw-firewall/cli-proxy:0.25.42"},{"image":"ghcr.io/github/gh-aw-firewall/squid:0.25.42"},{"image":"ghcr.io/github/gh-aw-mcpg:v0.3.6","digest":"sha256:2bb8eef86006a4c5963c55616a9c51c32f27bfdecb023b8aa6f91f6718d9171c","pinned_image":"ghcr.io/github/gh-aw-mcpg:v0.3.6@sha256:2bb8eef86006a4c5963c55616a9c51c32f27bfdecb023b8aa6f91f6718d9171c"},{"image":"ghcr.io/github/github-mcp-server:v1.0.3","digest":"sha256:2ac27ef03461ef2b877031b838a7d1fd7f12b12d4ace7796d8cad91446d55959","pinned_image":"ghcr.io/github/github-mcp-server:v1.0.3@sha256:2ac27ef03461ef2b877031b838a7d1fd7f12b12d4ace7796d8cad91446d55959"},{"image":"node:lts-alpine","digest":"sha256:d1b3b4da11eefd5941e7f0b9cf17783fc99d9c6fc34884a665f40a06dbdfc94f","pinned_image":"node:lts-alpine@sha256:d1b3b4da11eefd5941e7f0b9cf17783fc99d9c6fc34884a665f40a06dbdfc94f"}]}
+# ___ _ _
+# / _ \ | | (_)
+# | |_| | __ _ ___ _ __ | |_ _ ___
+# | _ |/ _` |/ _ \ '_ \| __| |/ __|
+# | | | | (_| | __/ | | | |_| | (__
+# \_| |_/\__, |\___|_| |_|\__|_|\___|
+# __/ |
+# _ _ |___/
+# | | | | / _| |
+# | | | | ___ _ __ _ __| |_| | _____ ____
+# | |/\| |/ _ \ '__| |/ /| _| |/ _ \ \ /\ / / ___|
+# \ /\ / (_) | | | | ( | | | | (_) \ V V /\__ \
+# \/ \/ \___/|_| |_|\_\|_| |_|\___/ \_/\_/ |___/
+#
+# This file was automatically generated by gh-aw. DO NOT EDIT.
+#
+# To update this file, edit the corresponding .md file and run:
+# gh aw compile
+# Not all edits will cause changes to this file.
+#
+# For more information: https://github.github.com/gh-aw/introduction/overview/
+#
+# Weekly portfolio analysis of agentic workflows using deterministic scoring, overlap detection, and OTel-backed evidence for governance recommendations
+#
+# Resolved workflow manifest:
+# Imports:
+# - shared/otel-observability.md
+#
+# Frontmatter env variables:
+# - OTEL_BACKEND_TOKEN: shared/otel-observability.md
+# - OTEL_BACKEND_URL: shared/otel-observability.md
+#
+# Secrets used:
+# - COPILOT_GITHUB_TOKEN
+# - GH_AW_GITHUB_MCP_SERVER_TOKEN
+# - GH_AW_GITHUB_TOKEN
+# - GITHUB_TOKEN
+# - OTLP_ENDPOINT
+# - OTLP_TOKEN
+#
+# Custom actions used:
+# - actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+# - actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
+# - actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+# - actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0 (source v9)
+# - actions/setup-node@48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e # v6.4.0
+# - actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
+#
+# Container images used:
+# - ghcr.io/github/gh-aw-firewall/agent:0.25.42
+# - ghcr.io/github/gh-aw-firewall/api-proxy:0.25.42
+# - ghcr.io/github/gh-aw-firewall/cli-proxy:0.25.42
+# - ghcr.io/github/gh-aw-firewall/squid:0.25.42
+# - ghcr.io/github/gh-aw-mcpg:v0.3.6@sha256:2bb8eef86006a4c5963c55616a9c51c32f27bfdecb023b8aa6f91f6718d9171c
+# - ghcr.io/github/github-mcp-server:v1.0.3@sha256:2ac27ef03461ef2b877031b838a7d1fd7f12b12d4ace7796d8cad91446d55959
+# - node:lts-alpine@sha256:d1b3b4da11eefd5941e7f0b9cf17783fc99d9c6fc34884a665f40a06dbdfc94f
+
+name: "Agentic Workflow Portfolio Yield"
+"on":
+ schedule:
+ - cron: "40 8 * * 1"
+ # Friendly format: weekly on monday around 09:00 (scattered)
+ workflow_dispatch:
+ inputs:
+ aw_context:
+ default: ""
+ description: Agent caller context (used internally by Agentic Workflows).
+ required: false
+ type: string
+
+permissions: {}
+
+concurrency:
+ group: "gh-aw-${{ github.workflow }}"
+
+run-name: "Agentic Workflow Portfolio Yield"
+
+env:
+ OTEL_BACKEND_TOKEN: ${{ secrets.OTLP_TOKEN }}
+ OTEL_BACKEND_URL: ${{ secrets.OTLP_ENDPOINT }}
+ OTEL_EXPORTER_OTLP_ENDPOINT: ${{ secrets.OTLP_ENDPOINT }}
+ OTEL_SERVICE_NAME: gh-aw
+ COPILOT_OTEL_FILE_EXPORTER_PATH: /tmp/gh-aw/copilot-otel.jsonl
+ OTEL_EXPORTER_OTLP_HEADERS: Authorization=${{ secrets.OTLP_TOKEN }}
+ GH_AW_OTLP_ENDPOINTS: '[{"url":"${{ secrets.OTLP_ENDPOINT }}","headers":"Authorization=${{ secrets.OTLP_TOKEN }}"}]'
+
+jobs:
+ activation:
+ runs-on: ubuntu-slim
+ permissions:
+ actions: read
+ contents: read
+ outputs:
+ comment_id: ""
+ comment_repo: ""
+ engine_id: ${{ steps.generate_aw_info.outputs.engine_id }}
+ lockdown_check_failed: ${{ steps.generate_aw_info.outputs.lockdown_check_failed == 'true' }}
+ model: ${{ steps.generate_aw_info.outputs.model }}
+ secret_verification_result: ${{ steps.validate-secret.outputs.verification_result }}
+ setup-parent-span-id: ${{ steps.setup.outputs.parent-span-id || steps.setup.outputs.span-id }}
+ setup-span-id: ${{ steps.setup.outputs.span-id }}
+ setup-trace-id: ${{ steps.setup.outputs.trace-id }}
+ stale_lock_file_failed: ${{ steps.check-lock-file.outputs.stale_lock_file_failed == 'true' }}
+ steps:
+ - name: Checkout actions folder
+ uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+ with:
+ repository: github/gh-aw
+ sparse-checkout: |
+ actions
+ persist-credentials: false
+ - name: Setup Scripts
+ id: setup
+ uses: ./actions/setup
+ with:
+ destination: ${{ runner.temp }}/gh-aw/actions
+ job-name: ${{ github.job }}
+ env:
+ GH_AW_SETUP_WORKFLOW_NAME: "Agentic Workflow Portfolio Yield"
+ GH_AW_CURRENT_WORKFLOW_REF: ${{ github.repository }}/.github/workflows/aw-portfolio-yield.lock.yml@${{ github.ref }}
+ GH_AW_INFO_VERSION: "1.0.43"
+ - name: Mask OTLP telemetry headers
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/mask_otlp_headers.sh"
+ - name: Generate agentic run info
+ id: generate_aw_info
+ env:
+ GH_AW_INFO_ENGINE_ID: "copilot"
+ GH_AW_INFO_ENGINE_NAME: "GitHub Copilot CLI"
+ GH_AW_INFO_MODEL: ${{ vars.GH_AW_MODEL_AGENT_COPILOT || 'claude-sonnet-4.6' }}
+ GH_AW_INFO_VERSION: "1.0.43"
+ GH_AW_INFO_AGENT_VERSION: "1.0.43"
+ GH_AW_INFO_WORKFLOW_NAME: "Agentic Workflow Portfolio Yield"
+ GH_AW_INFO_EXPERIMENTAL: "false"
+ GH_AW_INFO_SUPPORTS_TOOLS_ALLOWLIST: "true"
+ GH_AW_INFO_STAGED: "false"
+ GH_AW_INFO_ALLOWED_DOMAINS: '["defaults","github"]'
+ GH_AW_INFO_FIREWALL_ENABLED: "true"
+ GH_AW_INFO_AWF_VERSION: "v0.25.42"
+ GH_AW_INFO_AWMG_VERSION: ""
+ GH_AW_INFO_FIREWALL_TYPE: "squid"
+ GH_AW_COMPILED_STRICT: "true"
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+ with:
+ script: |
+ const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io, getOctokit);
+ const { main } = require('${{ runner.temp }}/gh-aw/actions/generate_aw_info.cjs');
+ await main(core, context);
+ - name: Validate COPILOT_GITHUB_TOKEN secret
+ id: validate-secret
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/validate_multi_secret.sh" COPILOT_GITHUB_TOKEN 'GitHub Copilot CLI' https://github.github.com/gh-aw/reference/engines/#github-copilot-default
+ env:
+ COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }}
+ - name: Checkout .github and .agents folders
+ uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+ with:
+ persist-credentials: false
+ sparse-checkout: |
+ .github
+ .agents
+ actions/setup
+ .claude
+ .codex
+ .crush
+ .gemini
+ .opencode
+ .pi
+ sparse-checkout-cone-mode: true
+ fetch-depth: 1
+ - name: Save agent config folders for base branch restoration
+ env:
+ GH_AW_AGENT_FOLDERS: ".agents .claude .codex .crush .gemini .github .opencode .pi"
+ GH_AW_AGENT_FILES: ".crush.json AGENTS.md CLAUDE.md GEMINI.md PI.md opencode.jsonc"
+ # poutine:ignore untrusted_checkout_exec
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/save_base_github_folders.sh"
+ - name: Check workflow lock file
+ id: check-lock-file
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+ env:
+ GH_AW_WORKFLOW_FILE: "aw-portfolio-yield.lock.yml"
+ GH_AW_CONTEXT_WORKFLOW_REF: "${{ github.workflow_ref }}"
+ with:
+ script: |
+ const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io, getOctokit);
+ const { main } = require('${{ runner.temp }}/gh-aw/actions/check_workflow_timestamp_api.cjs');
+ await main();
+ - name: Create prompt with built-in context
+ env:
+ GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
+ GH_AW_SAFE_OUTPUTS: ${{ runner.temp }}/gh-aw/safeoutputs/outputs.jsonl
+ GH_AW_GITHUB_ACTOR: ${{ github.actor }}
+ GH_AW_GITHUB_EVENT_COMMENT_ID: ${{ github.event.comment.id }}
+ GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER: ${{ github.event.discussion.number }}
+ GH_AW_GITHUB_EVENT_ISSUE_NUMBER: ${{ github.event.issue.number }}
+ GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER: ${{ github.event.pull_request.number }}
+ GH_AW_GITHUB_REPOSITORY: ${{ github.repository }}
+ GH_AW_GITHUB_RUN_ID: ${{ github.run_id }}
+ GH_AW_GITHUB_WORKSPACE: ${{ github.workspace }}
+ # poutine:ignore untrusted_checkout_exec
+ run: |
+ bash "${RUNNER_TEMP}/gh-aw/actions/create_prompt_first.sh"
+ {
+ cat << 'GH_AW_PROMPT_4387b81dbd2ba9bc_EOF'
+
+ GH_AW_PROMPT_4387b81dbd2ba9bc_EOF
+ cat "${RUNNER_TEMP}/gh-aw/prompts/xpia.md"
+ cat "${RUNNER_TEMP}/gh-aw/prompts/temp_folder_prompt.md"
+ cat "${RUNNER_TEMP}/gh-aw/prompts/markdown.md"
+ cat "${RUNNER_TEMP}/gh-aw/prompts/safe_outputs_prompt.md"
+ cat << 'GH_AW_PROMPT_4387b81dbd2ba9bc_EOF'
+
+ Tools: create_issue, missing_tool, missing_data, noop
+
+ GH_AW_PROMPT_4387b81dbd2ba9bc_EOF
+ cat "${RUNNER_TEMP}/gh-aw/prompts/mcp_cli_tools_prompt.md"
+ cat << 'GH_AW_PROMPT_4387b81dbd2ba9bc_EOF'
+
+ The following GitHub context information is available for this workflow:
+ {{#if __GH_AW_GITHUB_ACTOR__ }}
+ - **actor**: __GH_AW_GITHUB_ACTOR__
+ {{/if}}
+ {{#if __GH_AW_GITHUB_REPOSITORY__ }}
+ - **repository**: __GH_AW_GITHUB_REPOSITORY__
+ {{/if}}
+ {{#if __GH_AW_GITHUB_WORKSPACE__ }}
+ - **workspace**: __GH_AW_GITHUB_WORKSPACE__
+ {{/if}}
+ {{#if __GH_AW_GITHUB_EVENT_ISSUE_NUMBER__ }}
+ - **issue-number**: #__GH_AW_GITHUB_EVENT_ISSUE_NUMBER__
+ {{/if}}
+ {{#if __GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER__ }}
+ - **discussion-number**: #__GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER__
+ {{/if}}
+ {{#if __GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER__ }}
+ - **pull-request-number**: #__GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER__
+ {{/if}}
+ {{#if __GH_AW_GITHUB_EVENT_COMMENT_ID__ }}
+ - **comment-id**: __GH_AW_GITHUB_EVENT_COMMENT_ID__
+ {{/if}}
+ {{#if __GH_AW_GITHUB_RUN_ID__ }}
+ - **workflow-run-id**: __GH_AW_GITHUB_RUN_ID__
+ {{/if}}
+
+
+ GH_AW_PROMPT_4387b81dbd2ba9bc_EOF
+ cat "${RUNNER_TEMP}/gh-aw/prompts/cli_proxy_with_safeoutputs_prompt.md"
+ cat << 'GH_AW_PROMPT_4387b81dbd2ba9bc_EOF'
+
+ {{#runtime-import .github/workflows/shared/otel-observability.md}}
+ {{#runtime-import .github/workflows/aw-portfolio-yield.md}}
+ GH_AW_PROMPT_4387b81dbd2ba9bc_EOF
+ } > "$GH_AW_PROMPT"
+ - name: Interpolate variables and render templates
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+ env:
+ GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
+ GH_AW_ENGINE_ID: "copilot"
+ with:
+ script: |
+ const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io, getOctokit);
+ const { main } = require('${{ runner.temp }}/gh-aw/actions/interpolate_prompt.cjs');
+ await main();
+ - name: Substitute placeholders
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+ env:
+ GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
+ GH_AW_GITHUB_ACTOR: ${{ github.actor }}
+ GH_AW_GITHUB_EVENT_COMMENT_ID: ${{ github.event.comment.id }}
+ GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER: ${{ github.event.discussion.number }}
+ GH_AW_GITHUB_EVENT_ISSUE_NUMBER: ${{ github.event.issue.number }}
+ GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER: ${{ github.event.pull_request.number }}
+ GH_AW_GITHUB_REPOSITORY: ${{ github.repository }}
+ GH_AW_GITHUB_RUN_ID: ${{ github.run_id }}
+ GH_AW_GITHUB_WORKSPACE: ${{ github.workspace }}
+ GH_AW_MCP_CLI_SERVERS_LIST: '- `safeoutputs` — run `safeoutputs --help` to see available tools'
+ with:
+ script: |
+ const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io, getOctokit);
+
+ const substitutePlaceholders = require('${{ runner.temp }}/gh-aw/actions/substitute_placeholders.cjs');
+
+ // Call the substitution function
+ return await substitutePlaceholders({
+ file: process.env.GH_AW_PROMPT,
+ substitutions: {
+ GH_AW_GITHUB_ACTOR: process.env.GH_AW_GITHUB_ACTOR,
+ GH_AW_GITHUB_EVENT_COMMENT_ID: process.env.GH_AW_GITHUB_EVENT_COMMENT_ID,
+ GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER: process.env.GH_AW_GITHUB_EVENT_DISCUSSION_NUMBER,
+ GH_AW_GITHUB_EVENT_ISSUE_NUMBER: process.env.GH_AW_GITHUB_EVENT_ISSUE_NUMBER,
+ GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER: process.env.GH_AW_GITHUB_EVENT_PULL_REQUEST_NUMBER,
+ GH_AW_GITHUB_REPOSITORY: process.env.GH_AW_GITHUB_REPOSITORY,
+ GH_AW_GITHUB_RUN_ID: process.env.GH_AW_GITHUB_RUN_ID,
+ GH_AW_GITHUB_WORKSPACE: process.env.GH_AW_GITHUB_WORKSPACE,
+ GH_AW_MCP_CLI_SERVERS_LIST: process.env.GH_AW_MCP_CLI_SERVERS_LIST
+ }
+ });
+ - name: Validate prompt placeholders
+ env:
+ GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
+ # poutine:ignore untrusted_checkout_exec
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/validate_prompt_placeholders.sh"
+ - name: Print prompt
+ env:
+ GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
+ # poutine:ignore untrusted_checkout_exec
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/print_prompt_summary.sh"
+ - name: Upload activation artifact
+ if: success()
+ uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
+ with:
+ name: activation
+ include-hidden-files: true
+ path: |
+ /tmp/gh-aw/aw_info.json
+ /tmp/gh-aw/aw-prompts/prompt.txt
+ /tmp/gh-aw/aw-prompts/prompt-template.txt
+ /tmp/gh-aw/aw-prompts/prompt-import-tree.json
+ /tmp/gh-aw/github_rate_limits.jsonl
+ /tmp/gh-aw/base
+ /tmp/gh-aw/.github/agents
+ if-no-files-found: ignore
+ retention-days: 1
+
+ agent:
+ needs: activation
+ runs-on: ubuntu-latest
+ permissions:
+ actions: read
+ contents: read
+ issues: read
+ pull-requests: read
+ concurrency:
+ group: "gh-aw-copilot-${{ github.workflow }}"
+ env:
+ DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
+ GH_AW_ASSETS_ALLOWED_EXTS: ""
+ GH_AW_ASSETS_BRANCH: ""
+ GH_AW_ASSETS_MAX_SIZE_KB: 0
+ GH_AW_MCP_LOG_DIR: /tmp/gh-aw/mcp-logs/safeoutputs
+ GH_AW_WORKFLOW_ID_SANITIZED: awportfolioyield
+ outputs:
+ agentic_engine_timeout: ${{ steps.detect-copilot-errors.outputs.agentic_engine_timeout || 'false' }}
+ checkout_pr_success: ${{ steps.checkout-pr.outputs.checkout_pr_success || 'true' }}
+ effective_tokens: ${{ steps.parse-mcp-gateway.outputs.effective_tokens }}
+ effective_tokens_rate_limit_error: ${{ steps.parse-mcp-gateway.outputs.effective_tokens_rate_limit_error || 'false' }}
+ has_patch: ${{ steps.collect_output.outputs.has_patch }}
+ inference_access_error: ${{ steps.detect-copilot-errors.outputs.inference_access_error || 'false' }}
+ mcp_policy_error: ${{ steps.detect-copilot-errors.outputs.mcp_policy_error || 'false' }}
+ model: ${{ needs.activation.outputs.model }}
+ model_not_supported_error: ${{ steps.detect-copilot-errors.outputs.model_not_supported_error || 'false' }}
+ output: ${{ steps.collect_output.outputs.output }}
+ output_types: ${{ steps.collect_output.outputs.output_types }}
+ setup-parent-span-id: ${{ steps.setup.outputs.parent-span-id || steps.setup.outputs.span-id }}
+ setup-span-id: ${{ steps.setup.outputs.span-id }}
+ setup-trace-id: ${{ steps.setup.outputs.trace-id }}
+ steps:
+ - name: Checkout actions folder
+ uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+ with:
+ repository: github/gh-aw
+ sparse-checkout: |
+ actions
+ persist-credentials: false
+ - name: Setup Scripts
+ id: setup
+ uses: ./actions/setup
+ with:
+ destination: ${{ runner.temp }}/gh-aw/actions
+ job-name: ${{ github.job }}
+ trace-id: ${{ needs.activation.outputs.setup-trace-id }}
+ parent-span-id: ${{ needs.activation.outputs.setup-parent-span-id || needs.activation.outputs.setup-span-id }}
+ env:
+ GH_AW_SETUP_WORKFLOW_NAME: "Agentic Workflow Portfolio Yield"
+ GH_AW_CURRENT_WORKFLOW_REF: ${{ github.repository }}/.github/workflows/aw-portfolio-yield.lock.yml@${{ github.ref }}
+ GH_AW_INFO_VERSION: "1.0.43"
+ - name: Set runtime paths
+ id: set-runtime-paths
+ run: |
+ {
+ echo "GH_AW_SAFE_OUTPUTS=${RUNNER_TEMP}/gh-aw/safeoutputs/outputs.jsonl"
+ echo "GH_AW_SAFE_OUTPUTS_CONFIG_PATH=${RUNNER_TEMP}/gh-aw/safeoutputs/config.json"
+ echo "GH_AW_SAFE_OUTPUTS_TOOLS_PATH=${RUNNER_TEMP}/gh-aw/safeoutputs/tools.json"
+ } >> "$GITHUB_OUTPUT"
+ - name: Mask OTLP telemetry headers
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/mask_otlp_headers.sh"
+ - name: Checkout repository
+ uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+ with:
+ persist-credentials: false
+ - name: Setup Node.js
+ uses: actions/setup-node@48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e # v6.4.0
+ with:
+ node-version: '24'
+ package-manager-cache: false
+ - name: Create gh-aw temp directory
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/create_gh_aw_tmp_dir.sh"
+ - name: Configure gh CLI for GitHub Enterprise
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/configure_gh_for_ghe.sh"
+ env:
+ GH_TOKEN: ${{ github.token }}
+ - name: Configure Git credentials
+ env:
+ REPO_NAME: ${{ github.repository }}
+ SERVER_URL: ${{ github.server_url }}
+ GITHUB_TOKEN: ${{ github.token }}
+ run: |
+ git config --global user.email "github-actions[bot]@users.noreply.github.com"
+ git config --global user.name "github-actions[bot]"
+ git config --global am.keepcr true
+ # Re-authenticate git with GitHub token
+ SERVER_URL_STRIPPED="${SERVER_URL#https://}"
+ git remote set-url origin "https://x-access-token:${GITHUB_TOKEN}@${SERVER_URL_STRIPPED}/${REPO_NAME}.git"
+ echo "Git configured with standard GitHub Actions identity"
+ - name: Checkout PR branch
+ id: checkout-pr
+ if: |
+ github.event.pull_request || github.event.issue.pull_request
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+ env:
+ GH_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
+ with:
+ github-token: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
+ script: |
+ const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io, getOctokit);
+ const { main } = require('${{ runner.temp }}/gh-aw/actions/checkout_pr_branch.cjs');
+ await main();
+ - name: Install GitHub Copilot CLI
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/install_copilot_cli.sh" 1.0.43
+ env:
+ GH_HOST: github.com
+ - name: Install AWF binary
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/install_awf_binary.sh" v0.25.42
+ - name: Determine automatic lockdown mode for GitHub MCP Server
+ id: determine-automatic-lockdown
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+ env:
+ GH_AW_GITHUB_TOKEN: ${{ secrets.GH_AW_GITHUB_TOKEN }}
+ GH_AW_GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN }}
+ with:
+ script: |
+ const determineAutomaticLockdown = require('${{ runner.temp }}/gh-aw/actions/determine_automatic_lockdown.cjs');
+ await determineAutomaticLockdown(github, context, core);
+ - name: Download activation artifact
+ uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
+ with:
+ name: activation
+ path: /tmp/gh-aw
+ - name: Restore agent config folders from base branch
+ if: steps.checkout-pr.outcome == 'success'
+ env:
+ GH_AW_AGENT_FOLDERS: ".agents .claude .codex .crush .gemini .github .opencode .pi"
+ GH_AW_AGENT_FILES: ".crush.json AGENTS.md CLAUDE.md GEMINI.md PI.md opencode.jsonc"
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/restore_base_github_folders.sh"
+ - name: Restore inline sub-agents from activation artifact
+ env:
+ GH_AW_SUB_AGENT_DIR: ".github/agents"
+ GH_AW_SUB_AGENT_EXT: ".agent.md"
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/restore_inline_sub_agents.sh"
+ - name: Precompute workflow portfolio data
+ run: |-
+ set -euo pipefail
+ mkdir -p /tmp/gh-aw
+ python3 scripts/aw_yield_precompute.py --workflows ".github/workflows" --out /tmp/aw-yield-precompute.json
+
+ - name: Download container images
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/download_docker_images.sh" ghcr.io/github/gh-aw-firewall/agent:0.25.42 ghcr.io/github/gh-aw-firewall/api-proxy:0.25.42 ghcr.io/github/gh-aw-firewall/cli-proxy:0.25.42 ghcr.io/github/gh-aw-firewall/squid:0.25.42 ghcr.io/github/gh-aw-mcpg:v0.3.6@sha256:2bb8eef86006a4c5963c55616a9c51c32f27bfdecb023b8aa6f91f6718d9171c ghcr.io/github/github-mcp-server:v1.0.3@sha256:2ac27ef03461ef2b877031b838a7d1fd7f12b12d4ace7796d8cad91446d55959 node:lts-alpine@sha256:d1b3b4da11eefd5941e7f0b9cf17783fc99d9c6fc34884a665f40a06dbdfc94f
+ - name: Generate Safe Outputs Config
+ run: |
+ mkdir -p "${RUNNER_TEMP}/gh-aw/safeoutputs"
+ mkdir -p /tmp/gh-aw/safeoutputs
+ mkdir -p /tmp/gh-aw/mcp-logs/safeoutputs
+ cat > "${RUNNER_TEMP}/gh-aw/safeoutputs/config.json" << 'GH_AW_SAFE_OUTPUTS_CONFIG_193b35d124023bca_EOF'
+ {"create_issue":{"close_older_issues":true,"expires":720,"labels":["automation","report","observability"],"max":1},"create_report_incomplete_issue":{},"mentions":{"enabled":false},"missing_data":{},"missing_tool":{},"noop":{"max":1,"report-as-issue":"true"},"report_incomplete":{}}
+ GH_AW_SAFE_OUTPUTS_CONFIG_193b35d124023bca_EOF
+ - name: Generate Safe Outputs Tools
+ env:
+ GH_AW_TOOLS_META_JSON: |
+ {
+ "description_suffixes": {
+ "create_issue": " CONSTRAINTS: Maximum 1 issue(s) can be created. Labels [\"automation\" \"report\" \"observability\"] will be automatically added."
+ },
+ "repo_params": {},
+ "dynamic_tools": []
+ }
+ GH_AW_VALIDATION_JSON: |
+ {
+ "create_issue": {
+ "defaultMax": 1,
+ "fields": {
+ "body": {
+ "required": true,
+ "type": "string",
+ "sanitize": true,
+ "maxLength": 65000
+ },
+ "fields": {
+ "type": "array"
+ },
+ "labels": {
+ "type": "array",
+ "itemType": "string",
+ "itemSanitize": true,
+ "itemMaxLength": 128
+ },
+ "parent": {
+ "issueOrPRNumber": true
+ },
+ "repo": {
+ "type": "string",
+ "maxLength": 256
+ },
+ "temporary_id": {
+ "type": "string"
+ },
+ "title": {
+ "required": true,
+ "type": "string",
+ "sanitize": true,
+ "maxLength": 128
+ }
+ }
+ },
+ "missing_data": {
+ "defaultMax": 20,
+ "fields": {
+ "alternatives": {
+ "type": "string",
+ "sanitize": true,
+ "maxLength": 256
+ },
+ "context": {
+ "type": "string",
+ "sanitize": true,
+ "maxLength": 256
+ },
+ "data_type": {
+ "type": "string",
+ "sanitize": true,
+ "maxLength": 128
+ },
+ "reason": {
+ "type": "string",
+ "sanitize": true,
+ "maxLength": 256
+ }
+ }
+ },
+ "missing_tool": {
+ "defaultMax": 20,
+ "fields": {
+ "alternatives": {
+ "type": "string",
+ "sanitize": true,
+ "maxLength": 512
+ },
+ "reason": {
+ "required": true,
+ "type": "string",
+ "sanitize": true,
+ "maxLength": 256
+ },
+ "tool": {
+ "type": "string",
+ "sanitize": true,
+ "maxLength": 128
+ }
+ }
+ },
+ "noop": {
+ "defaultMax": 1,
+ "fields": {
+ "message": {
+ "required": true,
+ "type": "string",
+ "sanitize": true,
+ "maxLength": 65000
+ }
+ }
+ },
+ "report_incomplete": {
+ "defaultMax": 5,
+ "fields": {
+ "details": {
+ "type": "string",
+ "sanitize": true,
+ "maxLength": 65000
+ },
+ "reason": {
+ "required": true,
+ "type": "string",
+ "sanitize": true,
+ "maxLength": 1024
+ }
+ }
+ }
+ }
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+ with:
+ script: |
+ const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io, getOctokit);
+ const { main } = require('${{ runner.temp }}/gh-aw/actions/generate_safe_outputs_tools.cjs');
+ await main();
+ - name: Generate Safe Outputs MCP Server Config
+ id: safe-outputs-config
+ run: |
+ # Generate a secure random API key (360 bits of entropy, 40+ chars)
+ # Mask immediately so the key can never appear in step logs
+ API_KEY=$(openssl rand -base64 45 | tr -d '/+=')
+ echo "::add-mask::${API_KEY}"
+
+ PORT=3001
+
+ # Set outputs for next steps
+ {
+ echo "safe_outputs_api_key=${API_KEY}"
+ echo "safe_outputs_port=${PORT}"
+ } >> "$GITHUB_OUTPUT"
+
+ echo "Safe Outputs MCP server will run on port ${PORT}"
+
+ - name: Start Safe Outputs MCP HTTP Server
+ id: safe-outputs-start
+ env:
+ DEBUG: '*'
+ GH_AW_SAFE_OUTPUTS: ${{ steps.set-runtime-paths.outputs.GH_AW_SAFE_OUTPUTS }}
+ GH_AW_SAFE_OUTPUTS_PORT: ${{ steps.safe-outputs-config.outputs.safe_outputs_port }}
+ GH_AW_SAFE_OUTPUTS_API_KEY: ${{ steps.safe-outputs-config.outputs.safe_outputs_api_key }}
+ GH_AW_SAFE_OUTPUTS_TOOLS_PATH: ${{ runner.temp }}/gh-aw/safeoutputs/tools.json
+ GH_AW_SAFE_OUTPUTS_CONFIG_PATH: ${{ runner.temp }}/gh-aw/safeoutputs/config.json
+ GH_AW_MCP_LOG_DIR: /tmp/gh-aw/mcp-logs/safeoutputs
+ run: |
+ # Environment variables are set above to prevent template injection
+ export DEBUG
+ export GH_AW_SAFE_OUTPUTS
+ export GH_AW_SAFE_OUTPUTS_PORT
+ export GH_AW_SAFE_OUTPUTS_API_KEY
+ export GH_AW_SAFE_OUTPUTS_TOOLS_PATH
+ export GH_AW_SAFE_OUTPUTS_CONFIG_PATH
+ export GH_AW_MCP_LOG_DIR
+
+ bash "${RUNNER_TEMP}/gh-aw/actions/start_safe_outputs_server.sh"
+
+ - name: Start MCP Gateway
+ id: start-mcp-gateway
+ env:
+ GH_AW_SAFE_OUTPUTS: ${{ steps.set-runtime-paths.outputs.GH_AW_SAFE_OUTPUTS }}
+ GH_AW_SAFE_OUTPUTS_API_KEY: ${{ steps.safe-outputs-start.outputs.api_key }}
+ GH_AW_SAFE_OUTPUTS_PORT: ${{ steps.safe-outputs-start.outputs.port }}
+ OTEL_BACKEND_TOKEN: ${{ env.OTEL_BACKEND_TOKEN }}
+ OTEL_BACKEND_URL: ${{ env.OTEL_BACKEND_URL }}
+ run: |
+ set -eo pipefail
+ mkdir -p "${RUNNER_TEMP}/gh-aw/mcp-config"
+
+ # Export gateway environment variables for MCP config and gateway script
+ export MCP_GATEWAY_PORT="8080"
+ export MCP_GATEWAY_DOMAIN="host.docker.internal"
+ export MCP_GATEWAY_HOST_DOMAIN="localhost"
+ MCP_GATEWAY_API_KEY=$(openssl rand -base64 45 | tr -d '/+=')
+ echo "::add-mask::${MCP_GATEWAY_API_KEY}"
+ export MCP_GATEWAY_API_KEY
+ export MCP_GATEWAY_PAYLOAD_DIR="/tmp/gh-aw/mcp-payloads"
+ mkdir -p "${MCP_GATEWAY_PAYLOAD_DIR}"
+ export MCP_GATEWAY_PAYLOAD_SIZE_THRESHOLD="524288"
+ export DEBUG="*"
+
+ export GH_AW_ENGINE="copilot"
+ MCP_GATEWAY_UID=$(id -u 2>/dev/null || echo '0')
+ MCP_GATEWAY_GID=$(id -g 2>/dev/null || echo '0')
+ DOCKER_SOCK_GID=$(stat -c '%g' /var/run/docker.sock 2>/dev/null || echo '0')
+ export MCP_GATEWAY_DOCKER_COMMAND='docker run -i --rm --network host --add-host host.docker.internal:127.0.0.1 --user '"${MCP_GATEWAY_UID}"':'"${MCP_GATEWAY_GID}"' --group-add '"${DOCKER_SOCK_GID}"' -v /var/run/docker.sock:/var/run/docker.sock -e MCP_GATEWAY_PORT -e MCP_GATEWAY_DOMAIN -e MCP_GATEWAY_API_KEY -e MCP_GATEWAY_PAYLOAD_DIR -e MCP_GATEWAY_PAYLOAD_SIZE_THRESHOLD -e DEBUG -e MCP_GATEWAY_LOG_DIR -e GH_AW_MCP_LOG_DIR -e GH_AW_SAFE_OUTPUTS -e GH_AW_SAFE_OUTPUTS_CONFIG_PATH -e GH_AW_SAFE_OUTPUTS_TOOLS_PATH -e GH_AW_ASSETS_BRANCH -e GH_AW_ASSETS_MAX_SIZE_KB -e GH_AW_ASSETS_ALLOWED_EXTS -e DEFAULT_BRANCH -e GITHUB_MCP_SERVER_TOKEN -e GITHUB_MCP_GUARD_MIN_INTEGRITY -e GITHUB_MCP_GUARD_REPOS -e GITHUB_REPOSITORY -e GITHUB_SERVER_URL -e GITHUB_SHA -e GITHUB_WORKSPACE -e GITHUB_TOKEN -e GITHUB_RUN_ID -e GITHUB_RUN_NUMBER -e GITHUB_RUN_ATTEMPT -e GITHUB_JOB -e GITHUB_ACTION -e GITHUB_EVENT_NAME -e GITHUB_EVENT_PATH -e GITHUB_ACTOR -e GITHUB_ACTOR_ID -e GITHUB_TRIGGERING_ACTOR -e GITHUB_WORKFLOW -e GITHUB_WORKFLOW_REF -e GITHUB_WORKFLOW_SHA -e GITHUB_REF -e GITHUB_REF_NAME -e GITHUB_REF_TYPE -e GITHUB_HEAD_REF -e GITHUB_BASE_REF -e GH_AW_SAFE_OUTPUTS_PORT -e GH_AW_SAFE_OUTPUTS_API_KEY -e GITHUB_AW_OTEL_TRACE_ID -e GITHUB_AW_OTEL_PARENT_SPAN_ID -e OTEL_BACKEND_TOKEN -e OTEL_BACKEND_URL -v /tmp/gh-aw/mcp-payloads:/tmp/gh-aw/mcp-payloads:rw -v /opt:/opt:ro -v /tmp:/tmp:rw -v '"${GITHUB_WORKSPACE}"':'"${GITHUB_WORKSPACE}"':rw ghcr.io/github/gh-aw-mcpg:v0.3.6'
+
+ mkdir -p /home/runner/.copilot
+ GH_AW_NODE=$(which node 2>/dev/null || command -v node 2>/dev/null || echo node)
+ cat << GH_AW_MCP_CONFIG_15bccfb2d4a49cee_EOF | "$GH_AW_NODE" "${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.cjs"
+ {
+ "mcpServers": {
+ "otel": {
+ "type": "stdio",
+ "container": "node:lts-alpine",
+ "entrypoint": "npx",
+ "entrypointArgs": [
+ "@your-org/otel-query-mcp"
+ ],
+ "tools": [
+ "*"
+ ],
+ "env": {
+ "OTEL_BACKEND_TOKEN": "\${OTEL_BACKEND_TOKEN}",
+ "OTEL_BACKEND_URL": "\${OTEL_BACKEND_URL}"
+ },
+ "guard-policies": {
+ "write-sink": {
+ "accept": [
+ "*"
+ ]
+ }
+ }
+ },
+ "safeoutputs": {
+ "type": "http",
+ "url": "http://host.docker.internal:$GH_AW_SAFE_OUTPUTS_PORT",
+ "headers": {
+ "Authorization": "\${GH_AW_SAFE_OUTPUTS_API_KEY}"
+ },
+ "guard-policies": {
+ "write-sink": {
+ "accept": [
+ "*"
+ ]
+ }
+ }
+ }
+ },
+ "gateway": {
+ "port": $MCP_GATEWAY_PORT,
+ "domain": "${MCP_GATEWAY_DOMAIN}",
+ "apiKey": "${MCP_GATEWAY_API_KEY}",
+ "payloadDir": "${MCP_GATEWAY_PAYLOAD_DIR}",
+ "opentelemetry": {
+ "endpoint": "${OTEL_EXPORTER_OTLP_ENDPOINT}",
+ "headers": "${OTEL_EXPORTER_OTLP_HEADERS}",
+ "traceId": "${GITHUB_AW_OTEL_TRACE_ID}",
+ "spanId": "${GITHUB_AW_OTEL_PARENT_SPAN_ID}"
+ }
+ }
+ }
+ GH_AW_MCP_CONFIG_15bccfb2d4a49cee_EOF
+ - name: Mount MCP servers as CLIs
+ id: mount-mcp-clis
+ continue-on-error: true
+ env:
+ MCP_GATEWAY_API_KEY: ${{ steps.start-mcp-gateway.outputs.gateway-api-key }}
+ MCP_GATEWAY_DOMAIN: ${{ steps.start-mcp-gateway.outputs.gateway-domain }}
+ MCP_GATEWAY_PORT: ${{ steps.start-mcp-gateway.outputs.gateway-port }}
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+ with:
+ script: |
+ const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io);
+ const { main } = require('${{ runner.temp }}/gh-aw/actions/mount_mcp_as_cli.cjs');
+ await main();
+ - name: Clean credentials
+ continue-on-error: true
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/clean_git_credentials.sh"
+ - name: Audit pre-agent workspace
+ id: pre_agent_audit
+ continue-on-error: true
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/audit_pre_agent_workspace.sh"
+ - name: Start CLI Proxy
+ env:
+ GH_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
+ GITHUB_SERVER_URL: ${{ github.server_url }}
+ CLI_PROXY_POLICY: '{"allow-only":{"repos":"all","min-integrity":"none"}}'
+ CLI_PROXY_IMAGE: 'ghcr.io/github/gh-aw-mcpg:v0.3.6'
+ run: |
+ bash "${RUNNER_TEMP}/gh-aw/actions/start_cli_proxy.sh"
+ - name: Execute GitHub Copilot CLI
+ id: agentic_execution
+ # Copilot CLI tool arguments (sorted):
+ timeout-minutes: 25
+ run: |
+ set -o pipefail
+ touch /tmp/gh-aw/agent-step-summary.md
+ GH_AW_NODE_BIN=$(command -v node 2>/dev/null || true)
+ export GH_AW_NODE_BIN
+ (umask 177 && touch /tmp/gh-aw/agent-stdio.log)
+ printf '%s\n' '{"$schema":"https://github.com/github/gh-aw-firewall/releases/download/v0.25.42/awf-config.schema.json","network":{"allowDomains":["*.githubusercontent.com","api.business.githubcopilot.com","api.enterprise.githubcopilot.com","api.github.com","api.githubcopilot.com","api.individual.githubcopilot.com","api.snapcraft.io","archive.ubuntu.com","azure.archive.ubuntu.com","codeload.github.com","crl.geotrust.com","crl.globalsign.com","crl.identrust.com","crl.sectigo.com","crl.thawte.com","crl.usertrust.com","crl.verisign.com","crl3.digicert.com","crl4.digicert.com","crls.ssl.com","docs.github.com","github-cloud.githubusercontent.com","github-cloud.s3.amazonaws.com","github.blog","github.com","github.githubassets.com","host.docker.internal","json-schema.org","json.schemastore.org","keyserver.ubuntu.com","lfs.github.com","objects.githubusercontent.com","ocsp.digicert.com","ocsp.geotrust.com","ocsp.globalsign.com","ocsp.identrust.com","ocsp.sectigo.com","ocsp.ssl.com","ocsp.thawte.com","ocsp.usertrust.com","ocsp.verisign.com","packagecloud.io","packages.cloud.google.com","packages.microsoft.com","ppa.launchpad.net","raw.githubusercontent.com","registry.npmjs.org","s.symcb.com","s.symcd.com","security.ubuntu.com","telemetry.enterprise.githubcopilot.com","ts-crl.ws.symantec.com","ts-ocsp.ws.symantec.com","www.googleapis.com"]},"apiProxy":{"enabled":true,"maxEffectiveTokens":10000000,"models":{"auto":["large"],"deep-research":["copilot/deep-research*","copilot/o3-deep-research*","copilot/o4-mini-deep-research*","google/deep-research*","gemini/deep-research*","openai/o3-deep-research*","openai/o4-mini-deep-research*"],"gemini-flash":["copilot/gemini-*flash*","google/gemini-*flash*","gemini/gemini-*flash*"],"gemini-flash-lite":["copilot/gemini-*flash*lite*","google/gemini-*flash*lite*","gemini/gemini-*flash*lite*"],"gemini-pro":["copilot/gemini-*pro*","google/gemini-*pro*","gemini/gemini-*pro*"],"gpt-4.1":["copilot/gpt-4.1*","openai/gpt-4.1*"],"gpt-5":["copilot/gpt-5*","openai/gpt-5*"],"gpt-5-codex":["copilot/gpt-5*codex*","openai/gpt-5*codex*"],"gpt-5-mini":["copilot/gpt-5*mini*","openai/gpt-5*mini*"],"gpt-5-nano":["copilot/gpt-5*nano*","openai/gpt-5*nano*"],"gpt-5-pro":["copilot/gpt-5*pro*","openai/gpt-5*pro*"],"haiku":["copilot/*haiku*","anthropic/*haiku*"],"large":["sonnet","gpt-5-pro","gpt-5","gemini-pro"],"mini":["haiku","gpt-5-mini","gpt-5-nano","gemini-flash-lite"],"opus":["copilot/*opus*","anthropic/*opus*"],"reasoning":["copilot/o1*","copilot/o3*","copilot/o4*","openai/o1*","openai/o3*","openai/o4*"],"small":["mini"],"sonnet":["copilot/*sonnet*","anthropic/*sonnet*"]}},"container":{"imageTag":"0.25.42"}}' > "${RUNNER_TEMP}/gh-aw/awf-config.json" && cp "${RUNNER_TEMP}/gh-aw/awf-config.json" /tmp/gh-aw/awf-config.json
+ # shellcheck disable=SC1003
+ sudo -E awf --config "${RUNNER_TEMP}/gh-aw/awf-config.json" --container-workdir "${GITHUB_WORKSPACE}" --mount "${RUNNER_TEMP}/gh-aw:${RUNNER_TEMP}/gh-aw:ro" --mount "${RUNNER_TEMP}/gh-aw:/host${RUNNER_TEMP}/gh-aw:ro" --env-all --exclude-env COPILOT_GITHUB_TOKEN --exclude-env GH_TOKEN --exclude-env GITHUB_MCP_SERVER_TOKEN --exclude-env MCP_GATEWAY_API_KEY --log-level info --proxy-logs-dir /tmp/gh-aw/sandbox/firewall/logs --audit-dir /tmp/gh-aw/sandbox/firewall/audit --enable-host-access --allow-host-ports 80,443,8080 --skip-pull --difc-proxy-host host.docker.internal:18443 --difc-proxy-ca-cert /tmp/gh-aw/difc-proxy-tls/ca.crt \
+ -- /bin/bash -c 'export PATH="${RUNNER_TEMP}/gh-aw/mcp-cli/bin:$PATH" && export PATH="$(find /opt/hostedtoolcache /home/runner/work/_tool -maxdepth 5 -type d -name bin 2>/dev/null | tr '\''\n'\'' '\'':'\'')$PATH"; [ -n "$GOROOT" ] && export PATH="$GOROOT/bin:$PATH" || true && GH_AW_NODE_EXEC="${GH_AW_NODE_BIN:-}"; if [ -z "$GH_AW_NODE_EXEC" ] || [ ! -x "$GH_AW_NODE_EXEC" ]; then GH_AW_NODE_EXEC="$(command -v node 2>/dev/null || true)"; fi; if [ -z "$GH_AW_NODE_EXEC" ]; then echo "node runtime missing on this runner — check runtimes.node in workflow YAML" >&2; exit 127; fi; "$GH_AW_NODE_EXEC" ${RUNNER_TEMP}/gh-aw/actions/copilot_harness.cjs /usr/local/bin/copilot --add-dir /tmp/gh-aw/ --log-level all --log-dir /tmp/gh-aw/sandbox/agent/logs/ --disable-builtin-mcps --no-ask-user --allow-all-tools --allow-all-paths --add-dir "${GITHUB_WORKSPACE}" --prompt-file /tmp/gh-aw/aw-prompts/prompt.txt' 2>&1 | tee -a /tmp/gh-aw/agent-stdio.log
+ env:
+ AWF_REFLECT_ENABLED: 1
+ COPILOT_AGENT_RUNNER_TYPE: STANDALONE
+ COPILOT_API_KEY: dummy-byok-key-for-offline-mode
+ COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }}
+ COPILOT_MODEL: ${{ vars.GH_AW_MODEL_AGENT_COPILOT || 'claude-sonnet-4.6' }}
+ GH_AW_MCP_CONFIG: /home/runner/.copilot/mcp-config.json
+ GH_AW_PHASE: agent
+ GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
+ GH_AW_SAFE_OUTPUTS: ${{ steps.set-runtime-paths.outputs.GH_AW_SAFE_OUTPUTS }}
+ GH_AW_VERSION: dev
+ GH_TOKEN: ${{ secrets.GH_AW_GITHUB_TOKEN || github.token }}
+ GITHUB_API_URL: ${{ github.api_url }}
+ GITHUB_AW: true
+ GITHUB_COPILOT_INTEGRATION_ID: agentic-workflows
+ GITHUB_HEAD_REF: ${{ github.head_ref }}
+ GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
+ GITHUB_REF_NAME: ${{ github.ref_name }}
+ GITHUB_SERVER_URL: ${{ github.server_url }}
+ GITHUB_STEP_SUMMARY: /tmp/gh-aw/agent-step-summary.md
+ GITHUB_WORKSPACE: ${{ github.workspace }}
+ GIT_AUTHOR_EMAIL: github-actions[bot]@users.noreply.github.com
+ GIT_AUTHOR_NAME: github-actions[bot]
+ GIT_COMMITTER_EMAIL: github-actions[bot]@users.noreply.github.com
+ GIT_COMMITTER_NAME: github-actions[bot]
+ XDG_CONFIG_HOME: /home/runner
+ - name: Stop CLI Proxy
+ if: always()
+ continue-on-error: true
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/stop_cli_proxy.sh"
+ - name: Detect Copilot errors
+ id: detect-copilot-errors
+ if: always()
+ continue-on-error: true
+ run: node "${RUNNER_TEMP}/gh-aw/actions/detect_copilot_errors.cjs"
+ - name: Configure Git credentials
+ env:
+ REPO_NAME: ${{ github.repository }}
+ SERVER_URL: ${{ github.server_url }}
+ GITHUB_TOKEN: ${{ github.token }}
+ run: |
+ git config --global user.email "github-actions[bot]@users.noreply.github.com"
+ git config --global user.name "github-actions[bot]"
+ git config --global am.keepcr true
+ # Re-authenticate git with GitHub token
+ SERVER_URL_STRIPPED="${SERVER_URL#https://}"
+ git remote set-url origin "https://x-access-token:${GITHUB_TOKEN}@${SERVER_URL_STRIPPED}/${REPO_NAME}.git"
+ echo "Git configured with standard GitHub Actions identity"
+ - name: Copy Copilot session state files to logs
+ if: always()
+ continue-on-error: true
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/copy_copilot_session_state.sh"
+ - name: Stop MCP Gateway
+ if: always()
+ continue-on-error: true
+ env:
+ MCP_GATEWAY_PORT: ${{ steps.start-mcp-gateway.outputs.gateway-port }}
+ MCP_GATEWAY_API_KEY: ${{ steps.start-mcp-gateway.outputs.gateway-api-key }}
+ GATEWAY_PID: ${{ steps.start-mcp-gateway.outputs.gateway-pid }}
+ run: |
+ bash "${RUNNER_TEMP}/gh-aw/actions/stop_mcp_gateway.sh" "$GATEWAY_PID"
+ - name: Redact secrets in logs
+ if: always()
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+ with:
+ script: |
+ const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io, getOctokit);
+ const { main } = require('${{ runner.temp }}/gh-aw/actions/redact_secrets.cjs');
+ await main();
+ env:
+ GH_AW_SECRET_NAMES: 'COPILOT_GITHUB_TOKEN,GH_AW_GITHUB_MCP_SERVER_TOKEN,GH_AW_GITHUB_TOKEN,GITHUB_TOKEN'
+ SECRET_COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }}
+ SECRET_GH_AW_GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN }}
+ SECRET_GH_AW_GITHUB_TOKEN: ${{ secrets.GH_AW_GITHUB_TOKEN }}
+ SECRET_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ - name: Append agent step summary
+ if: always()
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/append_agent_step_summary.sh"
+ - name: Copy Safe Outputs
+ if: always()
+ env:
+ GH_AW_SAFE_OUTPUTS: ${{ steps.set-runtime-paths.outputs.GH_AW_SAFE_OUTPUTS }}
+ run: |
+ mkdir -p /tmp/gh-aw
+ cp "$GH_AW_SAFE_OUTPUTS" /tmp/gh-aw/safeoutputs.jsonl 2>/dev/null || true
+ - name: Ingest agent output
+ id: collect_output
+ if: always()
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+ env:
+ GH_AW_SAFE_OUTPUTS: ${{ steps.set-runtime-paths.outputs.GH_AW_SAFE_OUTPUTS }}
+ GH_AW_ALLOWED_DOMAINS: "*.githubusercontent.com,api.business.githubcopilot.com,api.enterprise.githubcopilot.com,api.github.com,api.githubcopilot.com,api.individual.githubcopilot.com,api.snapcraft.io,archive.ubuntu.com,azure.archive.ubuntu.com,codeload.github.com,crl.geotrust.com,crl.globalsign.com,crl.identrust.com,crl.sectigo.com,crl.thawte.com,crl.usertrust.com,crl.verisign.com,crl3.digicert.com,crl4.digicert.com,crls.ssl.com,docs.github.com,github-cloud.githubusercontent.com,github-cloud.s3.amazonaws.com,github.blog,github.com,github.githubassets.com,host.docker.internal,json-schema.org,json.schemastore.org,keyserver.ubuntu.com,lfs.github.com,objects.githubusercontent.com,ocsp.digicert.com,ocsp.geotrust.com,ocsp.globalsign.com,ocsp.identrust.com,ocsp.sectigo.com,ocsp.ssl.com,ocsp.thawte.com,ocsp.usertrust.com,ocsp.verisign.com,packagecloud.io,packages.cloud.google.com,packages.microsoft.com,ppa.launchpad.net,raw.githubusercontent.com,registry.npmjs.org,s.symcb.com,s.symcd.com,security.ubuntu.com,telemetry.enterprise.githubcopilot.com,ts-crl.ws.symantec.com,ts-ocsp.ws.symantec.com,www.googleapis.com"
+ GH_AW_ALLOWED_GITHUB_REFS: ""
+ GITHUB_SERVER_URL: ${{ github.server_url }}
+ GITHUB_API_URL: ${{ github.api_url }}
+ with:
+ script: |
+ const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io, getOctokit);
+ const { main } = require('${{ runner.temp }}/gh-aw/actions/collect_ndjson_output.cjs');
+ await main();
+ - name: Parse agent logs for step summary
+ if: always()
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+ env:
+ GH_AW_AGENT_OUTPUT: /tmp/gh-aw/sandbox/agent/logs/
+ with:
+ script: |
+ const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io, getOctokit);
+ const { main } = require('${{ runner.temp }}/gh-aw/actions/parse_copilot_log.cjs');
+ await main();
+ - name: Parse MCP Gateway logs for step summary
+ if: always()
+ id: parse-mcp-gateway
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+ with:
+ script: |
+ const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io, getOctokit);
+ const { main } = require('${{ runner.temp }}/gh-aw/actions/parse_mcp_gateway_log.cjs');
+ await main();
+ - name: Print firewall logs
+ if: always()
+ continue-on-error: true
+ env:
+ AWF_LOGS_DIR: /tmp/gh-aw/sandbox/firewall/logs
+ run: |
+ # Fix permissions on firewall logs/audit dirs so they can be uploaded as artifacts
+ # AWF runs with sudo, creating files owned by root
+ sudo chmod -R a+rX /tmp/gh-aw/sandbox/firewall 2>/dev/null || true
+ # Only run awf logs summary if awf command exists (it may not be installed if workflow failed before install step)
+ if command -v awf &> /dev/null; then
+ awf logs summary | tee -a "$GITHUB_STEP_SUMMARY"
+ else
+ echo 'AWF binary not installed, skipping firewall log summary'
+ fi
+ - name: Parse token usage for step summary
+ if: always()
+ continue-on-error: true
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+ with:
+ script: |
+ const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io, getOctokit);
+ const { main } = require('${{ runner.temp }}/gh-aw/actions/parse_token_usage.cjs');
+ await main();
+ - name: Print AWF reflect summary
+ if: always()
+ continue-on-error: true
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+ with:
+ script: |
+ const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io, getOctokit);
+ const { main } = require('${{ runner.temp }}/gh-aw/actions/awf_reflect_summary.cjs');
+ await main();
+ - name: Generate observability summary
+ if: always()
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+ with:
+ script: |
+ const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io, getOctokit);
+ const { main } = require('${{ runner.temp }}/gh-aw/actions/generate_observability_summary.cjs');
+ await main(core);
+ - name: Write agent output placeholder if missing
+ if: always()
+ run: |
+ if [ ! -f /tmp/gh-aw/agent_output.json ]; then
+ echo '{"items":[]}' > /tmp/gh-aw/agent_output.json
+ fi
+ - name: Finalize workflow portfolio report
+ run: |-
+ set -euo pipefail
+ mkdir -p /tmp/gh-aw
+ python3 scripts/aw_yield_postcompute.py --precompute /tmp/aw-yield-precompute.json --agent-output /tmp/gh-aw --out /tmp/aw-yield-final.json
+
+ - name: Upload agent artifacts
+ if: always()
+ continue-on-error: true
+ uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
+ with:
+ name: agent
+ path: |
+ /tmp/gh-aw/aw-prompts/prompt.txt
+ /tmp/gh-aw/sandbox/agent/logs/
+ /tmp/gh-aw/redacted-urls.log
+ /tmp/gh-aw/mcp-logs/
+ /tmp/gh-aw/agent_usage.json
+ /tmp/gh-aw/agent-stdio.log
+ /tmp/gh-aw/pre-agent-audit.txt
+ /tmp/gh-aw/agent/
+ /tmp/gh-aw/github_rate_limits.jsonl
+ /tmp/gh-aw/otel.jsonl
+ /tmp/gh-aw/copilot-otel.jsonl
+ /tmp/gh-aw/safeoutputs.jsonl
+ /tmp/gh-aw/agent_output.json
+ /tmp/gh-aw/aw-*.patch
+ /tmp/gh-aw/aw-*.bundle
+ /tmp/gh-aw/awf-config.json
+ /tmp/gh-aw/sandbox/firewall/logs/
+ /tmp/gh-aw/sandbox/firewall/audit/
+ /tmp/gh-aw/sandbox/firewall/awf-reflect.json
+ if-no-files-found: ignore
+
+ conclusion:
+ needs:
+ - activation
+ - agent
+ - detection
+ - safe_outputs
+ if: >
+ always() && (needs.agent.result != 'skipped' || needs.activation.outputs.lockdown_check_failed == 'true' ||
+ needs.activation.outputs.stale_lock_file_failed == 'true')
+ runs-on: ubuntu-slim
+ permissions:
+ contents: read
+ issues: write
+ concurrency:
+ group: "gh-aw-conclusion-aw-portfolio-yield"
+ cancel-in-progress: false
+ outputs:
+ incomplete_count: ${{ steps.report_incomplete.outputs.incomplete_count }}
+ noop_message: ${{ steps.noop.outputs.noop_message }}
+ tools_reported: ${{ steps.missing_tool.outputs.tools_reported }}
+ total_count: ${{ steps.missing_tool.outputs.total_count }}
+ steps:
+ - name: Checkout actions folder
+ uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+ with:
+ repository: github/gh-aw
+ sparse-checkout: |
+ actions
+ persist-credentials: false
+ - name: Setup Scripts
+ id: setup
+ uses: ./actions/setup
+ with:
+ destination: ${{ runner.temp }}/gh-aw/actions
+ job-name: ${{ github.job }}
+ trace-id: ${{ needs.activation.outputs.setup-trace-id }}
+ parent-span-id: ${{ needs.activation.outputs.setup-parent-span-id || needs.activation.outputs.setup-span-id }}
+ env:
+ GH_AW_SETUP_WORKFLOW_NAME: "Agentic Workflow Portfolio Yield"
+ GH_AW_CURRENT_WORKFLOW_REF: ${{ github.repository }}/.github/workflows/aw-portfolio-yield.lock.yml@${{ github.ref }}
+ GH_AW_INFO_VERSION: "1.0.43"
+ - name: Download agent output artifact
+ id: download-agent-output
+ continue-on-error: true
+ uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
+ with:
+ name: agent
+ path: /tmp/gh-aw/
+ - name: Setup agent output environment variable
+ id: setup-agent-output-env
+ if: steps.download-agent-output.outcome == 'success'
+ run: |
+ mkdir -p /tmp/gh-aw/
+ find "/tmp/gh-aw/" -type f -print
+ echo "GH_AW_AGENT_OUTPUT=/tmp/gh-aw/agent_output.json" >> "$GITHUB_OUTPUT"
+ - name: Process no-op messages
+ id: noop
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+ env:
+ GH_AW_AGENT_OUTPUT: ${{ steps.setup-agent-output-env.outputs.GH_AW_AGENT_OUTPUT }}
+ GH_AW_NOOP_MAX: "1"
+ GH_AW_WORKFLOW_NAME: "Agentic Workflow Portfolio Yield"
+ GH_AW_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
+ GH_AW_AGENT_CONCLUSION: ${{ needs.agent.result }}
+ GH_AW_NOOP_REPORT_AS_ISSUE: "true"
+ with:
+ github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
+ script: |
+ const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io, getOctokit);
+ const { main } = require('${{ runner.temp }}/gh-aw/actions/handle_noop_message.cjs');
+ await main();
+ - name: Log detection run
+ id: detection_runs
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+ env:
+ GH_AW_AGENT_OUTPUT: ${{ steps.setup-agent-output-env.outputs.GH_AW_AGENT_OUTPUT }}
+ GH_AW_WORKFLOW_NAME: "Agentic Workflow Portfolio Yield"
+ GH_AW_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
+ GH_AW_DETECTION_CONCLUSION: ${{ needs.detection.outputs.detection_conclusion }}
+ GH_AW_DETECTION_REASON: ${{ needs.detection.outputs.detection_reason }}
+ with:
+ github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
+ script: |
+ const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io, getOctokit);
+ const { main } = require('${{ runner.temp }}/gh-aw/actions/handle_detection_runs.cjs');
+ await main();
+ - name: Record missing tool
+ id: missing_tool
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+ env:
+ GH_AW_AGENT_OUTPUT: ${{ steps.setup-agent-output-env.outputs.GH_AW_AGENT_OUTPUT }}
+ GH_AW_MISSING_TOOL_CREATE_ISSUE: "true"
+ GH_AW_WORKFLOW_NAME: "Agentic Workflow Portfolio Yield"
+ with:
+ github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
+ script: |
+ const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io, getOctokit);
+ const { main } = require('${{ runner.temp }}/gh-aw/actions/missing_tool.cjs');
+ await main();
+ - name: Record incomplete
+ id: report_incomplete
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+ env:
+ GH_AW_AGENT_OUTPUT: ${{ steps.setup-agent-output-env.outputs.GH_AW_AGENT_OUTPUT }}
+ GH_AW_REPORT_INCOMPLETE_CREATE_ISSUE: "true"
+ GH_AW_WORKFLOW_NAME: "Agentic Workflow Portfolio Yield"
+ with:
+ github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
+ script: |
+ const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io, getOctokit);
+ const { main } = require('${{ runner.temp }}/gh-aw/actions/report_incomplete_handler.cjs');
+ await main();
+ - name: Handle agent failure
+ id: handle_agent_failure
+ if: always()
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+ env:
+ GH_AW_AGENT_OUTPUT: ${{ steps.setup-agent-output-env.outputs.GH_AW_AGENT_OUTPUT }}
+ GH_AW_WORKFLOW_NAME: "Agentic Workflow Portfolio Yield"
+ GH_AW_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
+ GH_AW_AGENT_CONCLUSION: ${{ needs.agent.result }}
+ GH_AW_WORKFLOW_ID: "aw-portfolio-yield"
+ GH_AW_ACTION_FAILURE_ISSUE_EXPIRES_HOURS: "12"
+ GH_AW_ENGINE_ID: "copilot"
+ GH_AW_SECRET_VERIFICATION_RESULT: ${{ needs.activation.outputs.secret_verification_result }}
+ GH_AW_CHECKOUT_PR_SUCCESS: ${{ needs.agent.outputs.checkout_pr_success }}
+ GH_AW_EFFECTIVE_TOKENS: ${{ needs.agent.outputs.effective_tokens || '' }}
+ GH_AW_EFFECTIVE_TOKENS_RATE_LIMIT_ERROR: ${{ needs.agent.outputs.effective_tokens_rate_limit_error || 'false' }}
+ GH_AW_INFERENCE_ACCESS_ERROR: ${{ needs.agent.outputs.inference_access_error }}
+ GH_AW_MCP_POLICY_ERROR: ${{ needs.agent.outputs.mcp_policy_error }}
+ GH_AW_AGENTIC_ENGINE_TIMEOUT: ${{ needs.agent.outputs.agentic_engine_timeout }}
+ GH_AW_MODEL_NOT_SUPPORTED_ERROR: ${{ needs.agent.outputs.model_not_supported_error }}
+ GH_AW_ENGINE_API_HOSTS: "api.enterprise.githubcopilot.com,api.githubcopilot.com,api.business.githubcopilot.com,api.individual.githubcopilot.com"
+ GH_AW_LOCKDOWN_CHECK_FAILED: ${{ needs.activation.outputs.lockdown_check_failed }}
+ GH_AW_STALE_LOCK_FILE_FAILED: ${{ needs.activation.outputs.stale_lock_file_failed }}
+ GH_AW_GROUP_REPORTS: "false"
+ GH_AW_FAILURE_REPORT_AS_ISSUE: "true"
+ GH_AW_MISSING_TOOL_REPORT_AS_FAILURE: "true"
+ GH_AW_MISSING_DATA_REPORT_AS_FAILURE: "true"
+ GH_AW_TIMEOUT_MINUTES: "25"
+ GH_AW_MAX_EFFECTIVE_TOKENS: "10000000"
+ with:
+ github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
+ script: |
+ const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io, getOctokit);
+ const { main } = require('${{ runner.temp }}/gh-aw/actions/handle_agent_failure.cjs');
+ await main();
+
+ detection:
+ needs:
+ - activation
+ - agent
+ if: >
+ always() && needs.agent.result != 'skipped' && (needs.agent.outputs.output_types != '' || needs.agent.outputs.has_patch == 'true')
+ runs-on: ubuntu-latest
+ permissions:
+ contents: read
+ outputs:
+ detection_conclusion: ${{ steps.detection_conclusion.outputs.conclusion }}
+ detection_reason: ${{ steps.detection_conclusion.outputs.reason }}
+ detection_success: ${{ steps.detection_conclusion.outputs.success }}
+ steps:
+ - name: Checkout actions folder
+ uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+ with:
+ repository: github/gh-aw
+ sparse-checkout: |
+ actions
+ persist-credentials: false
+ - name: Setup Scripts
+ id: setup
+ uses: ./actions/setup
+ with:
+ destination: ${{ runner.temp }}/gh-aw/actions
+ job-name: ${{ github.job }}
+ trace-id: ${{ needs.activation.outputs.setup-trace-id }}
+ parent-span-id: ${{ needs.activation.outputs.setup-parent-span-id || needs.activation.outputs.setup-span-id }}
+ env:
+ GH_AW_SETUP_WORKFLOW_NAME: "Agentic Workflow Portfolio Yield"
+ GH_AW_CURRENT_WORKFLOW_REF: ${{ github.repository }}/.github/workflows/aw-portfolio-yield.lock.yml@${{ github.ref }}
+ GH_AW_INFO_VERSION: "1.0.43"
+ - name: Download agent output artifact
+ id: download-agent-output
+ continue-on-error: true
+ uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
+ with:
+ name: agent
+ path: /tmp/gh-aw/
+ - name: Setup agent output environment variable
+ id: setup-agent-output-env
+ if: steps.download-agent-output.outcome == 'success'
+ run: |
+ mkdir -p /tmp/gh-aw/
+ find "/tmp/gh-aw/" -type f -print
+ echo "GH_AW_AGENT_OUTPUT=/tmp/gh-aw/agent_output.json" >> "$GITHUB_OUTPUT"
+ - name: Checkout repository for patch context
+ if: needs.agent.outputs.has_patch == 'true'
+ uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+ with:
+ persist-credentials: false
+ # --- Threat Detection ---
+ - name: Clean stale firewall files from agent artifact
+ run: |
+ rm -rf /tmp/gh-aw/sandbox/firewall/logs
+ rm -rf /tmp/gh-aw/sandbox/firewall/audit
+ - name: Download container images
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/download_docker_images.sh" ghcr.io/github/gh-aw-firewall/agent:0.25.42 ghcr.io/github/gh-aw-firewall/api-proxy:0.25.42 ghcr.io/github/gh-aw-firewall/squid:0.25.42
+ - name: Check if detection needed
+ id: detection_guard
+ if: always()
+ env:
+ OUTPUT_TYPES: ${{ needs.agent.outputs.output_types }}
+ HAS_PATCH: ${{ needs.agent.outputs.has_patch }}
+ run: |
+ if [[ -n "$OUTPUT_TYPES" || "$HAS_PATCH" == "true" ]]; then
+ echo "run_detection=true" >> "$GITHUB_OUTPUT"
+ echo "Detection will run: output_types=$OUTPUT_TYPES, has_patch=$HAS_PATCH"
+ else
+ echo "run_detection=false" >> "$GITHUB_OUTPUT"
+ echo "Detection skipped: no agent outputs or patches to analyze"
+ fi
+ - name: Clear MCP Config for detection
+ if: always() && steps.detection_guard.outputs.run_detection == 'true'
+ run: |
+ rm -f "${RUNNER_TEMP}/gh-aw/mcp-config/mcp-servers.json"
+ rm -f /home/runner/.copilot/mcp-config.json
+ rm -f "$GITHUB_WORKSPACE/.gemini/settings.json"
+ - name: Prepare threat detection files
+ if: always() && steps.detection_guard.outputs.run_detection == 'true'
+ run: |
+ mkdir -p /tmp/gh-aw/threat-detection/aw-prompts
+ cp /tmp/gh-aw/aw-prompts/prompt.txt /tmp/gh-aw/threat-detection/aw-prompts/prompt.txt 2>/dev/null || true
+ cp /tmp/gh-aw/agent_output.json /tmp/gh-aw/threat-detection/agent_output.json 2>/dev/null || true
+ for f in /tmp/gh-aw/aw-*.patch; do
+ [ -f "$f" ] && cp "$f" /tmp/gh-aw/threat-detection/ 2>/dev/null || true
+ done
+ for f in /tmp/gh-aw/aw-*.bundle; do
+ [ -f "$f" ] && cp "$f" /tmp/gh-aw/threat-detection/ 2>/dev/null || true
+ done
+ echo "Prepared threat detection files:"
+ ls -la /tmp/gh-aw/threat-detection/ 2>/dev/null || true
+ - name: Setup threat detection
+ if: always() && steps.detection_guard.outputs.run_detection == 'true'
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+ env:
+ WORKFLOW_NAME: "Agentic Workflow Portfolio Yield"
+ WORKFLOW_DESCRIPTION: "Weekly portfolio analysis of agentic workflows using deterministic scoring, overlap detection, and OTel-backed evidence for governance recommendations"
+ HAS_PATCH: ${{ needs.agent.outputs.has_patch }}
+ with:
+ script: |
+ const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io, getOctokit);
+ const { main } = require('${{ runner.temp }}/gh-aw/actions/setup_threat_detection.cjs');
+ await main();
+ - name: Ensure threat-detection directory and log
+ if: always() && steps.detection_guard.outputs.run_detection == 'true'
+ run: |
+ mkdir -p /tmp/gh-aw/threat-detection
+ touch /tmp/gh-aw/threat-detection/detection.log
+ - name: Setup Node.js
+ uses: actions/setup-node@48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e # v6.4.0
+ with:
+ node-version: '24'
+ package-manager-cache: false
+ - name: Install GitHub Copilot CLI
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/install_copilot_cli.sh" 1.0.43
+ env:
+ GH_HOST: github.com
+ - name: Install AWF binary
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/install_awf_binary.sh" v0.25.42
+ - name: Execute GitHub Copilot CLI
+ if: always() && steps.detection_guard.outputs.run_detection == 'true'
+ continue-on-error: true
+ id: detection_agentic_execution
+ # Copilot CLI tool arguments (sorted):
+ timeout-minutes: 20
+ run: |
+ set -o pipefail
+ touch /tmp/gh-aw/agent-step-summary.md
+ GH_AW_NODE_BIN=$(command -v node 2>/dev/null || true)
+ export GH_AW_NODE_BIN
+ (umask 177 && touch /tmp/gh-aw/threat-detection/detection.log)
+ printf '%s\n' '{"$schema":"https://github.com/github/gh-aw-firewall/releases/download/v0.25.42/awf-config.schema.json","network":{"allowDomains":["api.business.githubcopilot.com","api.enterprise.githubcopilot.com","api.github.com","api.githubcopilot.com","api.individual.githubcopilot.com","github.com","host.docker.internal","telemetry.enterprise.githubcopilot.com"]},"apiProxy":{"enabled":true,"maxEffectiveTokens":10000000},"container":{"imageTag":"0.25.42"}}' > "${RUNNER_TEMP}/gh-aw/awf-config.json" && cp "${RUNNER_TEMP}/gh-aw/awf-config.json" /tmp/gh-aw/awf-config.json
+ # shellcheck disable=SC1003
+ sudo -E awf --config "${RUNNER_TEMP}/gh-aw/awf-config.json" --container-workdir "${GITHUB_WORKSPACE}" --mount "${RUNNER_TEMP}/gh-aw:${RUNNER_TEMP}/gh-aw:ro" --mount "${RUNNER_TEMP}/gh-aw:/host${RUNNER_TEMP}/gh-aw:ro" --env-all --exclude-env COPILOT_GITHUB_TOKEN --log-level info --proxy-logs-dir /tmp/gh-aw/sandbox/firewall/logs --audit-dir /tmp/gh-aw/sandbox/firewall/audit --enable-host-access --allow-host-ports 80,443,8080 --skip-pull \
+ -- /bin/bash -c 'export PATH="$(find /opt/hostedtoolcache /home/runner/work/_tool -maxdepth 5 -type d -name bin 2>/dev/null | tr '\''\n'\'' '\'':'\'')$PATH"; [ -n "$GOROOT" ] && export PATH="$GOROOT/bin:$PATH" || true && GH_AW_NODE_EXEC="${GH_AW_NODE_BIN:-}"; if [ -z "$GH_AW_NODE_EXEC" ] || [ ! -x "$GH_AW_NODE_EXEC" ]; then GH_AW_NODE_EXEC="$(command -v node 2>/dev/null || true)"; fi; if [ -z "$GH_AW_NODE_EXEC" ]; then echo "node runtime missing on this runner — check runtimes.node in workflow YAML" >&2; exit 127; fi; "$GH_AW_NODE_EXEC" ${RUNNER_TEMP}/gh-aw/actions/copilot_harness.cjs /usr/local/bin/copilot --add-dir /tmp/gh-aw/ --log-level all --log-dir /tmp/gh-aw/sandbox/agent/logs/ --disable-builtin-mcps --no-ask-user --allow-all-tools --add-dir "${GITHUB_WORKSPACE}" --prompt-file /tmp/gh-aw/aw-prompts/prompt.txt' 2>&1 | tee -a /tmp/gh-aw/threat-detection/detection.log
+ env:
+ AWF_REFLECT_ENABLED: 1
+ COPILOT_AGENT_RUNNER_TYPE: STANDALONE
+ COPILOT_API_KEY: dummy-byok-key-for-offline-mode
+ COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }}
+ COPILOT_MODEL: ${{ vars.GH_AW_MODEL_DETECTION_COPILOT || 'claude-sonnet-4.6' }}
+ GH_AW_PHASE: detection
+ GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
+ GH_AW_VERSION: dev
+ GITHUB_API_URL: ${{ github.api_url }}
+ GITHUB_AW: true
+ GITHUB_COPILOT_INTEGRATION_ID: agentic-workflows
+ GITHUB_HEAD_REF: ${{ github.head_ref }}
+ GITHUB_REF_NAME: ${{ github.ref_name }}
+ GITHUB_SERVER_URL: ${{ github.server_url }}
+ GITHUB_STEP_SUMMARY: /tmp/gh-aw/agent-step-summary.md
+ GITHUB_WORKSPACE: ${{ github.workspace }}
+ GIT_AUTHOR_EMAIL: github-actions[bot]@users.noreply.github.com
+ GIT_AUTHOR_NAME: github-actions[bot]
+ GIT_COMMITTER_EMAIL: github-actions[bot]@users.noreply.github.com
+ GIT_COMMITTER_NAME: github-actions[bot]
+ XDG_CONFIG_HOME: /home/runner
+ - name: Upload threat detection log
+ if: always() && steps.detection_guard.outputs.run_detection == 'true'
+ uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
+ with:
+ name: detection
+ path: /tmp/gh-aw/threat-detection/detection.log
+ if-no-files-found: ignore
+ - name: Parse and conclude threat detection
+ id: detection_conclusion
+ if: always()
+ continue-on-error: true
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+ env:
+ RUN_DETECTION: ${{ steps.detection_guard.outputs.run_detection }}
+ DETECTION_AGENTIC_EXECUTION_OUTCOME: ${{ steps.detection_agentic_execution.outcome }}
+ GH_AW_DETECTION_CONTINUE_ON_ERROR: "true"
+ with:
+ script: |
+ try {
+ const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io, getOctokit);
+ const { main } = require('${{ runner.temp }}/gh-aw/actions/parse_threat_detection_results.cjs');
+ await main();
+ } catch (loadErr) {
+ const continueOnError = process.env.GH_AW_DETECTION_CONTINUE_ON_ERROR !== 'false';
+ const detectionExecutionFailed = process.env.DETECTION_AGENTIC_EXECUTION_OUTCOME === 'failure';
+ const msg = 'ERR_SYSTEM: \u274C Unexpected error loading threat detection module: ' + (loadErr && loadErr.message ? loadErr.message : String(loadErr));
+ core.error(msg);
+ core.setOutput('reason', 'parse_error');
+ if (continueOnError && !detectionExecutionFailed) {
+ core.warning('\u26A0\uFE0F ' + msg);
+ core.setOutput('conclusion', 'warning');
+ core.setOutput('success', 'false');
+ } else {
+ core.setOutput('conclusion', 'failure');
+ core.setOutput('success', 'false');
+ core.setFailed(msg);
+ }
+ }
+
+ safe_outputs:
+ needs:
+ - activation
+ - agent
+ - detection
+ if: (!cancelled()) && needs.agent.result != 'skipped' && needs.detection.result == 'success'
+ runs-on: ubuntu-slim
+ permissions:
+ contents: read
+ issues: write
+ timeout-minutes: 15
+ env:
+ GH_AW_CALLER_WORKFLOW_ID: "${{ github.repository }}/aw-portfolio-yield"
+ GH_AW_DETECTION_CONCLUSION: ${{ needs.detection.outputs.detection_conclusion }}
+ GH_AW_DETECTION_REASON: ${{ needs.detection.outputs.detection_reason }}
+ GH_AW_EFFECTIVE_TOKENS: ${{ needs.agent.outputs.effective_tokens }}
+ GH_AW_ENGINE_ID: "copilot"
+ GH_AW_ENGINE_MODEL: ${{ needs.agent.outputs.model }}
+ GH_AW_ENGINE_VERSION: "1.0.43"
+ GH_AW_WORKFLOW_ID: "aw-portfolio-yield"
+ GH_AW_WORKFLOW_NAME: "Agentic Workflow Portfolio Yield"
+ outputs:
+ code_push_failure_count: ${{ steps.process_safe_outputs.outputs.code_push_failure_count }}
+ code_push_failure_errors: ${{ steps.process_safe_outputs.outputs.code_push_failure_errors }}
+ create_discussion_error_count: ${{ steps.process_safe_outputs.outputs.create_discussion_error_count }}
+ create_discussion_errors: ${{ steps.process_safe_outputs.outputs.create_discussion_errors }}
+ created_issue_number: ${{ steps.process_safe_outputs.outputs.created_issue_number }}
+ created_issue_url: ${{ steps.process_safe_outputs.outputs.created_issue_url }}
+ process_safe_outputs_processed_count: ${{ steps.process_safe_outputs.outputs.processed_count }}
+ process_safe_outputs_temporary_id_map: ${{ steps.process_safe_outputs.outputs.temporary_id_map }}
+ steps:
+ - name: Checkout actions folder
+ uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+ with:
+ repository: github/gh-aw
+ sparse-checkout: |
+ actions
+ persist-credentials: false
+ - name: Setup Scripts
+ id: setup
+ uses: ./actions/setup
+ with:
+ destination: ${{ runner.temp }}/gh-aw/actions
+ job-name: ${{ github.job }}
+ trace-id: ${{ needs.activation.outputs.setup-trace-id }}
+ parent-span-id: ${{ needs.activation.outputs.setup-parent-span-id || needs.activation.outputs.setup-span-id }}
+ env:
+ GH_AW_SETUP_WORKFLOW_NAME: "Agentic Workflow Portfolio Yield"
+ GH_AW_CURRENT_WORKFLOW_REF: ${{ github.repository }}/.github/workflows/aw-portfolio-yield.lock.yml@${{ github.ref }}
+ GH_AW_INFO_VERSION: "1.0.43"
+ - name: Mask OTLP telemetry headers
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/mask_otlp_headers.sh"
+ - name: Download agent output artifact
+ id: download-agent-output
+ continue-on-error: true
+ uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
+ with:
+ name: agent
+ path: /tmp/gh-aw/
+ - name: Setup agent output environment variable
+ id: setup-agent-output-env
+ if: steps.download-agent-output.outcome == 'success'
+ run: |
+ mkdir -p /tmp/gh-aw/
+ find "/tmp/gh-aw/" -type f -print
+ echo "GH_AW_AGENT_OUTPUT=/tmp/gh-aw/agent_output.json" >> "$GITHUB_OUTPUT"
+ - name: Configure GH_HOST for enterprise compatibility
+ id: ghes-host-config
+ shell: bash
+ run: |
+ # Derive GH_HOST from GITHUB_SERVER_URL so the gh CLI targets the correct
+ # GitHub instance (GHES/GHEC). On github.com this is a harmless no-op.
+ GH_HOST="${GITHUB_SERVER_URL#https://}"
+ GH_HOST="${GH_HOST#http://}"
+ echo "GH_HOST=${GH_HOST}" >> "$GITHUB_ENV"
+ - name: Process Safe Outputs
+ id: process_safe_outputs
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+ env:
+ GH_AW_AGENT_OUTPUT: ${{ steps.setup-agent-output-env.outputs.GH_AW_AGENT_OUTPUT }}
+ GH_AW_ALLOWED_DOMAINS: "*.githubusercontent.com,api.business.githubcopilot.com,api.enterprise.githubcopilot.com,api.github.com,api.githubcopilot.com,api.individual.githubcopilot.com,api.snapcraft.io,archive.ubuntu.com,azure.archive.ubuntu.com,codeload.github.com,crl.geotrust.com,crl.globalsign.com,crl.identrust.com,crl.sectigo.com,crl.thawte.com,crl.usertrust.com,crl.verisign.com,crl3.digicert.com,crl4.digicert.com,crls.ssl.com,docs.github.com,github-cloud.githubusercontent.com,github-cloud.s3.amazonaws.com,github.blog,github.com,github.githubassets.com,host.docker.internal,json-schema.org,json.schemastore.org,keyserver.ubuntu.com,lfs.github.com,objects.githubusercontent.com,ocsp.digicert.com,ocsp.geotrust.com,ocsp.globalsign.com,ocsp.identrust.com,ocsp.sectigo.com,ocsp.ssl.com,ocsp.thawte.com,ocsp.usertrust.com,ocsp.verisign.com,packagecloud.io,packages.cloud.google.com,packages.microsoft.com,ppa.launchpad.net,raw.githubusercontent.com,registry.npmjs.org,s.symcb.com,s.symcd.com,security.ubuntu.com,telemetry.enterprise.githubcopilot.com,ts-crl.ws.symantec.com,ts-ocsp.ws.symantec.com,www.googleapis.com"
+ GITHUB_SERVER_URL: ${{ github.server_url }}
+ GITHUB_API_URL: ${{ github.api_url }}
+ GH_AW_SAFE_OUTPUTS_HANDLER_CONFIG: "{\"create_issue\":{\"close_older_issues\":true,\"expires\":720,\"labels\":[\"automation\",\"report\",\"observability\"],\"max\":1},\"create_report_incomplete_issue\":{},\"missing_data\":{},\"missing_tool\":{},\"noop\":{\"max\":1,\"report-as-issue\":\"true\"},\"report_incomplete\":{}}"
+ with:
+ github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
+ script: |
+ const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io, getOctokit);
+ const { main } = require('${{ runner.temp }}/gh-aw/actions/safe_output_handler_manager.cjs');
+ await main();
+ - name: Upload Safe Outputs Items
+ if: always()
+ uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
+ with:
+ name: safe-outputs-items
+ path: |
+ /tmp/gh-aw/safe-output-items.jsonl
+ /tmp/gh-aw/temporary-id-map.json
+ if-no-files-found: ignore
+
diff --git a/.github/workflows/aw-portfolio-yield.md b/.github/workflows/aw-portfolio-yield.md
index f5f59879d38..307fd1e46d6 100644
--- a/.github/workflows/aw-portfolio-yield.md
+++ b/.github/workflows/aw-portfolio-yield.md
@@ -9,7 +9,7 @@ permissions:
actions: read
issues: read
pull-requests: read
-engine: claude
+engine: copilot
strict: true
timeout-minutes: 25
network:
@@ -22,7 +22,6 @@ tools:
safe-outputs:
mentions: false
allowed-github-references: []
- max-bot-mentions: 0
create-issue:
labels: [automation, report, observability]
max: 1
diff --git a/scripts/aw_yield_precompute.py b/scripts/aw_yield_precompute.py
index 37c09c2396b..ed72abdb2fb 100644
--- a/scripts/aw_yield_precompute.py
+++ b/scripts/aw_yield_precompute.py
@@ -226,6 +226,16 @@ def split_key_value(text: str) -> tuple[str, str | None]:
raise InputError(f"Invalid frontmatter line: {text}")
+def maybe_split_mapping(text: str) -> tuple[str, str | None] | None:
+ try:
+ key, rest = split_key_value(text)
+ except InputError:
+ return None
+ if not re.fullmatch(r"[A-Za-z0-9_-]+", key):
+ return None
+ return key, rest
+
+
def _next_significant(lines: list[str], start: int) -> int:
index = start
while index < len(lines):
@@ -236,6 +246,26 @@ def _next_significant(lines: list[str], start: int) -> int:
return index
+def parse_block_scalar(lines: list[str], start: int, indent: int) -> tuple[str, int]:
+ chunks: list[str] = []
+ index = start
+ while index < len(lines):
+ raw = lines[index]
+ stripped = raw.strip()
+ current_indent = len(raw) - len(raw.lstrip(" "))
+ if stripped and current_indent < indent:
+ break
+ if stripped == "":
+ chunks.append("")
+ index += 1
+ continue
+ if current_indent < indent:
+ break
+ chunks.append(raw[indent:])
+ index += 1
+ return "\n".join(chunks).rstrip(), index
+
+
def parse_yaml_block(lines: list[str], start: int = 0, indent: int = 0) -> tuple[Any, int]:
start = _next_significant(lines, start)
if start >= len(lines):
@@ -244,6 +274,7 @@ def parse_yaml_block(lines: list[str], start: int = 0, indent: int = 0) -> tuple
current_indent = len(line) - len(line.lstrip(" "))
if current_indent < indent:
return {}, start
+ indent = current_indent
is_list = line.lstrip().startswith("- ")
if is_list:
items: list[Any] = []
@@ -265,10 +296,14 @@ def parse_yaml_block(lines: list[str], start: int = 0, indent: int = 0) -> tuple
child, index = parse_yaml_block(lines, index, indent + 2)
items.append(child)
continue
- if ":" in payload:
- key, rest = split_key_value(payload)
+ mapping = maybe_split_mapping(payload)
+ if mapping is not None:
+ key, rest = mapping
item: dict[str, Any] = {}
- if rest is None:
+ if rest in {"|", ">", "|-", ">-"}:
+ child, index = parse_block_scalar(lines, index, indent + 4)
+ item[key] = child
+ elif rest is None:
child, index = parse_yaml_block(lines, index, indent + 2)
item[key] = child
else:
@@ -287,7 +322,10 @@ def parse_yaml_block(lines: list[str], start: int = 0, indent: int = 0) -> tuple
break
extra_key, extra_rest = split_key_value(next_raw.strip())
index = lookahead + 1
- if extra_rest is None:
+ if extra_rest in {"|", ">", "|-", ">-"}:
+ child, index = parse_block_scalar(lines, index, indent + 4)
+ item[extra_key] = child
+ elif extra_rest is None:
child, index = parse_yaml_block(lines, index, indent + 4)
item[extra_key] = child
else:
@@ -307,13 +345,16 @@ def parse_yaml_block(lines: list[str], start: int = 0, indent: int = 0) -> tuple
if current_indent < indent:
break
if current_indent > indent:
- raise InputError(f"Unexpected indentation in frontmatter: {raw}")
+ break
stripped = raw.strip()
if stripped.startswith("- "):
break
key, rest = split_key_value(stripped)
index += 1
- if rest is None:
+ if rest in {"|", ">", "|-", ">-"}:
+ child, index = parse_block_scalar(lines, index, indent + 2)
+ mapping[key] = child
+ elif rest is None:
if index < len(lines) and _next_significant(lines, index) < len(lines):
child, index = parse_yaml_block(lines, index, indent + 2)
mapping[key] = child
@@ -1098,14 +1139,7 @@ def precompute(workflows_root: Path, otel_summary_path: str | None = None) -> di
overlap_peers[right][left] = similarity
for workflow in workflows:
workflow["overlap_drag"] = round_score(overlap_by_path.get(workflow["path"], 0.0))
- workflow["maintenance_drag"] = score_maintenance(
- {"imports": [], "tools": {}},
- "",
- overlap_hint=workflow["overlap_drag"],
- agentic_fraction=workflow["agentic_fraction"],
- has_precompute=workflow["pre_agent_steps_count"] > 0,
- has_postcompute=workflow["post_steps_count"] > 0,
- )
+ workflow["maintenance_drag"] = round_score(workflow["maintenance_drag"] + workflow["overlap_drag"] * 0.2)
workflow["yield"] = compute_workflow_yield(
workflow["usefulness"],
workflow["adoption"],
From 3240c27cc404db4d2f97f56119ebab079bc782e7 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Sun, 10 May 2026 19:27:15 +0000
Subject: [PATCH 03/13] Fix AWY permission risk scoring for id-token
Co-authored-by: mnkiefer <8320933+mnkiefer@users.noreply.github.com>
---
scripts/aw_yield_precompute.py | 17 ++++++++---------
tests/test_aw_yield_precompute.py | 6 ++++++
2 files changed, 14 insertions(+), 9 deletions(-)
diff --git a/scripts/aw_yield_precompute.py b/scripts/aw_yield_precompute.py
index ed72abdb2fb..f75240d9700 100644
--- a/scripts/aw_yield_precompute.py
+++ b/scripts/aw_yield_precompute.py
@@ -486,18 +486,17 @@ def permissions_risk(permissions: Any) -> float:
read_scopes = 0
elevated = 0
id_token = 0
- for level in permissions.values():
- normalized = normalize_text(level).lower()
- if normalized == "read":
+ for scope, level in permissions.items():
+ normalized_scope = normalize_text(scope).lower()
+ normalized_level = normalize_text(level).lower()
+ if normalized_scope == "id-token" and normalized_level in RISKY_PERMISSION_LEVELS:
+ id_token += 1
+ if normalized_level == "read":
read_scopes += 1
- elif normalized in RISKY_PERMISSION_LEVELS:
- elevated += 1
- elif normalized == "write":
+ elif normalized_level in RISKY_PERMISSION_LEVELS:
elevated += 1
- elif normalized == "none":
+ elif normalized_level == "none":
continue
- elif normalized == "id-token":
- id_token += 1
breadth = clamp(read_scopes / 6.0)
return round_score(0.2 + breadth * 0.35 + elevated * 0.45 + id_token * 0.1)
diff --git a/tests/test_aw_yield_precompute.py b/tests/test_aw_yield_precompute.py
index 47af1eb6568..b547fa154d3 100644
--- a/tests/test_aw_yield_precompute.py
+++ b/tests/test_aw_yield_precompute.py
@@ -107,6 +107,12 @@ def test_missing_timeout_increases_risk(tmp_path: Path) -> None:
assert pre.build_workflow_record(untimed, workflows, {})["risk"] > pre.build_workflow_record(timed, workflows, {})["risk"]
+def test_id_token_permission_increases_risk() -> None:
+ base = pre.permissions_risk({"contents": "write"})
+ with_id_token = pre.permissions_risk({"contents": "write", "id-token": "write"})
+ assert with_id_token > base
+
+
def test_overlap_detection_finds_similar_workflows() -> None:
workflows = [
{"path": "a.md", "intent_text": "review pull request code quality security review", "agentic_fraction": 0.4},
From 0fceb5eca14aabfa1c7717d7ceba4d925d33b1ce Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Sun, 10 May 2026 19:46:19 +0000
Subject: [PATCH 04/13] Guard AWY overlap drag against invalid scores
Co-authored-by: mnkiefer <8320933+mnkiefer@users.noreply.github.com>
---
scripts/aw_yield_postcompute.py | 3 ++-
tests/test_aw_yield_postcompute.py | 13 +++++++++++++
2 files changed, 15 insertions(+), 1 deletion(-)
diff --git a/scripts/aw_yield_postcompute.py b/scripts/aw_yield_postcompute.py
index c6a8c05e332..5e8110e913c 100644
--- a/scripts/aw_yield_postcompute.py
+++ b/scripts/aw_yield_postcompute.py
@@ -133,7 +133,8 @@ def recompute_overlap_drag(payload: dict[str, Any]) -> float:
for pair in pairs:
if not isinstance(pair, dict):
continue
- drag += float(pair.get("score", 0.0)) ** 2 * 2.0
+ score = pre.clamp(pair.get("score", 0.0))
+ drag += score**2 * 2.0
return round(drag, 4)
diff --git a/tests/test_aw_yield_postcompute.py b/tests/test_aw_yield_postcompute.py
index 1ef9ed2678e..222d9534e77 100644
--- a/tests/test_aw_yield_postcompute.py
+++ b/tests/test_aw_yield_postcompute.py
@@ -168,3 +168,16 @@ def test_postcompute_does_not_allow_invented_telemetry_to_increase_confidence(tm
final_payload, _summary, notes = post.finalize(sample_precompute(), agent_dir)
assert final_payload["evidence_quality"] == "low"
assert any("invented telemetry" in note.lower() for note in notes)
+
+
+def test_recompute_overlap_drag_ignores_invalid_scores() -> None:
+ payload = {
+ "overlap_pairs": [
+ {"score": 0.5},
+ {"score": "bad"},
+ {"score": float("nan")},
+ {"score": float("inf")},
+ {"score": -1},
+ ]
+ }
+ assert post.recompute_overlap_drag(payload) == 0.5
From 939ac97158a95f2653c16ad841f9fdda47574a5b Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Sun, 10 May 2026 19:47:36 +0000
Subject: [PATCH 05/13] Refine AWY overlap drag regression coverage
Co-authored-by: mnkiefer <8320933+mnkiefer@users.noreply.github.com>
---
tests/test_aw_yield_postcompute.py | 17 ++++++-----------
1 file changed, 6 insertions(+), 11 deletions(-)
diff --git a/tests/test_aw_yield_postcompute.py b/tests/test_aw_yield_postcompute.py
index 222d9534e77..093af5eed09 100644
--- a/tests/test_aw_yield_postcompute.py
+++ b/tests/test_aw_yield_postcompute.py
@@ -170,14 +170,9 @@ def test_postcompute_does_not_allow_invented_telemetry_to_increase_confidence(tm
assert any("invented telemetry" in note.lower() for note in notes)
-def test_recompute_overlap_drag_ignores_invalid_scores() -> None:
- payload = {
- "overlap_pairs": [
- {"score": 0.5},
- {"score": "bad"},
- {"score": float("nan")},
- {"score": float("inf")},
- {"score": -1},
- ]
- }
- assert post.recompute_overlap_drag(payload) == 0.5
+def test_recompute_overlap_drag_clamps_invalid_scores() -> None:
+ assert post.recompute_overlap_drag({"overlap_pairs": [{"score": "bad"}]}) == 0.0
+ assert post.recompute_overlap_drag({"overlap_pairs": [{"score": float("nan")}]}) == 0.0
+ assert post.recompute_overlap_drag({"overlap_pairs": [{"score": float("inf")}]}) == 0.0
+ assert post.recompute_overlap_drag({"overlap_pairs": [{"score": -1}]}) == 0.0
+ assert post.recompute_overlap_drag({"overlap_pairs": [{"score": 0.5}]}) == 0.5
From 4829ec10dc5675d31f17cd78d745632373e13f07 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Sun, 10 May 2026 19:54:20 +0000
Subject: [PATCH 06/13] Restrict AWY workflow import paths
Co-authored-by: mnkiefer <8320933+mnkiefer@users.noreply.github.com>
---
scripts/aw_yield_precompute.py | 30 ++++++++++++++++++++++++++----
tests/test_aw_yield_precompute.py | 31 +++++++++++++++++++++++++++++++
2 files changed, 57 insertions(+), 4 deletions(-)
diff --git a/scripts/aw_yield_precompute.py b/scripts/aw_yield_precompute.py
index f75240d9700..57fe1512b09 100644
--- a/scripts/aw_yield_precompute.py
+++ b/scripts/aw_yield_precompute.py
@@ -397,7 +397,25 @@ def as_list(value: Any) -> list[Any]:
return [value]
+def _get_workflows_root(workflow_path: Path) -> Path | None:
+ resolved = workflow_path.resolve()
+ for candidate in (resolved.parent, *resolved.parents):
+ if candidate.name == "workflows" and candidate.parent.name == ".github":
+ return candidate
+ return None
+
+
+def _path_is_within(path: Path, root: Path) -> bool:
+ try:
+ path.relative_to(root)
+ return True
+ except ValueError:
+ return False
+
+
def normalize_import_paths(workflow_path: Path, frontmatter: dict[str, Any]) -> list[Path]:
+ workflows_root = _get_workflows_root(workflow_path)
+ shared_root = workflows_root / "shared" if workflows_root else None
imports = []
for item in as_list(frontmatter.get("imports")):
raw: str | None = None
@@ -410,12 +428,16 @@ def normalize_import_paths(workflow_path: Path, frontmatter: dict[str, Any]) ->
pass
else:
continue
+ if not workflows_root or raw.startswith("/"):
+ continue
if raw.startswith("shared/"):
- imports.append(workflow_path.parent / raw)
+ import_path = (workflows_root / raw).resolve()
+ if shared_root and _path_is_within(import_path, shared_root):
+ imports.append(import_path)
elif raw.startswith("./") or raw.startswith("../"):
- imports.append((workflow_path.parent / raw).resolve())
- elif raw.startswith("/"):
- imports.append(Path(raw))
+ import_path = (workflow_path.parent / raw).resolve()
+ if _path_is_within(import_path, workflows_root):
+ imports.append(import_path)
return imports
diff --git a/tests/test_aw_yield_precompute.py b/tests/test_aw_yield_precompute.py
index b547fa154d3..e354253f0c0 100644
--- a/tests/test_aw_yield_precompute.py
+++ b/tests/test_aw_yield_precompute.py
@@ -56,6 +56,37 @@ def test_imported_observability_is_detected(tmp_path: Path) -> None:
assert pre.has_imported_observability(workflow, frontmatter) is True
+def test_imports_outside_workflows_root_are_rejected(tmp_path: Path) -> None:
+ workflows = tmp_path / ".github" / "workflows"
+ escaped = tmp_path / "outside.md"
+ write_workflow(
+ escaped,
+ "---\nobservability:\n otlp:\n endpoint:\n url: https://example.invalid\n---\n",
+ )
+ workflow = workflows / "alpha.md"
+ write_workflow(
+ workflow,
+ f"---\nimports:\n - ../outside.md\n - {escaped}\n---\n# Alpha\n",
+ )
+ frontmatter, _ = pre.read_workflow(workflow)
+ assert pre.normalize_import_paths(workflow, frontmatter) == []
+ assert pre.has_imported_observability(workflow, frontmatter) is False
+
+
+def test_shared_imports_must_stay_under_shared_directory(tmp_path: Path) -> None:
+ workflows = tmp_path / ".github" / "workflows"
+ escaped = workflows / "outside.md"
+ write_workflow(
+ escaped,
+ "---\nobservability:\n otlp:\n endpoint:\n url: https://example.invalid\n---\n",
+ )
+ workflow = workflows / "alpha.md"
+ write_workflow(workflow, "---\nimports:\n - shared/../outside.md\n---\n# Alpha\n")
+ frontmatter, _ = pre.read_workflow(workflow)
+ assert pre.normalize_import_paths(workflow, frontmatter) == []
+ assert pre.has_imported_observability(workflow, frontmatter) is False
+
+
def test_missing_safe_outputs_increases_risk(tmp_path: Path) -> None:
workflows = tmp_path / ".github" / "workflows"
base = "---\non:\n workflow_dispatch:\npermissions:\n contents: read\nstrict: true\ntimeout-minutes: 10\n---\n# Alpha\n"
From ae49d996e4cae0391a1bf67d3e5fcac9ba4651b3 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Sun, 10 May 2026 19:55:39 +0000
Subject: [PATCH 07/13] Clarify AWY import path guards
Co-authored-by: mnkiefer <8320933+mnkiefer@users.noreply.github.com>
---
scripts/aw_yield_precompute.py | 6 ++++--
tests/test_aw_yield_precompute.py | 17 ++++++++++++++---
2 files changed, 18 insertions(+), 5 deletions(-)
diff --git a/scripts/aw_yield_precompute.py b/scripts/aw_yield_precompute.py
index 57fe1512b09..9fa6164ab94 100644
--- a/scripts/aw_yield_precompute.py
+++ b/scripts/aw_yield_precompute.py
@@ -428,11 +428,13 @@ def normalize_import_paths(workflow_path: Path, frontmatter: dict[str, Any]) ->
pass
else:
continue
- if not workflows_root or raw.startswith("/"):
+ if not workflows_root:
+ continue
+ if raw.startswith("/"):
continue
if raw.startswith("shared/"):
import_path = (workflows_root / raw).resolve()
- if shared_root and _path_is_within(import_path, shared_root):
+ if _path_is_within(import_path, shared_root):
imports.append(import_path)
elif raw.startswith("./") or raw.startswith("../"):
import_path = (workflow_path.parent / raw).resolve()
diff --git a/tests/test_aw_yield_precompute.py b/tests/test_aw_yield_precompute.py
index e354253f0c0..e988d10f12b 100644
--- a/tests/test_aw_yield_precompute.py
+++ b/tests/test_aw_yield_precompute.py
@@ -56,7 +56,7 @@ def test_imported_observability_is_detected(tmp_path: Path) -> None:
assert pre.has_imported_observability(workflow, frontmatter) is True
-def test_imports_outside_workflows_root_are_rejected(tmp_path: Path) -> None:
+def test_relative_imports_outside_workflows_root_are_rejected(tmp_path: Path) -> None:
workflows = tmp_path / ".github" / "workflows"
escaped = tmp_path / "outside.md"
write_workflow(
@@ -64,10 +64,21 @@ def test_imports_outside_workflows_root_are_rejected(tmp_path: Path) -> None:
"---\nobservability:\n otlp:\n endpoint:\n url: https://example.invalid\n---\n",
)
workflow = workflows / "alpha.md"
+ write_workflow(workflow, "---\nimports:\n - ../outside.md\n---\n# Alpha\n")
+ frontmatter, _ = pre.read_workflow(workflow)
+ assert pre.normalize_import_paths(workflow, frontmatter) == []
+ assert pre.has_imported_observability(workflow, frontmatter) is False
+
+
+def test_absolute_imports_are_rejected(tmp_path: Path) -> None:
+ workflows = tmp_path / ".github" / "workflows"
+ escaped = tmp_path / "outside.md"
write_workflow(
- workflow,
- f"---\nimports:\n - ../outside.md\n - {escaped}\n---\n# Alpha\n",
+ escaped,
+ "---\nobservability:\n otlp:\n endpoint:\n url: https://example.invalid\n---\n",
)
+ workflow = workflows / "alpha.md"
+ write_workflow(workflow, f"---\nimports:\n - {escaped}\n---\n# Alpha\n")
frontmatter, _ = pre.read_workflow(workflow)
assert pre.normalize_import_paths(workflow, frontmatter) == []
assert pre.has_imported_observability(workflow, frontmatter) is False
From 53b661f648ea2e666c5f7a044e6e9694510e447e Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Sun, 10 May 2026 19:56:31 +0000
Subject: [PATCH 08/13] Inline AWY shared import root guard
Co-authored-by: mnkiefer <8320933+mnkiefer@users.noreply.github.com>
---
scripts/aw_yield_precompute.py | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/scripts/aw_yield_precompute.py b/scripts/aw_yield_precompute.py
index 9fa6164ab94..fbf8abb92a7 100644
--- a/scripts/aw_yield_precompute.py
+++ b/scripts/aw_yield_precompute.py
@@ -415,7 +415,6 @@ def _path_is_within(path: Path, root: Path) -> bool:
def normalize_import_paths(workflow_path: Path, frontmatter: dict[str, Any]) -> list[Path]:
workflows_root = _get_workflows_root(workflow_path)
- shared_root = workflows_root / "shared" if workflows_root else None
imports = []
for item in as_list(frontmatter.get("imports")):
raw: str | None = None
@@ -434,7 +433,7 @@ def normalize_import_paths(workflow_path: Path, frontmatter: dict[str, Any]) ->
continue
if raw.startswith("shared/"):
import_path = (workflows_root / raw).resolve()
- if _path_is_within(import_path, shared_root):
+ if _path_is_within(import_path, workflows_root / "shared"):
imports.append(import_path)
elif raw.startswith("./") or raw.startswith("../"):
import_path = (workflow_path.parent / raw).resolve()
From 1f8b397dfdcbbbbf07c70f6efeb73b37faa4ba5d Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Sun, 10 May 2026 19:57:22 +0000
Subject: [PATCH 09/13] Canonicalize AWY import containment checks
Co-authored-by: mnkiefer <8320933+mnkiefer@users.noreply.github.com>
---
scripts/aw_yield_precompute.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/scripts/aw_yield_precompute.py b/scripts/aw_yield_precompute.py
index fbf8abb92a7..795a9ce3e23 100644
--- a/scripts/aw_yield_precompute.py
+++ b/scripts/aw_yield_precompute.py
@@ -407,7 +407,7 @@ def _get_workflows_root(workflow_path: Path) -> Path | None:
def _path_is_within(path: Path, root: Path) -> bool:
try:
- path.relative_to(root)
+ path.resolve().relative_to(root.resolve())
return True
except ValueError:
return False
From cfa2359293ee54bc6978014769438e43abb14461 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Sun, 10 May 2026 19:58:24 +0000
Subject: [PATCH 10/13] Harden AWY absolute import detection
Co-authored-by: mnkiefer <8320933+mnkiefer@users.noreply.github.com>
---
scripts/aw_yield_precompute.py | 10 +++++++---
tests/test_aw_yield_precompute.py | 12 ++++++++++--
2 files changed, 17 insertions(+), 5 deletions(-)
diff --git a/scripts/aw_yield_precompute.py b/scripts/aw_yield_precompute.py
index 795a9ce3e23..cd1f2e64fd3 100644
--- a/scripts/aw_yield_precompute.py
+++ b/scripts/aw_yield_precompute.py
@@ -10,7 +10,7 @@
import re
import sys
from collections import Counter, defaultdict
-from pathlib import Path
+from pathlib import Path, PureWindowsPath
from typing import Any
LAMBDA = 0.25
@@ -413,6 +413,10 @@ def _path_is_within(path: Path, root: Path) -> bool:
return False
+def _is_absolute_import_path(raw: str) -> bool:
+ return Path(raw).is_absolute() or PureWindowsPath(raw).is_absolute()
+
+
def normalize_import_paths(workflow_path: Path, frontmatter: dict[str, Any]) -> list[Path]:
workflows_root = _get_workflows_root(workflow_path)
imports = []
@@ -423,13 +427,13 @@ def normalize_import_paths(workflow_path: Path, frontmatter: dict[str, Any]) ->
elif isinstance(item, dict):
raw = normalize_text(item.get("uses")) or normalize_text(item.get("path"))
if not raw or "@" in raw or "/" in raw and not raw.startswith("shared/") and not raw.startswith("."):
- if raw and raw.startswith(("shared/", "./", "../", "/")):
+ if raw and (raw.startswith(("shared/", "./", "../", "/")) or _is_absolute_import_path(raw)):
pass
else:
continue
if not workflows_root:
continue
- if raw.startswith("/"):
+ if _is_absolute_import_path(raw):
continue
if raw.startswith("shared/"):
import_path = (workflows_root / raw).resolve()
diff --git a/tests/test_aw_yield_precompute.py b/tests/test_aw_yield_precompute.py
index e988d10f12b..3a5cd69ae81 100644
--- a/tests/test_aw_yield_precompute.py
+++ b/tests/test_aw_yield_precompute.py
@@ -56,7 +56,7 @@ def test_imported_observability_is_detected(tmp_path: Path) -> None:
assert pre.has_imported_observability(workflow, frontmatter) is True
-def test_relative_imports_outside_workflows_root_are_rejected(tmp_path: Path) -> None:
+def test_relative_import_escapes_are_rejected(tmp_path: Path) -> None:
workflows = tmp_path / ".github" / "workflows"
escaped = tmp_path / "outside.md"
write_workflow(
@@ -84,7 +84,15 @@ def test_absolute_imports_are_rejected(tmp_path: Path) -> None:
assert pre.has_imported_observability(workflow, frontmatter) is False
-def test_shared_imports_must_stay_under_shared_directory(tmp_path: Path) -> None:
+def test_windows_absolute_imports_are_rejected(tmp_path: Path) -> None:
+ workflows = tmp_path / ".github" / "workflows"
+ workflow = workflows / "alpha.md"
+ write_workflow(workflow, "---\nimports:\n - C:\\outside.md\n---\n# Alpha\n")
+ frontmatter, _ = pre.read_workflow(workflow)
+ assert pre.normalize_import_paths(workflow, frontmatter) == []
+
+
+def test_shared_import_escapes_are_rejected(tmp_path: Path) -> None:
workflows = tmp_path / ".github" / "workflows"
escaped = workflows / "outside.md"
write_workflow(
From 7bbf72f444a4288a5164c2c1d5e36c1f59dd4726 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Sun, 10 May 2026 19:59:24 +0000
Subject: [PATCH 11/13] Tighten AWY workflow root discovery
Co-authored-by: mnkiefer <8320933+mnkiefer@users.noreply.github.com>
---
scripts/aw_yield_precompute.py | 2 ++
tests/test_aw_yield_precompute.py | 2 +-
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/scripts/aw_yield_precompute.py b/scripts/aw_yield_precompute.py
index cd1f2e64fd3..9055f795873 100644
--- a/scripts/aw_yield_precompute.py
+++ b/scripts/aw_yield_precompute.py
@@ -400,6 +400,8 @@ def as_list(value: Any) -> list[Any]:
def _get_workflows_root(workflow_path: Path) -> Path | None:
resolved = workflow_path.resolve()
for candidate in (resolved.parent, *resolved.parents):
+ if candidate.parent == candidate:
+ break
if candidate.name == "workflows" and candidate.parent.name == ".github":
return candidate
return None
diff --git a/tests/test_aw_yield_precompute.py b/tests/test_aw_yield_precompute.py
index 3a5cd69ae81..d71512b2d48 100644
--- a/tests/test_aw_yield_precompute.py
+++ b/tests/test_aw_yield_precompute.py
@@ -87,7 +87,7 @@ def test_absolute_imports_are_rejected(tmp_path: Path) -> None:
def test_windows_absolute_imports_are_rejected(tmp_path: Path) -> None:
workflows = tmp_path / ".github" / "workflows"
workflow = workflows / "alpha.md"
- write_workflow(workflow, "---\nimports:\n - C:\\outside.md\n---\n# Alpha\n")
+ write_workflow(workflow, "---\nimports:\n - \\\\server\\share\\outside.md\n---\n# Alpha\n")
frontmatter, _ = pre.read_workflow(workflow)
assert pre.normalize_import_paths(workflow, frontmatter) == []
From 28d7313dad5599851e327e9e7c22b34f68c09d7b Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Mon, 11 May 2026 06:27:41 +0000
Subject: [PATCH 12/13] Use github-script for AWY generated steps
Co-authored-by: mnkiefer <8320933+mnkiefer@users.noreply.github.com>
---
.github/workflows/aw-portfolio-yield.lock.yml | 204 +++++++-----------
.github/workflows/aw-portfolio-yield.md | 40 +++-
scripts/aw_yield_postcompute.cjs | 44 ++++
scripts/aw_yield_precompute.cjs | 35 +++
4 files changed, 189 insertions(+), 134 deletions(-)
create mode 100644 scripts/aw_yield_postcompute.cjs
create mode 100644 scripts/aw_yield_precompute.cjs
diff --git a/.github/workflows/aw-portfolio-yield.lock.yml b/.github/workflows/aw-portfolio-yield.lock.yml
index b8800f5fe3b..b57d1f96c4f 100644
--- a/.github/workflows/aw-portfolio-yield.lock.yml
+++ b/.github/workflows/aw-portfolio-yield.lock.yml
@@ -1,5 +1,5 @@
-# gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"d36a8beb35fc7079f029faf93be7fc0e20f0546e3735b7676083df09be7bcb7b","strict":true,"agent_id":"copilot"}
-# gh-aw-manifest: {"version":1,"secrets":["COPILOT_GITHUB_TOKEN","GH_AW_GITHUB_MCP_SERVER_TOKEN","GH_AW_GITHUB_TOKEN","GITHUB_TOKEN","OTLP_ENDPOINT","OTLP_TOKEN"],"actions":[{"repo":"actions/checkout","sha":"de0fac2e4500dabe0009e67214ff5f5447ce83dd","version":"v6.0.2"},{"repo":"actions/download-artifact","sha":"3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c","version":"v8.0.1"},{"repo":"actions/github-script","sha":"3a2844b7e9c422d3c10d287c895573f7108da1b3","version":"v9.0.0"},{"repo":"actions/setup-node","sha":"48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e","version":"v6.4.0"},{"repo":"actions/upload-artifact","sha":"043fb46d1a93c77aae656e7c1c64a875d1fc6a0a","version":"v7.0.1"}],"containers":[{"image":"ghcr.io/github/gh-aw-firewall/agent:0.25.42"},{"image":"ghcr.io/github/gh-aw-firewall/api-proxy:0.25.42"},{"image":"ghcr.io/github/gh-aw-firewall/cli-proxy:0.25.42"},{"image":"ghcr.io/github/gh-aw-firewall/squid:0.25.42"},{"image":"ghcr.io/github/gh-aw-mcpg:v0.3.6","digest":"sha256:2bb8eef86006a4c5963c55616a9c51c32f27bfdecb023b8aa6f91f6718d9171c","pinned_image":"ghcr.io/github/gh-aw-mcpg:v0.3.6@sha256:2bb8eef86006a4c5963c55616a9c51c32f27bfdecb023b8aa6f91f6718d9171c"},{"image":"ghcr.io/github/github-mcp-server:v1.0.3","digest":"sha256:2ac27ef03461ef2b877031b838a7d1fd7f12b12d4ace7796d8cad91446d55959","pinned_image":"ghcr.io/github/github-mcp-server:v1.0.3@sha256:2ac27ef03461ef2b877031b838a7d1fd7f12b12d4ace7796d8cad91446d55959"},{"image":"node:lts-alpine","digest":"sha256:d1b3b4da11eefd5941e7f0b9cf17783fc99d9c6fc34884a665f40a06dbdfc94f","pinned_image":"node:lts-alpine@sha256:d1b3b4da11eefd5941e7f0b9cf17783fc99d9c6fc34884a665f40a06dbdfc94f"}]}
+# gh-aw-metadata: {"schema_version":"v3","frontmatter_hash":"69b046be850ac611c1b77355ed8ff147f41f198b7f2298740383327b156850ab","compiler_version":"v0.71.5","strict":true,"agent_id":"copilot"}
+# gh-aw-manifest: {"version":1,"secrets":["COPILOT_GITHUB_TOKEN","GH_AW_GITHUB_MCP_SERVER_TOKEN","GH_AW_GITHUB_TOKEN","GITHUB_TOKEN","OTLP_ENDPOINT","OTLP_TOKEN"],"actions":[{"repo":"actions/checkout","sha":"de0fac2e4500dabe0009e67214ff5f5447ce83dd","version":"v6.0.2"},{"repo":"actions/download-artifact","sha":"3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c","version":"v8.0.1"},{"repo":"actions/github-script","sha":"3a2844b7e9c422d3c10d287c895573f7108da1b3","version":"v9"},{"repo":"actions/setup-node","sha":"48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e","version":"v6.4.0"},{"repo":"actions/upload-artifact","sha":"043fb46d1a93c77aae656e7c1c64a875d1fc6a0a","version":"v7.0.1"},{"repo":"github/gh-aw-actions/setup","sha":"v0.71.5","version":"v0.71.5"}],"containers":[{"image":"ghcr.io/github/gh-aw-firewall/agent:0.25.40","digest":"sha256:14ff567e8d9d4c2fbc5e55c973488381c71d7e0fdbe72d30ee7b8a738fd86504","pinned_image":"ghcr.io/github/gh-aw-firewall/agent:0.25.40@sha256:14ff567e8d9d4c2fbc5e55c973488381c71d7e0fdbe72d30ee7b8a738fd86504"},{"image":"ghcr.io/github/gh-aw-firewall/api-proxy:0.25.40","digest":"sha256:2883ca3e5ae9f330cafdd9345bfd4ae17fc8da36c96d4c9a1f76e922b4c45280","pinned_image":"ghcr.io/github/gh-aw-firewall/api-proxy:0.25.40@sha256:2883ca3e5ae9f330cafdd9345bfd4ae17fc8da36c96d4c9a1f76e922b4c45280"},{"image":"ghcr.io/github/gh-aw-firewall/cli-proxy:0.25.40","digest":"sha256:3e7152911d4b4b7b97beef9d3d7d924ff7902227e86001ef3838fb728d5d514c","pinned_image":"ghcr.io/github/gh-aw-firewall/cli-proxy:0.25.40@sha256:3e7152911d4b4b7b97beef9d3d7d924ff7902227e86001ef3838fb728d5d514c"},{"image":"ghcr.io/github/gh-aw-firewall/squid:0.25.40","digest":"sha256:b084f4a2c771f584ee68084ced52fa6b3245197a1889645d817462d307d3ac51","pinned_image":"ghcr.io/github/gh-aw-firewall/squid:0.25.40@sha256:b084f4a2c771f584ee68084ced52fa6b3245197a1889645d817462d307d3ac51"},{"image":"ghcr.io/github/gh-aw-mcpg:v0.3.6","digest":"sha256:2bb8eef86006a4c5963c55616a9c51c32f27bfdecb023b8aa6f91f6718d9171c","pinne
d_image":"ghcr.io/github/gh-aw-mcpg:v0.3.6@sha256:2bb8eef86006a4c5963c55616a9c51c32f27bfdecb023b8aa6f91f6718d9171c"},{"image":"ghcr.io/github/github-mcp-server:v1.0.3","digest":"sha256:2ac27ef03461ef2b877031b838a7d1fd7f12b12d4ace7796d8cad91446d55959","pinned_image":"ghcr.io/github/github-mcp-server:v1.0.3@sha256:2ac27ef03461ef2b877031b838a7d1fd7f12b12d4ace7796d8cad91446d55959"},{"image":"node:lts-alpine","digest":"sha256:d1b3b4da11eefd5941e7f0b9cf17783fc99d9c6fc34884a665f40a06dbdfc94f","pinned_image":"node:lts-alpine@sha256:d1b3b4da11eefd5941e7f0b9cf17783fc99d9c6fc34884a665f40a06dbdfc94f"}]}
# ___ _ _
# / _ \ | | (_)
# | |_| | __ _ ___ _ __ | |_ _ ___
@@ -14,7 +14,7 @@
# \ /\ / (_) | | | | ( | | | | (_) \ V V /\__ \
# \/ \/ \___/|_| |_|\_\|_| |_|\___/ \_/\_/ |___/
#
-# This file was automatically generated by gh-aw. DO NOT EDIT.
+# This file was automatically generated by gh-aw (v0.71.5). DO NOT EDIT.
#
# To update this file, edit the corresponding .md file and run:
# gh aw compile
@@ -43,16 +43,17 @@
# Custom actions used:
# - actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
# - actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1
+# - actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9
# - actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
-# - actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0 (source v9)
# - actions/setup-node@48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e # v6.4.0
# - actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
+# - github/gh-aw-actions/setup@v0.71.5
#
# Container images used:
-# - ghcr.io/github/gh-aw-firewall/agent:0.25.42
-# - ghcr.io/github/gh-aw-firewall/api-proxy:0.25.42
-# - ghcr.io/github/gh-aw-firewall/cli-proxy:0.25.42
-# - ghcr.io/github/gh-aw-firewall/squid:0.25.42
+# - ghcr.io/github/gh-aw-firewall/agent:0.25.40@sha256:14ff567e8d9d4c2fbc5e55c973488381c71d7e0fdbe72d30ee7b8a738fd86504
+# - ghcr.io/github/gh-aw-firewall/api-proxy:0.25.40@sha256:2883ca3e5ae9f330cafdd9345bfd4ae17fc8da36c96d4c9a1f76e922b4c45280
+# - ghcr.io/github/gh-aw-firewall/cli-proxy:0.25.40@sha256:3e7152911d4b4b7b97beef9d3d7d924ff7902227e86001ef3838fb728d5d514c
+# - ghcr.io/github/gh-aw-firewall/squid:0.25.40@sha256:b084f4a2c771f584ee68084ced52fa6b3245197a1889645d817462d307d3ac51
# - ghcr.io/github/gh-aw-mcpg:v0.3.6@sha256:2bb8eef86006a4c5963c55616a9c51c32f27bfdecb023b8aa6f91f6718d9171c
# - ghcr.io/github/github-mcp-server:v1.0.3@sha256:2ac27ef03461ef2b877031b838a7d1fd7f12b12d4ace7796d8cad91446d55959
# - node:lts-alpine@sha256:d1b3b4da11eefd5941e7f0b9cf17783fc99d9c6fc34884a665f40a06dbdfc94f
@@ -60,7 +61,7 @@
name: "Agentic Workflow Portfolio Yield"
"on":
schedule:
- - cron: "40 8 * * 1"
+ - cron: "7 9 * * 1"
# Friendly format: weekly on monday around 09:00 (scattered)
workflow_dispatch:
inputs:
@@ -82,7 +83,6 @@ env:
OTEL_BACKEND_URL: ${{ secrets.OTLP_ENDPOINT }}
OTEL_EXPORTER_OTLP_ENDPOINT: ${{ secrets.OTLP_ENDPOINT }}
OTEL_SERVICE_NAME: gh-aw
- COPILOT_OTEL_FILE_EXPORTER_PATH: /tmp/gh-aw/copilot-otel.jsonl
OTEL_EXPORTER_OTLP_HEADERS: Authorization=${{ secrets.OTLP_TOKEN }}
GH_AW_OTLP_ENDPOINTS: '[{"url":"${{ secrets.OTLP_ENDPOINT }}","headers":"Authorization=${{ secrets.OTLP_TOKEN }}"}]'
@@ -99,28 +99,19 @@ jobs:
lockdown_check_failed: ${{ steps.generate_aw_info.outputs.lockdown_check_failed == 'true' }}
model: ${{ steps.generate_aw_info.outputs.model }}
secret_verification_result: ${{ steps.validate-secret.outputs.verification_result }}
- setup-parent-span-id: ${{ steps.setup.outputs.parent-span-id || steps.setup.outputs.span-id }}
- setup-span-id: ${{ steps.setup.outputs.span-id }}
setup-trace-id: ${{ steps.setup.outputs.trace-id }}
stale_lock_file_failed: ${{ steps.check-lock-file.outputs.stale_lock_file_failed == 'true' }}
steps:
- - name: Checkout actions folder
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- with:
- repository: github/gh-aw
- sparse-checkout: |
- actions
- persist-credentials: false
- name: Setup Scripts
id: setup
- uses: ./actions/setup
+ uses: github/gh-aw-actions/setup@v0.71.5
with:
destination: ${{ runner.temp }}/gh-aw/actions
job-name: ${{ github.job }}
env:
GH_AW_SETUP_WORKFLOW_NAME: "Agentic Workflow Portfolio Yield"
GH_AW_CURRENT_WORKFLOW_REF: ${{ github.repository }}/.github/workflows/aw-portfolio-yield.lock.yml@${{ github.ref }}
- GH_AW_INFO_VERSION: "1.0.43"
+ GH_AW_INFO_VERSION: "1.0.40"
- name: Mask OTLP telemetry headers
run: bash "${RUNNER_TEMP}/gh-aw/actions/mask_otlp_headers.sh"
- name: Generate agentic run info
@@ -129,15 +120,16 @@ jobs:
GH_AW_INFO_ENGINE_ID: "copilot"
GH_AW_INFO_ENGINE_NAME: "GitHub Copilot CLI"
GH_AW_INFO_MODEL: ${{ vars.GH_AW_MODEL_AGENT_COPILOT || 'claude-sonnet-4.6' }}
- GH_AW_INFO_VERSION: "1.0.43"
- GH_AW_INFO_AGENT_VERSION: "1.0.43"
+ GH_AW_INFO_VERSION: "1.0.40"
+ GH_AW_INFO_AGENT_VERSION: "1.0.40"
+ GH_AW_INFO_CLI_VERSION: "v0.71.5"
GH_AW_INFO_WORKFLOW_NAME: "Agentic Workflow Portfolio Yield"
GH_AW_INFO_EXPERIMENTAL: "false"
GH_AW_INFO_SUPPORTS_TOOLS_ALLOWLIST: "true"
GH_AW_INFO_STAGED: "false"
GH_AW_INFO_ALLOWED_DOMAINS: '["defaults","github"]'
GH_AW_INFO_FIREWALL_ENABLED: "true"
- GH_AW_INFO_AWF_VERSION: "v0.25.42"
+ GH_AW_INFO_AWF_VERSION: "v0.25.40"
GH_AW_INFO_AWMG_VERSION: ""
GH_AW_INFO_FIREWALL_TYPE: "squid"
GH_AW_COMPILED_STRICT: "true"
@@ -160,7 +152,6 @@ jobs:
sparse-checkout: |
.github
.agents
- actions/setup
.claude
.codex
.crush
@@ -187,6 +178,16 @@ jobs:
setupGlobals(core, github, context, exec, io, getOctokit);
const { main } = require('${{ runner.temp }}/gh-aw/actions/check_workflow_timestamp_api.cjs');
await main();
+ - name: Check compile-agentic version
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+ env:
+ GH_AW_COMPILED_VERSION: "v0.71.5"
+ with:
+ script: |
+ const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io, getOctokit);
+ const { main } = require('${{ runner.temp }}/gh-aw/actions/check_version_updates.cjs');
+ await main();
- name: Create prompt with built-in context
env:
GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
@@ -203,20 +204,20 @@ jobs:
run: |
bash "${RUNNER_TEMP}/gh-aw/actions/create_prompt_first.sh"
{
- cat << 'GH_AW_PROMPT_4387b81dbd2ba9bc_EOF'
+ cat << 'GH_AW_PROMPT_82735420d32ed5a0_EOF'
- GH_AW_PROMPT_4387b81dbd2ba9bc_EOF
+ GH_AW_PROMPT_82735420d32ed5a0_EOF
cat "${RUNNER_TEMP}/gh-aw/prompts/xpia.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/temp_folder_prompt.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/markdown.md"
cat "${RUNNER_TEMP}/gh-aw/prompts/safe_outputs_prompt.md"
- cat << 'GH_AW_PROMPT_4387b81dbd2ba9bc_EOF'
+ cat << 'GH_AW_PROMPT_82735420d32ed5a0_EOF'
Tools: create_issue, missing_tool, missing_data, noop
- GH_AW_PROMPT_4387b81dbd2ba9bc_EOF
+ GH_AW_PROMPT_82735420d32ed5a0_EOF
cat "${RUNNER_TEMP}/gh-aw/prompts/mcp_cli_tools_prompt.md"
- cat << 'GH_AW_PROMPT_4387b81dbd2ba9bc_EOF'
+ cat << 'GH_AW_PROMPT_82735420d32ed5a0_EOF'
The following GitHub context information is available for this workflow:
{{#if __GH_AW_GITHUB_ACTOR__ }}
@@ -245,13 +246,13 @@ jobs:
{{/if}}
- GH_AW_PROMPT_4387b81dbd2ba9bc_EOF
+ GH_AW_PROMPT_82735420d32ed5a0_EOF
cat "${RUNNER_TEMP}/gh-aw/prompts/cli_proxy_with_safeoutputs_prompt.md"
- cat << 'GH_AW_PROMPT_4387b81dbd2ba9bc_EOF'
+ cat << 'GH_AW_PROMPT_82735420d32ed5a0_EOF'
{{#runtime-import .github/workflows/shared/otel-observability.md}}
{{#runtime-import .github/workflows/aw-portfolio-yield.md}}
- GH_AW_PROMPT_4387b81dbd2ba9bc_EOF
+ GH_AW_PROMPT_82735420d32ed5a0_EOF
} > "$GH_AW_PROMPT"
- name: Interpolate variables and render templates
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
@@ -318,11 +319,8 @@ jobs:
path: |
/tmp/gh-aw/aw_info.json
/tmp/gh-aw/aw-prompts/prompt.txt
- /tmp/gh-aw/aw-prompts/prompt-template.txt
- /tmp/gh-aw/aw-prompts/prompt-import-tree.json
/tmp/gh-aw/github_rate_limits.jsonl
/tmp/gh-aw/base
- /tmp/gh-aw/.github/agents
if-no-files-found: ignore
retention-days: 1
@@ -347,7 +345,6 @@ jobs:
agentic_engine_timeout: ${{ steps.detect-copilot-errors.outputs.agentic_engine_timeout || 'false' }}
checkout_pr_success: ${{ steps.checkout-pr.outputs.checkout_pr_success || 'true' }}
effective_tokens: ${{ steps.parse-mcp-gateway.outputs.effective_tokens }}
- effective_tokens_rate_limit_error: ${{ steps.parse-mcp-gateway.outputs.effective_tokens_rate_limit_error || 'false' }}
has_patch: ${{ steps.collect_output.outputs.has_patch }}
inference_access_error: ${{ steps.detect-copilot-errors.outputs.inference_access_error || 'false' }}
mcp_policy_error: ${{ steps.detect-copilot-errors.outputs.mcp_policy_error || 'false' }}
@@ -355,29 +352,19 @@ jobs:
model_not_supported_error: ${{ steps.detect-copilot-errors.outputs.model_not_supported_error || 'false' }}
output: ${{ steps.collect_output.outputs.output }}
output_types: ${{ steps.collect_output.outputs.output_types }}
- setup-parent-span-id: ${{ steps.setup.outputs.parent-span-id || steps.setup.outputs.span-id }}
- setup-span-id: ${{ steps.setup.outputs.span-id }}
setup-trace-id: ${{ steps.setup.outputs.trace-id }}
steps:
- - name: Checkout actions folder
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- with:
- repository: github/gh-aw
- sparse-checkout: |
- actions
- persist-credentials: false
- name: Setup Scripts
id: setup
- uses: ./actions/setup
+ uses: github/gh-aw-actions/setup@v0.71.5
with:
destination: ${{ runner.temp }}/gh-aw/actions
job-name: ${{ github.job }}
trace-id: ${{ needs.activation.outputs.setup-trace-id }}
- parent-span-id: ${{ needs.activation.outputs.setup-parent-span-id || needs.activation.outputs.setup-span-id }}
env:
GH_AW_SETUP_WORKFLOW_NAME: "Agentic Workflow Portfolio Yield"
GH_AW_CURRENT_WORKFLOW_REF: ${{ github.repository }}/.github/workflows/aw-portfolio-yield.lock.yml@${{ github.ref }}
- GH_AW_INFO_VERSION: "1.0.43"
+ GH_AW_INFO_VERSION: "1.0.40"
- name: Set runtime paths
id: set-runtime-paths
run: |
@@ -431,14 +418,14 @@ jobs:
const { main } = require('${{ runner.temp }}/gh-aw/actions/checkout_pr_branch.cjs');
await main();
- name: Install GitHub Copilot CLI
- run: bash "${RUNNER_TEMP}/gh-aw/actions/install_copilot_cli.sh" 1.0.43
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/install_copilot_cli.sh" 1.0.40
env:
GH_HOST: github.com
- name: Install AWF binary
- run: bash "${RUNNER_TEMP}/gh-aw/actions/install_awf_binary.sh" v0.25.42
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/install_awf_binary.sh" v0.25.40
- name: Determine automatic lockdown mode for GitHub MCP Server
id: determine-automatic-lockdown
- uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0 (source v9)
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9
env:
GH_AW_GITHUB_TOKEN: ${{ secrets.GH_AW_GITHUB_TOKEN }}
GH_AW_GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN }}
@@ -457,27 +444,25 @@ jobs:
GH_AW_AGENT_FOLDERS: ".agents .claude .codex .crush .gemini .github .opencode .pi"
GH_AW_AGENT_FILES: ".crush.json AGENTS.md CLAUDE.md GEMINI.md PI.md opencode.jsonc"
run: bash "${RUNNER_TEMP}/gh-aw/actions/restore_base_github_folders.sh"
- - name: Restore inline sub-agents from activation artifact
- env:
- GH_AW_SUB_AGENT_DIR: ".github/agents"
- GH_AW_SUB_AGENT_EXT: ".agent.md"
- run: bash "${RUNNER_TEMP}/gh-aw/actions/restore_inline_sub_agents.sh"
- - name: Precompute workflow portfolio data
- run: |-
- set -euo pipefail
- mkdir -p /tmp/gh-aw
- python3 scripts/aw_yield_precompute.py --workflows ".github/workflows" --out /tmp/aw-yield-precompute.json
+ - env:
+ AW_YIELD_OUT: /tmp/aw-yield-precompute.json
+ AW_YIELD_WORKFLOWS: .github/workflows
+ AW_YIELD_WORKSPACE: ${{ github.workspace }}
+ name: Precompute workflow portfolio data
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9
+ with:
+ script: "const path = require(\"path\");\nconst { runPrecompute } = require(path.join(process.env.AW_YIELD_WORKSPACE, \"scripts/aw_yield_precompute.cjs\"));\nawait runPrecompute({\n workspace: process.env.AW_YIELD_WORKSPACE,\n workflows: process.env.AW_YIELD_WORKFLOWS,\n out: process.env.AW_YIELD_OUT,\n});\n"
- name: Download container images
- run: bash "${RUNNER_TEMP}/gh-aw/actions/download_docker_images.sh" ghcr.io/github/gh-aw-firewall/agent:0.25.42 ghcr.io/github/gh-aw-firewall/api-proxy:0.25.42 ghcr.io/github/gh-aw-firewall/cli-proxy:0.25.42 ghcr.io/github/gh-aw-firewall/squid:0.25.42 ghcr.io/github/gh-aw-mcpg:v0.3.6@sha256:2bb8eef86006a4c5963c55616a9c51c32f27bfdecb023b8aa6f91f6718d9171c ghcr.io/github/github-mcp-server:v1.0.3@sha256:2ac27ef03461ef2b877031b838a7d1fd7f12b12d4ace7796d8cad91446d55959 node:lts-alpine@sha256:d1b3b4da11eefd5941e7f0b9cf17783fc99d9c6fc34884a665f40a06dbdfc94f
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/download_docker_images.sh" ghcr.io/github/gh-aw-firewall/agent:0.25.40@sha256:14ff567e8d9d4c2fbc5e55c973488381c71d7e0fdbe72d30ee7b8a738fd86504 ghcr.io/github/gh-aw-firewall/api-proxy:0.25.40@sha256:2883ca3e5ae9f330cafdd9345bfd4ae17fc8da36c96d4c9a1f76e922b4c45280 ghcr.io/github/gh-aw-firewall/cli-proxy:0.25.40@sha256:3e7152911d4b4b7b97beef9d3d7d924ff7902227e86001ef3838fb728d5d514c ghcr.io/github/gh-aw-firewall/squid:0.25.40@sha256:b084f4a2c771f584ee68084ced52fa6b3245197a1889645d817462d307d3ac51 ghcr.io/github/gh-aw-mcpg:v0.3.6@sha256:2bb8eef86006a4c5963c55616a9c51c32f27bfdecb023b8aa6f91f6718d9171c ghcr.io/github/github-mcp-server:v1.0.3@sha256:2ac27ef03461ef2b877031b838a7d1fd7f12b12d4ace7796d8cad91446d55959 node:lts-alpine@sha256:d1b3b4da11eefd5941e7f0b9cf17783fc99d9c6fc34884a665f40a06dbdfc94f
- name: Generate Safe Outputs Config
run: |
mkdir -p "${RUNNER_TEMP}/gh-aw/safeoutputs"
mkdir -p /tmp/gh-aw/safeoutputs
mkdir -p /tmp/gh-aw/mcp-logs/safeoutputs
- cat > "${RUNNER_TEMP}/gh-aw/safeoutputs/config.json" << 'GH_AW_SAFE_OUTPUTS_CONFIG_193b35d124023bca_EOF'
+ cat > "${RUNNER_TEMP}/gh-aw/safeoutputs/config.json" << 'GH_AW_SAFE_OUTPUTS_CONFIG_b885e1ed5d87eaff_EOF'
{"create_issue":{"close_older_issues":true,"expires":720,"labels":["automation","report","observability"],"max":1},"create_report_incomplete_issue":{},"mentions":{"enabled":false},"missing_data":{},"missing_tool":{},"noop":{"max":1,"report-as-issue":"true"},"report_incomplete":{}}
- GH_AW_SAFE_OUTPUTS_CONFIG_193b35d124023bca_EOF
+ GH_AW_SAFE_OUTPUTS_CONFIG_b885e1ed5d87eaff_EOF
- name: Generate Safe Outputs Tools
env:
GH_AW_TOOLS_META_JSON: |
@@ -499,9 +484,6 @@ jobs:
"sanitize": true,
"maxLength": 65000
},
- "fields": {
- "type": "array"
- },
"labels": {
"type": "array",
"itemType": "string",
@@ -679,7 +661,7 @@ jobs:
mkdir -p /home/runner/.copilot
GH_AW_NODE=$(which node 2>/dev/null || command -v node 2>/dev/null || echo node)
- cat << GH_AW_MCP_CONFIG_15bccfb2d4a49cee_EOF | "$GH_AW_NODE" "${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.cjs"
+ cat << GH_AW_MCP_CONFIG_c77e9289cadb457a_EOF | "$GH_AW_NODE" "${RUNNER_TEMP}/gh-aw/actions/start_mcp_gateway.cjs"
{
"mcpServers": {
"otel": {
@@ -732,7 +714,7 @@ jobs:
}
}
}
- GH_AW_MCP_CONFIG_15bccfb2d4a49cee_EOF
+ GH_AW_MCP_CONFIG_c77e9289cadb457a_EOF
- name: Mount MCP servers as CLIs
id: mount-mcp-clis
continue-on-error: true
@@ -772,12 +754,11 @@ jobs:
GH_AW_NODE_BIN=$(command -v node 2>/dev/null || true)
export GH_AW_NODE_BIN
(umask 177 && touch /tmp/gh-aw/agent-stdio.log)
- printf '%s\n' '{"$schema":"https://github.com/github/gh-aw-firewall/releases/download/v0.25.42/awf-config.schema.json","network":{"allowDomains":["*.githubusercontent.com","api.business.githubcopilot.com","api.enterprise.githubcopilot.com","api.github.com","api.githubcopilot.com","api.individual.githubcopilot.com","api.snapcraft.io","archive.ubuntu.com","azure.archive.ubuntu.com","codeload.github.com","crl.geotrust.com","crl.globalsign.com","crl.identrust.com","crl.sectigo.com","crl.thawte.com","crl.usertrust.com","crl.verisign.com","crl3.digicert.com","crl4.digicert.com","crls.ssl.com","docs.github.com","github-cloud.githubusercontent.com","github-cloud.s3.amazonaws.com","github.blog","github.com","github.githubassets.com","host.docker.internal","json-schema.org","json.schemastore.org","keyserver.ubuntu.com","lfs.github.com","objects.githubusercontent.com","ocsp.digicert.com","ocsp.geotrust.com","ocsp.globalsign.com","ocsp.identrust.com","ocsp.sectigo.com","ocsp.ssl.com","ocsp.thawte.com","ocsp.usertrust.com","ocsp.verisign.com","packagecloud.io","packages.cloud.google.com","packages.microsoft.com","ppa.launchpad.net","raw.githubusercontent.com","registry.npmjs.org","s.symcb.com","s.symcd.com","security.ubuntu.com","telemetry.enterprise.githubcopilot.com","ts-crl.ws.symantec.com","ts-ocsp.ws.symantec.com","www.googleapis.com"]},"apiProxy":{"enabled":true,"maxEffectiveTokens":10000000,"models":{"auto":["large"],"deep-research":["copilot/deep-research*","copilot/o3-deep-research*","copilot/o4-mini-deep-research*","google/deep-research*","gemini/deep-research*","openai/o3-deep-research*","openai/o4-mini-deep-research*"],"gemini-flash":["copilot/gemini-*flash*","google/gemini-*flash*","gemini/gemini-*flash*"],"gemini-flash-lite":["copilot/gemini-*flash*lite*","google/gemini-*flash*lite*","gemini/gemini-*flash*lite*"],"gemini-pro":["copilot/gemini-*pro*","google/gemini-*pro*","gemini/gemini-*pro*"],"gpt-4.1":["copilot/gpt-4.1*","openai/gpt-4.1*"],"gpt-5":["copilot/gpt
-5*","openai/gpt-5*"],"gpt-5-codex":["copilot/gpt-5*codex*","openai/gpt-5*codex*"],"gpt-5-mini":["copilot/gpt-5*mini*","openai/gpt-5*mini*"],"gpt-5-nano":["copilot/gpt-5*nano*","openai/gpt-5*nano*"],"gpt-5-pro":["copilot/gpt-5*pro*","openai/gpt-5*pro*"],"haiku":["copilot/*haiku*","anthropic/*haiku*"],"large":["sonnet","gpt-5-pro","gpt-5","gemini-pro"],"mini":["haiku","gpt-5-mini","gpt-5-nano","gemini-flash-lite"],"opus":["copilot/*opus*","anthropic/*opus*"],"reasoning":["copilot/o1*","copilot/o3*","copilot/o4*","openai/o1*","openai/o3*","openai/o4*"],"small":["mini"],"sonnet":["copilot/*sonnet*","anthropic/*sonnet*"]}},"container":{"imageTag":"0.25.42"}}' > "${RUNNER_TEMP}/gh-aw/awf-config.json" && cp "${RUNNER_TEMP}/gh-aw/awf-config.json" /tmp/gh-aw/awf-config.json
+ printf '%s\n' '{"$schema":"https://github.com/github/gh-aw-firewall/releases/download/v0.25.40/awf-config.schema.json","network":{"allowDomains":["*.githubusercontent.com","api.business.githubcopilot.com","api.enterprise.githubcopilot.com","api.github.com","api.githubcopilot.com","api.individual.githubcopilot.com","api.snapcraft.io","archive.ubuntu.com","azure.archive.ubuntu.com","codeload.github.com","crl.geotrust.com","crl.globalsign.com","crl.identrust.com","crl.sectigo.com","crl.thawte.com","crl.usertrust.com","crl.verisign.com","crl3.digicert.com","crl4.digicert.com","crls.ssl.com","docs.github.com","github-cloud.githubusercontent.com","github-cloud.s3.amazonaws.com","github.blog","github.com","github.githubassets.com","host.docker.internal","json-schema.org","json.schemastore.org","keyserver.ubuntu.com","lfs.github.com","objects.githubusercontent.com","ocsp.digicert.com","ocsp.geotrust.com","ocsp.globalsign.com","ocsp.identrust.com","ocsp.sectigo.com","ocsp.ssl.com","ocsp.thawte.com","ocsp.usertrust.com","ocsp.verisign.com","packagecloud.io","packages.cloud.google.com","packages.microsoft.com","ppa.launchpad.net","raw.githubusercontent.com","registry.npmjs.org","s.symcb.com","s.symcd.com","security.ubuntu.com","telemetry.enterprise.githubcopilot.com","ts-crl.ws.symantec.com","ts-ocsp.ws.symantec.com","www.googleapis.com"]},"apiProxy":{"enabled":true,"models":{"auto":["large"],"deep-research":["copilot/deep-research*","google/deep-research*"],"gemini-flash":["copilot/gemini-*flash*","google/gemini-*flash*"],"gemini-pro":["copilot/gemini-*pro*","google/gemini-*pro*"],"gpt-4.1":["copilot/gpt-4.1*","openai/gpt-4.1*"],"gpt-5":["copilot/gpt-5*","openai/gpt-5*"],"gpt-5-codex":["copilot/gpt-5*codex*","openai/gpt-5*codex*"],"gpt-5-mini":["copilot/gpt-5*mini*","openai/gpt-5*mini*"],"gpt-5-nano":["copilot/gpt-5*nano*","openai/gpt-5*nano*"],"gpt-5-pro":["copilot/gpt-5*pro*","openai/gpt-5*pro*"],"haiku":["copilot/*haiku*","anthropic/*haiku*"],"large":["sonnet","gpt-5-pro
","gpt-5","gemini-pro"],"mini":["haiku","gpt-5-mini","gpt-5-nano","gemini-flash"],"opus":["copilot/*opus*","anthropic/*opus*"],"reasoning":["copilot/o1*","copilot/o3*","copilot/o4*","openai/o1*","openai/o3*","openai/o4*"],"small":["mini"],"sonnet":["copilot/*sonnet*","anthropic/*sonnet*"]}},"container":{"imageTag":"0.25.40,squid=sha256:b084f4a2c771f584ee68084ced52fa6b3245197a1889645d817462d307d3ac51,agent=sha256:14ff567e8d9d4c2fbc5e55c973488381c71d7e0fdbe72d30ee7b8a738fd86504,api-proxy=sha256:2883ca3e5ae9f330cafdd9345bfd4ae17fc8da36c96d4c9a1f76e922b4c45280,cli-proxy=sha256:3e7152911d4b4b7b97beef9d3d7d924ff7902227e86001ef3838fb728d5d514c"}}' > "${RUNNER_TEMP}/gh-aw/awf-config.json" && cp "${RUNNER_TEMP}/gh-aw/awf-config.json" /tmp/gh-aw/awf-config.json
# shellcheck disable=SC1003
sudo -E awf --config "${RUNNER_TEMP}/gh-aw/awf-config.json" --container-workdir "${GITHUB_WORKSPACE}" --mount "${RUNNER_TEMP}/gh-aw:${RUNNER_TEMP}/gh-aw:ro" --mount "${RUNNER_TEMP}/gh-aw:/host${RUNNER_TEMP}/gh-aw:ro" --env-all --exclude-env COPILOT_GITHUB_TOKEN --exclude-env GH_TOKEN --exclude-env GITHUB_MCP_SERVER_TOKEN --exclude-env MCP_GATEWAY_API_KEY --log-level info --proxy-logs-dir /tmp/gh-aw/sandbox/firewall/logs --audit-dir /tmp/gh-aw/sandbox/firewall/audit --enable-host-access --allow-host-ports 80,443,8080 --skip-pull --difc-proxy-host host.docker.internal:18443 --difc-proxy-ca-cert /tmp/gh-aw/difc-proxy-tls/ca.crt \
- -- /bin/bash -c 'export PATH="${RUNNER_TEMP}/gh-aw/mcp-cli/bin:$PATH" && export PATH="$(find /opt/hostedtoolcache /home/runner/work/_tool -maxdepth 5 -type d -name bin 2>/dev/null | tr '\''\n'\'' '\'':'\'')$PATH"; [ -n "$GOROOT" ] && export PATH="$GOROOT/bin:$PATH" || true && GH_AW_NODE_EXEC="${GH_AW_NODE_BIN:-}"; if [ -z "$GH_AW_NODE_EXEC" ] || [ ! -x "$GH_AW_NODE_EXEC" ]; then GH_AW_NODE_EXEC="$(command -v node 2>/dev/null || true)"; fi; if [ -z "$GH_AW_NODE_EXEC" ]; then echo "node runtime missing on this runner — check runtimes.node in workflow YAML" >&2; exit 127; fi; "$GH_AW_NODE_EXEC" ${RUNNER_TEMP}/gh-aw/actions/copilot_harness.cjs /usr/local/bin/copilot --add-dir /tmp/gh-aw/ --log-level all --log-dir /tmp/gh-aw/sandbox/agent/logs/ --disable-builtin-mcps --no-ask-user --allow-all-tools --allow-all-paths --add-dir "${GITHUB_WORKSPACE}" --prompt-file /tmp/gh-aw/aw-prompts/prompt.txt' 2>&1 | tee -a /tmp/gh-aw/agent-stdio.log
+ -- /bin/bash -c 'export PATH="${RUNNER_TEMP}/gh-aw/mcp-cli/bin:$PATH" && export PATH="$(find /opt/hostedtoolcache /home/runner/work/_tool -maxdepth 4 -type d -name bin 2>/dev/null | tr '\''\n'\'' '\'':'\'')$PATH"; [ -n "$GOROOT" ] && export PATH="$GOROOT/bin:$PATH" || true && GH_AW_NODE_EXEC="${GH_AW_NODE_BIN:-}"; if [ -z "$GH_AW_NODE_EXEC" ] || [ ! -x "$GH_AW_NODE_EXEC" ]; then GH_AW_NODE_EXEC="$(command -v node 2>/dev/null || echo node)"; fi; "$GH_AW_NODE_EXEC" ${RUNNER_TEMP}/gh-aw/actions/copilot_harness.cjs /usr/local/bin/copilot --add-dir /tmp/gh-aw/ --log-level all --log-dir /tmp/gh-aw/sandbox/agent/logs/ --disable-builtin-mcps --no-ask-user --allow-all-tools --allow-all-paths --add-dir "${GITHUB_WORKSPACE}" --prompt-file /tmp/gh-aw/aw-prompts/prompt.txt' 2>&1 | tee -a /tmp/gh-aw/agent-stdio.log
env:
- AWF_REFLECT_ENABLED: 1
COPILOT_AGENT_RUNNER_TYPE: STANDALONE
COPILOT_API_KEY: dummy-byok-key-for-offline-mode
COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }}
@@ -786,7 +767,7 @@ jobs:
GH_AW_PHASE: agent
GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
GH_AW_SAFE_OUTPUTS: ${{ steps.set-runtime-paths.outputs.GH_AW_SAFE_OUTPUTS }}
- GH_AW_VERSION: dev
+ GH_AW_VERSION: v0.71.5
GH_TOKEN: ${{ secrets.GH_AW_GITHUB_TOKEN || github.token }}
GITHUB_API_URL: ${{ github.api_url }}
GITHUB_AW: true
@@ -907,7 +888,7 @@ jobs:
run: |
# Fix permissions on firewall logs/audit dirs so they can be uploaded as artifacts
# AWF runs with sudo, creating files owned by root
- sudo chmod -R a+rX /tmp/gh-aw/sandbox/firewall 2>/dev/null || true
+ sudo chmod -R a+r /tmp/gh-aw/sandbox/firewall 2>/dev/null || true
# Only run awf logs summary if awf command exists (it may not be installed if workflow failed before install step)
if command -v awf &> /dev/null; then
awf logs summary | tee -a "$GITHUB_STEP_SUMMARY"
@@ -949,11 +930,15 @@ jobs:
if [ ! -f /tmp/gh-aw/agent_output.json ]; then
echo '{"items":[]}' > /tmp/gh-aw/agent_output.json
fi
- - name: Finalize workflow portfolio report
- run: |-
- set -euo pipefail
- mkdir -p /tmp/gh-aw
- python3 scripts/aw_yield_postcompute.py --precompute /tmp/aw-yield-precompute.json --agent-output /tmp/gh-aw --out /tmp/aw-yield-final.json
+ - env:
+ AW_YIELD_AGENT_OUTPUT: /tmp/gh-aw
+ AW_YIELD_OUT: /tmp/aw-yield-final.json
+ AW_YIELD_PRECOMPUTE: /tmp/aw-yield-precompute.json
+ AW_YIELD_WORKSPACE: ${{ github.workspace }}
+ name: Finalize workflow portfolio report
+ uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9
+ with:
+ script: "const path = require(\"path\");\nconst { runPostcompute } = require(path.join(process.env.AW_YIELD_WORKSPACE, \"scripts/aw_yield_postcompute.cjs\"));\nawait runPostcompute({\n workspace: process.env.AW_YIELD_WORKSPACE,\n precompute: process.env.AW_YIELD_PRECOMPUTE,\n agentOutput: process.env.AW_YIELD_AGENT_OUTPUT,\n out: process.env.AW_YIELD_OUT,\n});\n"
- name: Upload agent artifacts
if: always()
@@ -972,7 +957,6 @@ jobs:
/tmp/gh-aw/agent/
/tmp/gh-aw/github_rate_limits.jsonl
/tmp/gh-aw/otel.jsonl
- /tmp/gh-aw/copilot-otel.jsonl
/tmp/gh-aw/safeoutputs.jsonl
/tmp/gh-aw/agent_output.json
/tmp/gh-aw/aw-*.patch
@@ -1005,25 +989,17 @@ jobs:
tools_reported: ${{ steps.missing_tool.outputs.tools_reported }}
total_count: ${{ steps.missing_tool.outputs.total_count }}
steps:
- - name: Checkout actions folder
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- with:
- repository: github/gh-aw
- sparse-checkout: |
- actions
- persist-credentials: false
- name: Setup Scripts
id: setup
- uses: ./actions/setup
+ uses: github/gh-aw-actions/setup@v0.71.5
with:
destination: ${{ runner.temp }}/gh-aw/actions
job-name: ${{ github.job }}
trace-id: ${{ needs.activation.outputs.setup-trace-id }}
- parent-span-id: ${{ needs.activation.outputs.setup-parent-span-id || needs.activation.outputs.setup-span-id }}
env:
GH_AW_SETUP_WORKFLOW_NAME: "Agentic Workflow Portfolio Yield"
GH_AW_CURRENT_WORKFLOW_REF: ${{ github.repository }}/.github/workflows/aw-portfolio-yield.lock.yml@${{ github.ref }}
- GH_AW_INFO_VERSION: "1.0.43"
+ GH_AW_INFO_VERSION: "1.0.40"
- name: Download agent output artifact
id: download-agent-output
continue-on-error: true
@@ -1113,8 +1089,6 @@ jobs:
GH_AW_ENGINE_ID: "copilot"
GH_AW_SECRET_VERIFICATION_RESULT: ${{ needs.activation.outputs.secret_verification_result }}
GH_AW_CHECKOUT_PR_SUCCESS: ${{ needs.agent.outputs.checkout_pr_success }}
- GH_AW_EFFECTIVE_TOKENS: ${{ needs.agent.outputs.effective_tokens || '' }}
- GH_AW_EFFECTIVE_TOKENS_RATE_LIMIT_ERROR: ${{ needs.agent.outputs.effective_tokens_rate_limit_error || 'false' }}
GH_AW_INFERENCE_ACCESS_ERROR: ${{ needs.agent.outputs.inference_access_error }}
GH_AW_MCP_POLICY_ERROR: ${{ needs.agent.outputs.mcp_policy_error }}
GH_AW_AGENTIC_ENGINE_TIMEOUT: ${{ needs.agent.outputs.agentic_engine_timeout }}
@@ -1127,7 +1101,6 @@ jobs:
GH_AW_MISSING_TOOL_REPORT_AS_FAILURE: "true"
GH_AW_MISSING_DATA_REPORT_AS_FAILURE: "true"
GH_AW_TIMEOUT_MINUTES: "25"
- GH_AW_MAX_EFFECTIVE_TOKENS: "10000000"
with:
github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
script: |
@@ -1150,25 +1123,17 @@ jobs:
detection_reason: ${{ steps.detection_conclusion.outputs.reason }}
detection_success: ${{ steps.detection_conclusion.outputs.success }}
steps:
- - name: Checkout actions folder
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- with:
- repository: github/gh-aw
- sparse-checkout: |
- actions
- persist-credentials: false
- name: Setup Scripts
id: setup
- uses: ./actions/setup
+ uses: github/gh-aw-actions/setup@v0.71.5
with:
destination: ${{ runner.temp }}/gh-aw/actions
job-name: ${{ github.job }}
trace-id: ${{ needs.activation.outputs.setup-trace-id }}
- parent-span-id: ${{ needs.activation.outputs.setup-parent-span-id || needs.activation.outputs.setup-span-id }}
env:
GH_AW_SETUP_WORKFLOW_NAME: "Agentic Workflow Portfolio Yield"
GH_AW_CURRENT_WORKFLOW_REF: ${{ github.repository }}/.github/workflows/aw-portfolio-yield.lock.yml@${{ github.ref }}
- GH_AW_INFO_VERSION: "1.0.43"
+ GH_AW_INFO_VERSION: "1.0.40"
- name: Download agent output artifact
id: download-agent-output
continue-on-error: true
@@ -1194,7 +1159,7 @@ jobs:
rm -rf /tmp/gh-aw/sandbox/firewall/logs
rm -rf /tmp/gh-aw/sandbox/firewall/audit
- name: Download container images
- run: bash "${RUNNER_TEMP}/gh-aw/actions/download_docker_images.sh" ghcr.io/github/gh-aw-firewall/agent:0.25.42 ghcr.io/github/gh-aw-firewall/api-proxy:0.25.42 ghcr.io/github/gh-aw-firewall/squid:0.25.42
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/download_docker_images.sh" ghcr.io/github/gh-aw-firewall/agent:0.25.40@sha256:14ff567e8d9d4c2fbc5e55c973488381c71d7e0fdbe72d30ee7b8a738fd86504 ghcr.io/github/gh-aw-firewall/api-proxy:0.25.40@sha256:2883ca3e5ae9f330cafdd9345bfd4ae17fc8da36c96d4c9a1f76e922b4c45280 ghcr.io/github/gh-aw-firewall/squid:0.25.40@sha256:b084f4a2c771f584ee68084ced52fa6b3245197a1889645d817462d307d3ac51
- name: Check if detection needed
id: detection_guard
if: always()
@@ -1253,11 +1218,11 @@ jobs:
node-version: '24'
package-manager-cache: false
- name: Install GitHub Copilot CLI
- run: bash "${RUNNER_TEMP}/gh-aw/actions/install_copilot_cli.sh" 1.0.43
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/install_copilot_cli.sh" 1.0.40
env:
GH_HOST: github.com
- name: Install AWF binary
- run: bash "${RUNNER_TEMP}/gh-aw/actions/install_awf_binary.sh" v0.25.42
+ run: bash "${RUNNER_TEMP}/gh-aw/actions/install_awf_binary.sh" v0.25.40
- name: Execute GitHub Copilot CLI
if: always() && steps.detection_guard.outputs.run_detection == 'true'
continue-on-error: true
@@ -1270,19 +1235,18 @@ jobs:
GH_AW_NODE_BIN=$(command -v node 2>/dev/null || true)
export GH_AW_NODE_BIN
(umask 177 && touch /tmp/gh-aw/threat-detection/detection.log)
- printf '%s\n' '{"$schema":"https://github.com/github/gh-aw-firewall/releases/download/v0.25.42/awf-config.schema.json","network":{"allowDomains":["api.business.githubcopilot.com","api.enterprise.githubcopilot.com","api.github.com","api.githubcopilot.com","api.individual.githubcopilot.com","github.com","host.docker.internal","telemetry.enterprise.githubcopilot.com"]},"apiProxy":{"enabled":true,"maxEffectiveTokens":10000000},"container":{"imageTag":"0.25.42"}}' > "${RUNNER_TEMP}/gh-aw/awf-config.json" && cp "${RUNNER_TEMP}/gh-aw/awf-config.json" /tmp/gh-aw/awf-config.json
+ printf '%s\n' '{"$schema":"https://github.com/github/gh-aw-firewall/releases/download/v0.25.40/awf-config.schema.json","network":{"allowDomains":["api.business.githubcopilot.com","api.enterprise.githubcopilot.com","api.github.com","api.githubcopilot.com","api.individual.githubcopilot.com","github.com","host.docker.internal","telemetry.enterprise.githubcopilot.com"]},"apiProxy":{"enabled":true},"container":{"imageTag":"0.25.40,squid=sha256:b084f4a2c771f584ee68084ced52fa6b3245197a1889645d817462d307d3ac51,agent=sha256:14ff567e8d9d4c2fbc5e55c973488381c71d7e0fdbe72d30ee7b8a738fd86504,api-proxy=sha256:2883ca3e5ae9f330cafdd9345bfd4ae17fc8da36c96d4c9a1f76e922b4c45280,cli-proxy=sha256:3e7152911d4b4b7b97beef9d3d7d924ff7902227e86001ef3838fb728d5d514c"}}' > "${RUNNER_TEMP}/gh-aw/awf-config.json" && cp "${RUNNER_TEMP}/gh-aw/awf-config.json" /tmp/gh-aw/awf-config.json
# shellcheck disable=SC1003
sudo -E awf --config "${RUNNER_TEMP}/gh-aw/awf-config.json" --container-workdir "${GITHUB_WORKSPACE}" --mount "${RUNNER_TEMP}/gh-aw:${RUNNER_TEMP}/gh-aw:ro" --mount "${RUNNER_TEMP}/gh-aw:/host${RUNNER_TEMP}/gh-aw:ro" --env-all --exclude-env COPILOT_GITHUB_TOKEN --log-level info --proxy-logs-dir /tmp/gh-aw/sandbox/firewall/logs --audit-dir /tmp/gh-aw/sandbox/firewall/audit --enable-host-access --allow-host-ports 80,443,8080 --skip-pull \
- -- /bin/bash -c 'export PATH="$(find /opt/hostedtoolcache /home/runner/work/_tool -maxdepth 5 -type d -name bin 2>/dev/null | tr '\''\n'\'' '\'':'\'')$PATH"; [ -n "$GOROOT" ] && export PATH="$GOROOT/bin:$PATH" || true && GH_AW_NODE_EXEC="${GH_AW_NODE_BIN:-}"; if [ -z "$GH_AW_NODE_EXEC" ] || [ ! -x "$GH_AW_NODE_EXEC" ]; then GH_AW_NODE_EXEC="$(command -v node 2>/dev/null || true)"; fi; if [ -z "$GH_AW_NODE_EXEC" ]; then echo "node runtime missing on this runner — check runtimes.node in workflow YAML" >&2; exit 127; fi; "$GH_AW_NODE_EXEC" ${RUNNER_TEMP}/gh-aw/actions/copilot_harness.cjs /usr/local/bin/copilot --add-dir /tmp/gh-aw/ --log-level all --log-dir /tmp/gh-aw/sandbox/agent/logs/ --disable-builtin-mcps --no-ask-user --allow-all-tools --add-dir "${GITHUB_WORKSPACE}" --prompt-file /tmp/gh-aw/aw-prompts/prompt.txt' 2>&1 | tee -a /tmp/gh-aw/threat-detection/detection.log
+ -- /bin/bash -c 'export PATH="$(find /opt/hostedtoolcache /home/runner/work/_tool -maxdepth 4 -type d -name bin 2>/dev/null | tr '\''\n'\'' '\'':'\'')$PATH"; [ -n "$GOROOT" ] && export PATH="$GOROOT/bin:$PATH" || true && GH_AW_NODE_EXEC="${GH_AW_NODE_BIN:-}"; if [ -z "$GH_AW_NODE_EXEC" ] || [ ! -x "$GH_AW_NODE_EXEC" ]; then GH_AW_NODE_EXEC="$(command -v node 2>/dev/null || echo node)"; fi; "$GH_AW_NODE_EXEC" ${RUNNER_TEMP}/gh-aw/actions/copilot_harness.cjs /usr/local/bin/copilot --add-dir /tmp/gh-aw/ --log-level all --log-dir /tmp/gh-aw/sandbox/agent/logs/ --disable-builtin-mcps --no-ask-user --allow-all-tools --add-dir "${GITHUB_WORKSPACE}" --prompt-file /tmp/gh-aw/aw-prompts/prompt.txt' 2>&1 | tee -a /tmp/gh-aw/threat-detection/detection.log
env:
- AWF_REFLECT_ENABLED: 1
COPILOT_AGENT_RUNNER_TYPE: STANDALONE
COPILOT_API_KEY: dummy-byok-key-for-offline-mode
COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }}
COPILOT_MODEL: ${{ vars.GH_AW_MODEL_DETECTION_COPILOT || 'claude-sonnet-4.6' }}
GH_AW_PHASE: detection
GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
- GH_AW_VERSION: dev
+ GH_AW_VERSION: v0.71.5
GITHUB_API_URL: ${{ github.api_url }}
GITHUB_AW: true
GITHUB_COPILOT_INTEGRATION_ID: agentic-workflows
@@ -1310,7 +1274,6 @@ jobs:
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
env:
RUN_DETECTION: ${{ steps.detection_guard.outputs.run_detection }}
- DETECTION_AGENTIC_EXECUTION_OUTCOME: ${{ steps.detection_agentic_execution.outcome }}
GH_AW_DETECTION_CONTINUE_ON_ERROR: "true"
with:
script: |
@@ -1321,11 +1284,10 @@ jobs:
await main();
} catch (loadErr) {
const continueOnError = process.env.GH_AW_DETECTION_CONTINUE_ON_ERROR !== 'false';
- const detectionExecutionFailed = process.env.DETECTION_AGENTIC_EXECUTION_OUTCOME === 'failure';
const msg = 'ERR_SYSTEM: \u274C Unexpected error loading threat detection module: ' + (loadErr && loadErr.message ? loadErr.message : String(loadErr));
core.error(msg);
core.setOutput('reason', 'parse_error');
- if (continueOnError && !detectionExecutionFailed) {
+ if (continueOnError) {
core.warning('\u26A0\uFE0F ' + msg);
core.setOutput('conclusion', 'warning');
core.setOutput('success', 'false');
@@ -1354,7 +1316,7 @@ jobs:
GH_AW_EFFECTIVE_TOKENS: ${{ needs.agent.outputs.effective_tokens }}
GH_AW_ENGINE_ID: "copilot"
GH_AW_ENGINE_MODEL: ${{ needs.agent.outputs.model }}
- GH_AW_ENGINE_VERSION: "1.0.43"
+ GH_AW_ENGINE_VERSION: "1.0.40"
GH_AW_WORKFLOW_ID: "aw-portfolio-yield"
GH_AW_WORKFLOW_NAME: "Agentic Workflow Portfolio Yield"
outputs:
@@ -1367,25 +1329,17 @@ jobs:
process_safe_outputs_processed_count: ${{ steps.process_safe_outputs.outputs.processed_count }}
process_safe_outputs_temporary_id_map: ${{ steps.process_safe_outputs.outputs.temporary_id_map }}
steps:
- - name: Checkout actions folder
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- with:
- repository: github/gh-aw
- sparse-checkout: |
- actions
- persist-credentials: false
- name: Setup Scripts
id: setup
- uses: ./actions/setup
+ uses: github/gh-aw-actions/setup@v0.71.5
with:
destination: ${{ runner.temp }}/gh-aw/actions
job-name: ${{ github.job }}
trace-id: ${{ needs.activation.outputs.setup-trace-id }}
- parent-span-id: ${{ needs.activation.outputs.setup-parent-span-id || needs.activation.outputs.setup-span-id }}
env:
GH_AW_SETUP_WORKFLOW_NAME: "Agentic Workflow Portfolio Yield"
GH_AW_CURRENT_WORKFLOW_REF: ${{ github.repository }}/.github/workflows/aw-portfolio-yield.lock.yml@${{ github.ref }}
- GH_AW_INFO_VERSION: "1.0.43"
+ GH_AW_INFO_VERSION: "1.0.40"
- name: Mask OTLP telemetry headers
run: bash "${RUNNER_TEMP}/gh-aw/actions/mask_otlp_headers.sh"
- name: Download agent output artifact
diff --git a/.github/workflows/aw-portfolio-yield.md b/.github/workflows/aw-portfolio-yield.md
index 307fd1e46d6..f093467c296 100644
--- a/.github/workflows/aw-portfolio-yield.md
+++ b/.github/workflows/aw-portfolio-yield.md
@@ -31,16 +31,38 @@ imports:
- shared/otel-observability.md
pre-agent-steps:
- name: Precompute workflow portfolio data
- run: |
- set -euo pipefail
- mkdir -p /tmp/gh-aw
- python3 scripts/aw_yield_precompute.py --workflows ".github/workflows" --out /tmp/aw-yield-precompute.json
+ uses: actions/github-script@v9
+ env:
+ AW_YIELD_WORKSPACE: ${{ github.workspace }}
+ AW_YIELD_WORKFLOWS: .github/workflows
+ AW_YIELD_OUT: /tmp/aw-yield-precompute.json
+ with:
+ script: |
+ const path = require("path");
+ const { runPrecompute } = require(path.join(process.env.AW_YIELD_WORKSPACE, "scripts/aw_yield_precompute.cjs"));
+ await runPrecompute({
+ workspace: process.env.AW_YIELD_WORKSPACE,
+ workflows: process.env.AW_YIELD_WORKFLOWS,
+ out: process.env.AW_YIELD_OUT,
+ });
post-steps:
- name: Finalize workflow portfolio report
- run: |
- set -euo pipefail
- mkdir -p /tmp/gh-aw
- python3 scripts/aw_yield_postcompute.py --precompute /tmp/aw-yield-precompute.json --agent-output /tmp/gh-aw --out /tmp/aw-yield-final.json
+ uses: actions/github-script@v9
+ env:
+ AW_YIELD_WORKSPACE: ${{ github.workspace }}
+ AW_YIELD_PRECOMPUTE: /tmp/aw-yield-precompute.json
+ AW_YIELD_AGENT_OUTPUT: /tmp/gh-aw
+ AW_YIELD_OUT: /tmp/aw-yield-final.json
+ with:
+ script: |
+ const path = require("path");
+ const { runPostcompute } = require(path.join(process.env.AW_YIELD_WORKSPACE, "scripts/aw_yield_postcompute.cjs"));
+ await runPostcompute({
+ workspace: process.env.AW_YIELD_WORKSPACE,
+ precompute: process.env.AW_YIELD_PRECOMPUTE,
+ agentOutput: process.env.AW_YIELD_AGENT_OUTPUT,
+ out: process.env.AW_YIELD_OUT,
+ });
---
# Agentic Workflow Portfolio Yield
@@ -49,7 +71,7 @@ You are the semantic interpreter for the repository's agentic workflow portfolio
## Hard Rules
- Treat `/tmp/aw-yield-precompute.json` as the factual source of truth.
-- OTel = facts. Python = math. Agent = interpretation.
+- OTel = facts. Deterministic precompute/postcompute = math. Agent = interpretation.
- Do **not** recompute raw scores, ranking, overlap values, fractions, or portfolio math from scratch.
- Do **not** invent telemetry, economics, confidence, or success evidence.
- Use the `otel` MCP server only for aggregated summaries when the precompute file explicitly indicates that telemetry exists but needs brief interpretation.
diff --git a/scripts/aw_yield_postcompute.cjs b/scripts/aw_yield_postcompute.cjs
new file mode 100644
index 00000000000..4589096be6e
--- /dev/null
+++ b/scripts/aw_yield_postcompute.cjs
@@ -0,0 +1,44 @@
+#!/usr/bin/env node
+
+const fs = require("fs");
+const path = require("path");
+const { execFileSync } = require("child_process");
+
/**
 * Locate a working Python interpreter for the postcompute step.
 *
 * Candidates are tried in order: the AW_YIELD_PYTHON override, then
 * "python3", then "python". A candidate counts as usable when invoking it
 * with `--version` exits successfully; probe failures are ignored and the
 * next candidate is tried.
 *
 * @returns {string} the first runnable interpreter command
 * @throws {Error} when no candidate responds to `--version`
 */
function resolvePythonCommand() {
  const candidates = [process.env.AW_YIELD_PYTHON, "python3", "python"];
  for (const candidate of candidates) {
    if (!candidate) continue;
    let usable = true;
    try {
      execFileSync(candidate, ["--version"], { stdio: "ignore" });
    } catch {
      usable = false;
    }
    if (usable) return candidate;
  }
  throw new Error("Unable to locate a Python interpreter for aw_yield_postcompute.py");
}
+
/**
 * Execute scripts/aw_yield_postcompute.py inside the workspace.
 *
 * Creates the agent-output directory and the report's parent directory,
 * then runs the Python script with inherited stdio so its output appears
 * in the job log.
 *
 * @param {object} options
 * @param {string} options.workspace - repository checkout root (also the cwd)
 * @param {string} options.precompute - path to the precompute JSON input
 * @param {string} options.agentOutput - directory holding agent output files
 * @param {string} options.out - path for the final report JSON
 * @throws {Error} when any option is missing or the Python process exits non-zero
 */
async function runPostcompute({ workspace, precompute, agentOutput, out }) {
  if (!workspace || !precompute || !agentOutput || !out) {
    throw new Error("workspace, precompute, agentOutput, and out are required");
  }
  // Ensure both target directories exist before the script writes into them.
  fs.mkdirSync(agentOutput, { recursive: true });
  fs.mkdirSync(path.dirname(out), { recursive: true });
  const script = path.join(workspace, "scripts/aw_yield_postcompute.py");
  const args = [script, "--precompute", precompute, "--agent-output", agentOutput, "--out", out];
  execFileSync(resolvePythonCommand(), args, { cwd: workspace, stdio: "inherit" });
}
+
+module.exports = { runPostcompute };
diff --git a/scripts/aw_yield_precompute.cjs b/scripts/aw_yield_precompute.cjs
new file mode 100644
index 00000000000..1d6f01ec333
--- /dev/null
+++ b/scripts/aw_yield_precompute.cjs
@@ -0,0 +1,35 @@
+#!/usr/bin/env node
+
+const fs = require("fs");
+const path = require("path");
+const { execFileSync } = require("child_process");
+
/**
 * Locate a working Python interpreter for the precompute step.
 *
 * Tries the AW_YIELD_PYTHON override first, then "python3", then "python",
 * skipping unset/empty entries. A candidate is accepted when `--version`
 * runs without throwing.
 *
 * @returns {string} the first runnable interpreter command
 * @throws {Error} when no candidate responds to `--version`
 */
function resolvePythonCommand() {
  const candidates = [process.env.AW_YIELD_PYTHON, "python3", "python"].filter(Boolean);
  for (const candidate of candidates) {
    try {
      execFileSync(candidate, ["--version"], { stdio: "ignore" });
      return candidate;
    } catch {
      // Probe failed; fall through to the next candidate.
    }
  }
  throw new Error("Unable to locate a Python interpreter for aw_yield_precompute.py");
}
+
/**
 * Execute scripts/aw_yield_precompute.py inside the workspace.
 *
 * Creates the output file's parent directory, then runs the Python script
 * with inherited stdio so its output appears in the job log.
 *
 * @param {object} options
 * @param {string} options.workspace - repository checkout root (also the cwd)
 * @param {string} options.workflows - workflows directory to scan
 * @param {string} options.out - path for the precompute JSON output
 * @throws {Error} when any option is missing or the Python process exits non-zero
 */
async function runPrecompute({ workspace, workflows, out }) {
  if (!workspace || !workflows || !out) {
    throw new Error("workspace, workflows, and out are required");
  }
  // The script writes `out` directly, so its parent must already exist.
  fs.mkdirSync(path.dirname(out), { recursive: true });
  const script = path.join(workspace, "scripts/aw_yield_precompute.py");
  const args = [script, "--workflows", workflows, "--out", out];
  execFileSync(resolvePythonCommand(), args, { cwd: workspace, stdio: "inherit" });
}
+
+module.exports = { runPrecompute };
From 5fe246f73105b5967f0f47ef7cd80ca66704246f Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Mon, 11 May 2026 06:31:23 +0000
Subject: [PATCH 13/13] Refine AWY github-script wrappers
Co-authored-by: mnkiefer <8320933+mnkiefer@users.noreply.github.com>
---
scripts/aw_yield_postcompute.cjs | 5 ++---
scripts/aw_yield_precompute.cjs | 4 ++--
2 files changed, 4 insertions(+), 5 deletions(-)
diff --git a/scripts/aw_yield_postcompute.cjs b/scripts/aw_yield_postcompute.cjs
index 4589096be6e..928664320a8 100644
--- a/scripts/aw_yield_postcompute.cjs
+++ b/scripts/aw_yield_postcompute.cjs
@@ -5,7 +5,7 @@ const path = require("path");
const { execFileSync } = require("child_process");
function resolvePythonCommand() {
- for (const command of [process.env.AW_YIELD_PYTHON, "python3", "python"]) {
+ for (const command of [process.env.AW_YIELD_PYTHON, "python3"]) {
if (!command) {
continue;
}
@@ -17,11 +17,10 @@ function resolvePythonCommand() {
throw new Error("Unable to locate a Python interpreter for aw_yield_postcompute.py");
}
-async function runPostcompute({ workspace, precompute, agentOutput, out }) {
+function runPostcompute({ workspace, precompute, agentOutput, out }) {
if (!workspace || !precompute || !agentOutput || !out) {
throw new Error("workspace, precompute, agentOutput, and out are required");
}
- fs.mkdirSync(agentOutput, { recursive: true });
fs.mkdirSync(path.dirname(out), { recursive: true });
execFileSync(
resolvePythonCommand(),
diff --git a/scripts/aw_yield_precompute.cjs b/scripts/aw_yield_precompute.cjs
index 1d6f01ec333..fa586936e9d 100644
--- a/scripts/aw_yield_precompute.cjs
+++ b/scripts/aw_yield_precompute.cjs
@@ -5,7 +5,7 @@ const path = require("path");
const { execFileSync } = require("child_process");
function resolvePythonCommand() {
- for (const command of [process.env.AW_YIELD_PYTHON, "python3", "python"]) {
+ for (const command of [process.env.AW_YIELD_PYTHON, "python3"]) {
if (!command) {
continue;
}
@@ -17,7 +17,7 @@ function resolvePythonCommand() {
throw new Error("Unable to locate a Python interpreter for aw_yield_precompute.py");
}
-async function runPrecompute({ workspace, workflows, out }) {
+function runPrecompute({ workspace, workflows, out }) {
if (!workspace || !workflows || !out) {
throw new Error("workspace, workflows, and out are required");
}