diff --git a/agent/factory.py b/agent/factory.py index dfe5993a..8254be47 100644 --- a/agent/factory.py +++ b/agent/factory.py @@ -56,9 +56,9 @@ def _resolve_model_id(requested: str | None, default: str) -> str: _MCP_FACTORIES = [ - lambda jwt_token: mcp_agentcore_runtime(jwt_token=jwt_token), - lambda jwt_token: mcp_aws_knowledge(), - lambda jwt_token: mcp_aws_pricing(), + lambda jwt_token, tool_filters=None: mcp_agentcore_runtime(jwt_token=jwt_token, tool_filters=tool_filters), + lambda jwt_token, tool_filters=None: mcp_aws_knowledge(), + lambda jwt_token, tool_filters=None: mcp_aws_pricing(), ] @@ -101,9 +101,11 @@ def create_agent(mode: str, user_id: str, session_id: str, jwt_token: str, chat_ # MCP servers mcp_servers = [] mcp_status = [] - for (name, required), factory_fn in zip(MCP_DEFS, _MCP_FACTORIES): + for i, ((name, required), factory_fn) in enumerate(zip(MCP_DEFS, _MCP_FACTORIES)): try: - mcp_servers.append(factory_fn(jwt_token)) + # Apply tool_filters only to the Presentation Maker server (index 0) + filters = {"allowed": cfg.allowed_tools} if (i == 0 and cfg.allowed_tools) else None + mcp_servers.append(factory_fn(jwt_token, tool_filters=filters)) mcp_status.append({"name": name, "status": "ok"}) except Exception as e: mcp_status.append({"name": name, "status": "error", "error": str(e)}) diff --git a/agent/mcp_clients.py b/agent/mcp_clients.py index 17ff235d..3b8a4607 100644 --- a/agent/mcp_clients.py +++ b/agent/mcp_clients.py @@ -19,17 +19,22 @@ ] -def mcp_agentcore_runtime(jwt_token: str) -> MCPClient: +def mcp_agentcore_runtime(jwt_token: str, tool_filters: dict | None = None) -> MCPClient: """Pattern 1: Amazon Bedrock AgentCore Runtime MCP Server with JWT Bearer authentication. Args: jwt_token: JWT access token from the caller (without "Bearer " prefix). + tool_filters: Optional tool filter dict (e.g. {"allowed": ["tool1", "tool2"]}). 
""" region = os.environ.get("AWS_REGION", os.environ.get("AWS_DEFAULT_REGION", "us-east-1")) runtime_arn = os.environ["MCP_RUNTIME_ARN"] encoded_arn = urllib.parse.quote(runtime_arn, safe="") url = f"https://bedrock-agentcore.{region}.amazonaws.com/runtimes/{encoded_arn}/invocations?qualifier=DEFAULT" + kwargs: dict = {} + if tool_filters: + kwargs["tool_filters"] = tool_filters + return MCPClient( lambda: streamablehttp_client( url=url, @@ -37,6 +42,7 @@ def mcp_agentcore_runtime(jwt_token: str) -> MCPClient: timeout=120, terminate_on_close=False, ), + **kwargs, ) diff --git a/agent/modes/__init__.py b/agent/modes/__init__.py index 62afc675..5bed4a62 100644 --- a/agent/modes/__init__.py +++ b/agent/modes/__init__.py @@ -17,11 +17,14 @@ class ModeConfig: use_composer: Include compose_slides tool. agent_model: Which model setting to use for the main agent — "chat" for conversations/planning, "create" for modes that also generate artifacts. + allowed_tools: If set, only these MCP tool names are loaded from the + Presentation Maker server. None = all tools except style-only tools. """ parts: list[Part] = field(default_factory=list) use_composer: bool = True agent_model: Literal["chat", "create"] = "chat" + allowed_tools: list[str] | None = None # Shared parts — referenced by multiple modes @@ -40,6 +43,21 @@ class ModeConfig: prefill_text="Starting the Briefing phase. I'll read the workflow to conduct the hearing properly.", ) +# Tool allowlists — explicit control over which MCP tools each mode can use. +# run_style_python is only available to style_creator. 
+_DECK_TOOLS = [ + "init_presentation", "analyze_template", "read_uploaded_file", + "list_styles", "apply_style", "read_examples", "list_workflows", + "read_workflows", "list_guides", "read_guides", "search_assets", + "list_asset_sources", "list_templates", + "run_python", "generate_pptx", "get_preview", "code_to_slide", + "grid", "import_attachment", +] + +_STYLE_TOOLS = [ + "run_style_python", "list_styles", "analyze_template", "read_uploaded_file", +] + MODES: dict[str, ModeConfig] = { "separated": ModeConfig(parts=[ @@ -51,7 +69,7 @@ class ModeConfig: _WF_SLIDE_GROUPS, _NOW, _PREFETCH_BRIEFING, - ]), + ], allowed_tools=_DECK_TOOLS), "vibe": ModeConfig(parts=[ _COMMON_LANGUAGE, Part(Source.file("role/vibe_agent"), target="system"), @@ -61,7 +79,7 @@ class ModeConfig: _WF_POST_COMPOSE, _WF_SLIDE_GROUPS, _NOW, - ]), + ], allowed_tools=_DECK_TOOLS), "single": ModeConfig( parts=[ _COMMON_LANGUAGE, @@ -71,6 +89,7 @@ class ModeConfig: ], use_composer=False, agent_model="create", + allowed_tools=_DECK_TOOLS, ), # Composer is a sub-agent invoked by compose_slides; ModeConfig is used # by compose_slides to build its prompt via the same resolve_parts path. @@ -94,5 +113,22 @@ class ModeConfig: target="history:tool_result", label="read_examples"), ], use_composer=False, + allowed_tools=_DECK_TOOLS, + ), + "style_creator": ModeConfig( + parts=[ + _COMMON_LANGUAGE, + Part(Source.file("role/style_creator"), target="system"), + _NOW, + Part( + Source.mcp("read_workflows", {"names": ["create-style"]}), + target="history:tool_result", + label="read_workflows", + prefill_text="I'll read the style creation workflow.", + ), + ], + use_composer=False, + agent_model="create", + allowed_tools=_STYLE_TOOLS, ), } diff --git a/agent/prompts/role/style_creator.md b/agent/prompts/role/style_creator.md new file mode 100644 index 00000000..8186971c --- /dev/null +++ b/agent/prompts/role/style_creator.md @@ -0,0 +1,61 @@ +You are a style creator for spec-driven-presentation-maker. 
+You create reusable style guides (HTML files) through dialogue with the user. +Respond in the same language as the user. + +## Your Role + +Help users create presentation styles that capture their design preferences. +A style is an HTML file with CSS variables and example slides that an agent reads +to understand how to design presentations. + +## Workflow + +Follow the `create-style` workflow loaded in your context. It defines the full process: +1. Gather preferences (analyze references, ask about design direction) +2. Find the premise (the core idea tying preferences together) +3. Design the style (tokens → composition → HTML) +4. Review with user (iterate until confirmed) + +**Critical:** Do NOT skip to HTML generation. Gather preferences and confirm direction first. + +## Tools + +**Primary tool: `run_style_python`** + +Execute Python code in a sandbox with file I/O access. + +Workspace layout: +- `style.html` — the target style file (read/write, saved back when save=True) +- `ref/{name}.html` — reference styles (read-only, loaded via ref_styles parameter) + +Usage patterns: +- Read a reference: `run_style_python(code="html = open('ref/corporate-executive.html').read(); print(html[:500])", ref_styles=["corporate-executive"])` +- Create/edit style: `run_style_python(code="open('style.html','w').write(html)", style_name="style-20260506-1430", save=True)` +- Compute colors: `run_style_python(code="from colorsys import rgb_to_hls; print(rgb_to_hls(0.2, 0.4, 0.6))")` + +Import statements are allowed — PIL, colorsys, numpy are available for color computation and palette extraction. + +The user's first message contains `[Style: ]` — use this as the `style_name` parameter. + +**Other tools:** +- `list_styles` — see available styles (discover names for ref_styles) +- `analyze_template` — analyze reference PPTX themes +- `hearing` — display questions to the user + +## HTML Writing Strategy + +Write incrementally via `run_style_python`: +1. 
First call: full HTML skeleton (head, :root variables, base CSS, first slide) with save=True +2. Subsequent calls: read back with `open('style.html').read()`, add/modify slides, save=True +3. Each save triggers a live preview update for the user + +## Quality Standards + +- All design tokens in `:root` as CSS variables +- All colors via `var()` references, never hardcoded in elements +- Text style classes (`.t-title`, `.t-body`, etc.) reference CSS variables +- Inline style only for position/size (`left`, `top`, `width`, `height`) +- Coordinate system: 1920×1080 absolute positioning +- Font sizes: pt units only +- `body { zoom: 0.7; }` for display scaling +- 5–6 slides maximum (cover + design areas) diff --git a/api/common.py b/api/common.py index 8be5f949..9211293f 100644 --- a/api/common.py +++ b/api/common.py @@ -18,6 +18,8 @@ def get_user_id(event: Any) -> str: """Extract user ID (Cognito sub) from API Gateway authorizer claims. + Supports both REST API (v1) and HTTP API (v2) JWT authorizer formats. + Args: event: Powertools current_event with request_context.authorizer. @@ -27,9 +29,13 @@ def get_user_id(event: Any) -> str: Raises: ValueError: If authorizer claims or sub are missing. """ - authorizer = event.request_context.authorizer - claims = (authorizer or {}).get("claims", {}) - sub = claims.get("sub") + # HTTP API v2: requestContext.authorizer.jwt.claims + # REST API v1: requestContext.authorizer.claims + raw = event.raw_event.get("requestContext", {}).get("authorizer", {}) + if "jwt" in raw: + sub = raw["jwt"].get("claims", {}).get("sub") + else: + sub = raw.get("claims", {}).get("sub") if not sub: raise ValueError("Unauthorized: missing user identity") return sub @@ -38,15 +44,19 @@ def get_user_id(event: Any) -> str: def get_user_alias(event: Any) -> str: """Extract user alias (email prefix) from API Gateway authorizer claims. + Supports both REST API (v1) and HTTP API (v2) JWT authorizer formats. 
+ Args: event: Powertools current_event with request_context.authorizer. Returns: Alias string (email prefix before @), or empty string if unavailable. """ - authorizer = event.request_context.authorizer - claims = (authorizer or {}).get("claims", {}) - email = claims.get("email", "") + raw = event.raw_event.get("requestContext", {}).get("authorizer", {}) + if "jwt" in raw: + email = raw["jwt"].get("claims", {}).get("email", "") + else: + email = raw.get("claims", {}).get("email", "") return email.split("@")[0] if email else "" diff --git a/api/index.py b/api/index.py index 4aeb05c9..24c4bcfd 100644 --- a/api/index.py +++ b/api/index.py @@ -10,7 +10,7 @@ # Security: AWS manages infrastructure security. You manage access control, # data classification, and IAM policies. See SECURITY.md for details. -Single Lambda with Powertools APIGatewayRestResolver. +Single Lambda with Powertools APIGatewayHttpResolver. Ported from spec-driven-presentation-maker-web deck-api, upload-api. """ @@ -22,7 +22,7 @@ import boto3 from aws_lambda_powertools import Logger, Metrics -from aws_lambda_powertools.event_handler import APIGatewayRestResolver, CORSConfig +from aws_lambda_powertools.event_handler import APIGatewayHttpResolver, CORSConfig from aws_lambda_powertools.utilities.typing import LambdaContext from boto3.dynamodb.conditions import Key from authz import authorize @@ -92,7 +92,7 @@ def preview_url(s3_key: str) -> Optional[str]: dynamodb = boto3.resource("dynamodb") table = dynamodb.Table(TABLE_NAME) s3_client = boto3.client("s3") -app = APIGatewayRestResolver(cors=cors_config) +app = APIGatewayHttpResolver(cors=cors_config) # --- KB (optional) --- _kb_sync = None @@ -219,7 +219,7 @@ def _deck_summary(item: Dict, extras: Dict[str, Dict]) -> Dict[str, Any]: def _extract_cover_html(html: str) -> str: - """Extract + first
from a style HTML. + """Extract <head> + first <div class="slide">
from a style HTML. Args: html: Full style HTML string. @@ -230,12 +230,13 @@ def _extract_cover_html(html: str) -> str: head_end = html.find("") if head_end == -1: return "" - first_slide = html.find('
<div class="slide">') - if first_slide == -1: + # Match <div class="slide"> or <div class="slide ..."> (additional classes)
+ slide_pattern = re.compile(r'
- next_slide = html.find('
', first_slide + 1) - end = next_slide if next_slide != -1 else html.find("", first_slide) + first_slide = matches[0].start() + end = matches[1].start() if len(matches) > 1 else html.find("", first_slide) if end == -1: end = len(html) return ( @@ -250,52 +251,512 @@ def _extract_cover_html(html: str) -> str: def list_styles() -> Dict[str, Any]: """List available styles with cover slide HTML for preview. + Includes builtin styles and user styles with pin/source metadata. + Returns: - Dict with styles list (name, description, coverHtml). + Dict with styles list (name, description, coverHtml, pinned, source). """ + user_id = get_user_id(app.current_event) + + # Builtin styles (cached) global _styles_cache # noqa: PLW0603 - if _styles_cache is not None: - return {"styles": _styles_cache} - - if not RESOURCE_BUCKET: - return {"styles": []} - - prefix = "references/examples/styles/" - resp = s3_client.list_objects_v2(Bucket=RESOURCE_BUCKET, Prefix=prefix) - styles: List[Dict[str, str]] = [] - for obj in resp.get("Contents", []): - key = obj["Key"] - if not key.endswith(".html"): - continue - name = key.removeprefix(prefix).removesuffix(".html") - body = s3_client.get_object(Bucket=RESOURCE_BUCKET, Key=key)["Body"].read().decode("utf-8") - description = "" - m = re.search(r"(.*?)", body, re.IGNORECASE) - if m: - description = m.group(1).strip() - styles.append({"name": name, "description": description, "coverHtml": _extract_cover_html(body)}) + if _styles_cache is None and RESOURCE_BUCKET: + prefix = "references/examples/styles/" + resp = s3_client.list_objects_v2(Bucket=RESOURCE_BUCKET, Prefix=prefix) + builtin: List[Dict[str, str]] = [] + for obj in resp.get("Contents", []): + key = obj["Key"] + if not key.endswith(".html"): + continue + name = key.removeprefix(prefix).removesuffix(".html") + body = s3_client.get_object(Bucket=RESOURCE_BUCKET, Key=key)["Body"].read().decode("utf-8") + description = "" + m = re.search(r"(.*?)", body, re.IGNORECASE) + if m: + description 
= m.group(1).strip() + builtin.append({"name": name, "description": description, "coverHtml": _extract_cover_html(body), "source": "builtin"}) + _styles_cache = builtin + + all_styles: List[Dict[str, Any]] = list(_styles_cache or []) + + # User styles + user_prefix = f"user-styles/{user_id}/" + try: + resp = s3_client.list_objects_v2(Bucket=BUCKET_NAME, Prefix=user_prefix) + for obj in resp.get("Contents", []): + key = obj["Key"] + if not key.endswith(".html"): + continue + name = key.removeprefix(user_prefix).removesuffix(".html") + body = s3_client.get_object(Bucket=BUCKET_NAME, Key=key)["Body"].read().decode("utf-8") + description = "" + m = re.search(r"(.*?)", body, re.IGNORECASE) + if m: + description = m.group(1).strip() + all_styles.append({"name": name, "description": description, "coverHtml": _extract_cover_html(body), "source": "user"}) + except Exception: + pass - _styles_cache = styles - return {"styles": styles} + # Pins + pin_resp = table.get_item(Key={"PK": f"USER#{user_id}", "SK": "STYLE_PINS"}) + pinned_names = set(pin_resp.get("Item", {}).get("pinned_styles", [])) + + for s in all_styles: + s["pinned"] = s["name"] in pinned_names + + return {"styles": all_styles} @app.get("/styles/") def get_style(name: str) -> Dict[str, Any]: - """Get full HTML for a single style. + """Get full HTML for a single style (user or builtin). Returns: Dict with name and fullHtml. 
""" - if not RESOURCE_BUCKET: - return {"error": f"Style not found: {name}"}, 404 if not re.fullmatch(r"[a-zA-Z0-9_-]+", name): return {"error": "Invalid style name"}, 400 - key = f"references/examples/styles/{name}.html" + + user_id = get_user_id(app.current_event) + + # Try user style first + user_key = f"user-styles/{user_id}/{name}.html" + try: + body = s3_client.get_object(Bucket=BUCKET_NAME, Key=user_key)["Body"].read().decode("utf-8") + return {"name": name, "fullHtml": body, "source": "user"} + except Exception: + pass + + # Fall back to builtin + if RESOURCE_BUCKET: + builtin_key = f"references/examples/styles/{name}.html" + try: + body = s3_client.get_object(Bucket=RESOURCE_BUCKET, Key=builtin_key)["Body"].read().decode("utf-8") + return {"name": name, "fullHtml": body, "source": "builtin"} + except Exception: + pass + + return {"error": f"Style not found: {name}"}, 404 + + +@app.post("/styles/pin") +def pin_style() -> Dict[str, Any]: + """Toggle pin status for a style. + + Body: {"name": str, "pinned": bool} + """ + body = app.current_event.json_body + name = body.get("name", "") + pinned = body.get("pinned", False) + + if not name or not re.fullmatch(r"[a-zA-Z0-9_-]+", name): + return {"error": "Invalid style name"}, 400 + + user_id = get_user_id(app.current_event) + pin_resp = table.get_item(Key={"PK": f"USER#{user_id}", "SK": "STYLE_PINS"}) + current_pins: List[str] = pin_resp.get("Item", {}).get("pinned_styles", []) + + if pinned and name not in current_pins: + current_pins.append(name) + elif not pinned and name in current_pins: + current_pins.remove(name) + + table.put_item(Item={"PK": f"USER#{user_id}", "SK": "STYLE_PINS", "pinned_styles": current_pins}) + return {"ok": True, "pinned_styles": current_pins} + + +@app.post("/styles/user") +def save_user_style() -> Dict[str, Any]: + """Save a user style (import or copy). 
+ + Body: {"name": str, "html": str} + """ + body = app.current_event.json_body + name = body.get("name", "") + html = body.get("html", "") + + if not name or not re.fullmatch(r"[a-zA-Z0-9_-]+", name): + return {"error": "Invalid style name"}, 400 + if not html: + return {"error": "html is required"}, 400 + + user_id = get_user_id(app.current_event) + key = f"user-styles/{user_id}/{name}.html" + s3_client.put_object(Bucket=BUCKET_NAME, Key=key, Body=html.encode("utf-8"), ContentType="text/html") + return {"saved": name} + + +@app.delete("/styles/user/") +def delete_user_style(style_name: str) -> Dict[str, Any]: + """Delete a user style. + + Also removes from pins if pinned. + """ + if not re.fullmatch(r"[a-zA-Z0-9_-]+", style_name): + return {"error": "Invalid style name"}, 400 + + user_id = get_user_id(app.current_event) + key = f"user-styles/{user_id}/{style_name}.html" + + try: + s3_client.delete_object(Bucket=BUCKET_NAME, Key=key) + except Exception: + return {"error": f"Style not found: {style_name}"}, 404 + + # Remove from pins + pin_resp = table.get_item(Key={"PK": f"USER#{user_id}", "SK": "STYLE_PINS"}) + current_pins: List[str] = pin_resp.get("Item", {}).get("pinned_styles", []) + if style_name in current_pins: + current_pins.remove(style_name) + table.put_item(Item={"PK": f"USER#{user_id}", "SK": "STYLE_PINS", "pinned_styles": current_pins}) + + return {"deleted": style_name} + + +@app.patch("/styles/user/") +def rename_user_style(style_name: str) -> Dict[str, Any]: + """Rename a user style. 
+ + Body: {"newName": str} + """ + body = app.current_event.json_body + new_name = body.get("newName", "") + + if not re.fullmatch(r"[a-zA-Z0-9_-]+", style_name): + return {"error": "Invalid style name"}, 400 + if not new_name or not re.fullmatch(r"[a-zA-Z0-9_-]+", new_name): + return {"error": "Invalid new name"}, 400 + + user_id = get_user_id(app.current_event) + old_key = f"user-styles/{user_id}/{style_name}.html" + new_key = f"user-styles/{user_id}/{new_name}.html" + + # Check source exists + try: + body_bytes = s3_client.get_object(Bucket=BUCKET_NAME, Key=old_key)["Body"].read() + except Exception: + return {"error": f"Style not found: {style_name}"}, 404 + + # Check destination doesn't exist + try: + s3_client.head_object(Bucket=BUCKET_NAME, Key=new_key) + return {"error": f"Style already exists: {new_name}"}, 409 + except Exception: + pass + + # Copy + delete + s3_client.put_object(Bucket=BUCKET_NAME, Key=new_key, Body=body_bytes, ContentType="text/html") + s3_client.delete_object(Bucket=BUCKET_NAME, Key=old_key) + + # Update pins + pin_resp = table.get_item(Key={"PK": f"USER#{user_id}", "SK": "STYLE_PINS"}) + current_pins: List[str] = pin_resp.get("Item", {}).get("pinned_styles", []) + if style_name in current_pins: + current_pins[current_pins.index(style_name)] = new_name + table.put_item(Item={"PK": f"USER#{user_id}", "SK": "STYLE_PINS", "pinned_styles": current_pins}) + + return {"renamed": {"from": style_name, "to": new_name}} + + +# --------------------------------------------------------------------------- +# Template endpoints +# --------------------------------------------------------------------------- + + +@app.get("/templates") +def list_templates() -> Dict[str, Any]: + """List all templates (builtin + user) with metadata. + + Builtin: S3 is source of truth for existence. DDB is metadata cache. + User: DDB is source of truth. 
+ """ + import tempfile + from pathlib import Path + + user_id = get_user_id(app.current_event) + templates: List[Dict[str, Any]] = [] + + # --- Builtin: S3 source of truth --- + s3_templates: Dict[str, str] = {} # name -> etag + if RESOURCE_BUCKET: + resp = s3_client.list_objects_v2(Bucket=RESOURCE_BUCKET, Prefix="templates/") + for obj in resp.get("Contents", []): + key = obj["Key"] + if key.endswith(".pptx"): + name = key.removeprefix("templates/").removesuffix(".pptx") + s3_templates[name] = obj["ETag"] + + # Batch get DDB cache for builtins + ddb_cache: Dict[str, Dict] = {} + if s3_templates: + keys = [{"PK": f"TEMPLATE#{n}", "SK": "META"} for n in s3_templates] + resp = table.meta.client.batch_get_item( + RequestItems={table.name: {"Keys": keys}} + ) + for item in resp.get("Responses", {}).get(table.name, []): + ddb_cache[item["name"]] = item + + # Build builtin list with lazy analysis + to_analyze: List[str] = [] + for name, etag in s3_templates.items(): + cached = ddb_cache.get(name) + if cached and cached.get("s3ETag") == etag: + analysis = {} + raw = cached.get("analysisJson", "") + if raw and raw != "{}": + analysis = json.loads(raw) if isinstance(raw, str) else raw + templates.append({ + "name": name, + "source": "builtin", + "description": cached.get("description", ""), + "theme_colors": analysis.get("theme_colors", {}), + "fonts": cached.get("fonts", {}), + "layout_count": len(analysis.get("layouts", [])), + }) + else: + to_analyze.append(name) + templates.append({ + "name": name, + "source": "builtin", + "description": "", + "theme_colors": {}, + "fonts": {}, + "layout_count": 0, + }) + + # Lazy analyze uncached builtins (async would be better but keep simple) + if to_analyze: + from sdpm.analyzer import analyze_template as _analyze + + tmp = Path(tempfile.mkdtemp()) + for name in to_analyze: + s3_key = f"templates/{name}.pptx" + tpl_path = tmp / f"{name}.pptx" + s3_client.download_file(RESOURCE_BUCKET, s3_key, str(tpl_path)) + analysis = 
_analyze(tpl_path) + etag = s3_templates[name] + item = { + "PK": f"TEMPLATE#{name}", + "SK": "META", + "name": name, + "s3Key": s3_key, + "s3ETag": etag, + "fonts": analysis.get("fonts", {}), + "analysisJson": json.dumps({ + "theme_colors": analysis.get("theme_colors", {}), + "layouts": analysis.get("layouts", []), + }), + } + table.put_item(Item=item) + # Update the placeholder in templates list + for t in templates: + if t["name"] == name and t["source"] == "builtin": + t["theme_colors"] = analysis.get("theme_colors", {}) + t["fonts"] = analysis.get("fonts", {}) + t["layout_count"] = len(analysis.get("layouts", [])) + break + + # --- User templates: DDB source of truth --- + resp = table.query( + KeyConditionExpression=Key("PK").eq(f"USER#{user_id}") & Key("SK").begins_with("TEMPLATE#"), + ) + for t in resp.get("Items", []): + analysis = {} + raw = t.get("analysisJson", "") + if raw and raw != "{}": + analysis = json.loads(raw) if isinstance(raw, str) else raw + templates.append({ + "name": t.get("name", ""), + "source": "user", + "description": t.get("description", ""), + "theme_colors": analysis.get("theme_colors", {}), + "fonts": t.get("fonts", {}), + "layout_count": len(analysis.get("layouts", [])), + }) + + return {"templates": templates} + + +@app.get("/templates/") +def download_template(name: str) -> Any: + """Download a template .pptx file. 
Searches user templates first, then builtin.""" + user_id = get_user_id(app.current_event) + + # Try user template + user_key = f"user-templates/{user_id}/{name}.pptx" + try: + s3_client.head_object(Bucket=BUCKET_NAME, Key=user_key) + url = s3_client.generate_presigned_url( + "get_object", Params={"Bucket": BUCKET_NAME, "Key": user_key}, ExpiresIn=300 + ) + return {"downloadUrl": url} + except Exception: + pass + + # Try builtin (S3 source of truth) + builtin_key = f"templates/{name}.pptx" try: - body = s3_client.get_object(Bucket=RESOURCE_BUCKET, Key=key)["Body"].read().decode("utf-8") + s3_client.head_object(Bucket=RESOURCE_BUCKET, Key=builtin_key) + url = s3_client.generate_presigned_url( + "get_object", Params={"Bucket": RESOURCE_BUCKET, "Key": builtin_key}, ExpiresIn=300 + ) + return {"downloadUrl": url} except Exception: - return {"error": f"Style not found: {name}"}, 404 - return {"name": name, "fullHtml": body} + return {"error": "Template not found"}, 404 + + +@app.post("/templates/user/upload-url") +def presign_template_upload() -> Dict[str, Any]: + """Generate a presigned PUT URL for template upload to S3.""" + user_id = get_user_id(app.current_event) + body = app.current_event.json_body + + name: str = body.get("name", "").strip() + if not name or not re.fullmatch(r"[a-zA-Z0-9_\-\s.()]+", name): + return {"error": "Invalid template name"}, 400 + + # Duplicate check + existing = table.get_item(Key={"PK": f"USER#{user_id}", "SK": f"TEMPLATE#{name}"}) + if existing.get("Item"): + return {"error": f'Template "{name}" already exists'}, 409 + + s3_key = f"user-templates/{user_id}/{name}.pptx" + url = s3_client.generate_presigned_url( + "put_object", + Params={ + "Bucket": BUCKET_NAME, + "Key": s3_key, + "ContentType": "application/vnd.openxmlformats-officedocument.presentationml.presentation", + }, + ExpiresIn=PRESIGNED_URL_EXPIRY, + ) + return {"presignedUrl": url, "s3Key": s3_key} + + +@app.post("/templates/user") +def upload_user_template() -> Dict[str, 
Any]: + """Register a user template after S3 upload. Analyzes and stores metadata. + + Expects JSON body: {name, description}. + The .pptx must already be uploaded to S3 via presigned URL. + """ + import tempfile + from pathlib import Path + + user_id = get_user_id(app.current_event) + body = app.current_event.json_body + + name: str = body.get("name", "").strip() + description: str = body.get("description", "") + + if not name or not re.fullmatch(r"[a-zA-Z0-9_\-\s.()]+", name): + return {"error": "Invalid template name"}, 400 + + s3_key = f"user-templates/{user_id}/{name}.pptx" + + # Verify file exists in S3 + try: + s3_client.head_object(Bucket=BUCKET_NAME, Key=s3_key) + except Exception: + return {"error": "File not found in S3. Upload via presigned URL first."}, 400 + + # Download and analyze + tmp = Path(tempfile.mkdtemp()) + tpl_path = tmp / f"{name}.pptx" + s3_client.download_file(BUCKET_NAME, s3_key, str(tpl_path)) + + from sdpm.analyzer import analyze_template as _analyze + + analysis = _analyze(tpl_path) + metadata = { + "description": description, + "fonts": analysis.get("fonts", {}), + "analysisJson": json.dumps({ + "theme_colors": analysis.get("theme_colors", {}), + "layouts": analysis.get("layouts", []), + }), + } + + # Store in DDB + table.put_item(Item={ + "PK": f"USER#{user_id}", + "SK": f"TEMPLATE#{name}", + "name": name, + "s3Key": s3_key, + **metadata, + }) + + return {"uploaded": name} + + +@app.delete("/templates/user/") +def delete_user_template(name: str) -> Dict[str, Any]: + """Delete a user template.""" + user_id = get_user_id(app.current_event) + + # Check exists + resp = table.get_item(Key={"PK": f"USER#{user_id}", "SK": f"TEMPLATE#{name}"}) + if not resp.get("Item"): + return {"error": "Template not found"}, 404 + + s3_key = f"user-templates/{user_id}/{name}.pptx" + s3_client.delete_object(Bucket=BUCKET_NAME, Key=s3_key) + table.delete_item(Key={"PK": f"USER#{user_id}", "SK": f"TEMPLATE#{name}"}) + + return {"deleted": name} + + 
+@app.patch("/templates/user/") +def patch_user_template(name: str) -> Dict[str, Any]: + """Rename or update description of a user template. + + Body: {"newName": str} or {"description": str} + """ + user_id = get_user_id(app.current_event) + body = app.current_event.json_body + + resp = table.get_item(Key={"PK": f"USER#{user_id}", "SK": f"TEMPLATE#{name}"}) + item = resp.get("Item") + if not item: + return {"error": "Template not found"}, 404 + + # Rename + new_name = body.get("newName", "").strip() + if new_name: + if not re.fullmatch(r"[a-zA-Z0-9_-]+", new_name): + return {"error": "Letters, numbers, hyphens, underscores only"}, 400 + # Check duplicate + dup = table.get_item(Key={"PK": f"USER#{user_id}", "SK": f"TEMPLATE#{new_name}"}) + if dup.get("Item"): + return {"error": "Name already exists"}, 409 + # S3 copy + delete + old_key = f"user-templates/{user_id}/{name}.pptx" + new_key = f"user-templates/{user_id}/{new_name}.pptx" + s3_client.copy_object( + Bucket=BUCKET_NAME, + CopySource={"Bucket": BUCKET_NAME, "Key": old_key}, + Key=new_key, + ) + s3_client.delete_object(Bucket=BUCKET_NAME, Key=old_key) + # DDB: delete old, put new + table.delete_item(Key={"PK": f"USER#{user_id}", "SK": f"TEMPLATE#{name}"}) + item["SK"] = f"TEMPLATE#{new_name}" + item["name"] = new_name + item["s3Key"] = new_key + table.put_item(Item=item) + return {"renamed": {"from": name, "to": new_name}} + + # Update description + description = body.get("description") + if description is not None: + table.update_item( + Key={"PK": f"USER#{user_id}", "SK": f"TEMPLATE#{name}"}, + UpdateExpression="SET description = :d", + ExpressionAttributeValues={":d": description}, + ) + return {"updated": name, "description": description} + + return {"error": "No action specified"}, 400 # --------------------------------------------------------------------------- diff --git a/api/requirements.txt b/api/requirements.txt index b54fb6aa..9aeca1ba 100644 --- a/api/requirements.txt +++ b/api/requirements.txt 
@@ -4,3 +4,5 @@ pdfplumber>=0.11.0 pypdf>=4.0.0 python-docx>=1.0.0 openpyxl>=3.1.0 +python-pptx>=1.0.0 +lxml>=5.0.0 diff --git a/infra/lib/data-stack.ts b/infra/lib/data-stack.ts index d18cce1a..c4aadf52 100644 --- a/infra/lib/data-stack.ts +++ b/infra/lib/data-stack.ts @@ -9,7 +9,6 @@ */ import * as cdk from "aws-cdk-lib"; -import * as cr from "aws-cdk-lib/custom-resources"; import * as dynamodb from "aws-cdk-lib/aws-dynamodb"; import * as iam from "aws-cdk-lib/aws-iam"; import * as lambda from "aws-cdk-lib/aws-lambda"; @@ -173,39 +172,8 @@ export class DataStack extends cdk.Stack { new cdk.CfnOutput(this, "PptxBucketName", { value: this.pptxBucket.bucketName }); new cdk.CfnOutput(this, "ResourceBucketName", { value: this.resourceBucket.bucketName }); - // --- Register default templates in Amazon DynamoDB --- - const templates = [ - { id: "blank-dark", name: "blank-dark", isDefault: true }, - { id: "blank-light", name: "blank-light", isDefault: false }, - ]; - - for (const tmpl of templates) { - new cr.AwsCustomResource(this, `RegisterTemplate_${tmpl.id}`, { - onCreate: { - service: "DynamoDB", - action: "putItem", - parameters: { - TableName: this.table.tableName, - Item: { - PK: { S: `TEMPLATE#${tmpl.id}` }, - SK: { S: "META" }, - name: { S: tmpl.name }, - s3Key: { S: `templates/${tmpl.name}.pptx` }, - analysisJson: { S: "{}" }, - isDefault: { BOOL: tmpl.isDefault }, - createdAt: { S: new Date().toISOString() }, - updatedAt: { S: new Date().toISOString() }, - }, - ConditionExpression: "attribute_not_exists(PK)", - }, - physicalResourceId: cr.PhysicalResourceId.of(`template-${tmpl.id}`), - ignoreErrorCodesMatching: "ConditionalCheckFailedException", - }, - policy: cr.AwsCustomResourcePolicy.fromSdkCalls({ - resources: [this.table.tableArn], - }), - }); - } + // Builtin templates: S3 is source of truth (BucketDeployment above). + // DDB metadata is populated lazily by API Lambda on first access. 
// --- Invocation Logging (optional, gated by features.enableInvocationLogging) --- if (props?.enableInvocationLogging) { diff --git a/infra/lib/web-ui-stack.ts b/infra/lib/web-ui-stack.ts index aa6d6cbb..bad94b79 100644 --- a/infra/lib/web-ui-stack.ts +++ b/infra/lib/web-ui-stack.ts @@ -14,7 +14,9 @@ */ import * as cdk from "aws-cdk-lib"; -import * as apigateway from "aws-cdk-lib/aws-apigateway"; +import * as apigatewayv2 from "aws-cdk-lib/aws-apigatewayv2"; +import * as apigatewayv2_authorizers from "aws-cdk-lib/aws-apigatewayv2-authorizers"; +import * as apigatewayv2_integrations from "aws-cdk-lib/aws-apigatewayv2-integrations"; import * as cloudfront from "aws-cdk-lib/aws-cloudfront"; import * as origins from "aws-cdk-lib/aws-cloudfront-origins"; import * as cognito from "aws-cdk-lib/aws-cognito"; @@ -228,7 +230,10 @@ function handler(event) { command: [ "bash", "-c", "pip install -r /asset-input/api/requirements.txt -t /asset-output/ && " + - "cp -r /asset-input/api/* /asset-input/shared /asset-output/", + "cp -r /asset-input/api/* /asset-input/shared /asset-output/ && " + + "mkdir -p /asset-output/sdpm && cp -r /asset-input/skill/sdpm/analyzer /asset-output/sdpm/ && " + + "cp /asset-input/skill/sdpm/__init__.py /asset-output/sdpm/ && " + + "mkdir -p /asset-output/sdpm/utils && cp /asset-input/skill/sdpm/utils/__init__.py /asset-input/skill/sdpm/utils/io.py /asset-output/sdpm/utils/", ], local: { tryBundle(outputDir: string): boolean { @@ -245,6 +250,10 @@ function handler(event) { } execSync(`cp -r ${root}/api/* ${outputDir}/`, { stdio: "inherit" }); execSync(`cp -r ${root}/shared ${outputDir}/shared`, { stdio: "inherit" }); + execSync(`mkdir -p ${outputDir}/sdpm/utils`, { stdio: "inherit" }); + execSync(`cp -r ${root}/skill/sdpm/analyzer ${outputDir}/sdpm/`, { stdio: "inherit" }); + execSync(`cp ${root}/skill/sdpm/__init__.py ${outputDir}/sdpm/`, { stdio: "inherit" }); + execSync(`cp ${root}/skill/sdpm/utils/__init__.py ${root}/skill/sdpm/utils/io.py 
${outputDir}/sdpm/utils/`, { stdio: "inherit" }); return true; }, }, @@ -308,42 +317,38 @@ function handler(event) { } } - // --- Amazon API Gateway --- - const api = new apigateway.RestApi(this, "SdpmApi", { - restApiName: "sdpm-api", - defaultCorsPreflightOptions: { - allowOrigins: apigateway.Cors.ALL_ORIGINS, - allowMethods: apigateway.Cors.ALL_METHODS, + // --- Amazon API Gateway HTTP API --- + const httpApi = new apigatewayv2.HttpApi(this, "SdpmHttpApi", { + apiName: "sdpm-api", + corsPreflight: { + allowOrigins: ["*"], + allowMethods: [apigatewayv2.CorsHttpMethod.ANY], allowHeaders: ["Content-Type", "Authorization"], + maxAge: cdk.Duration.hours(1), }, }); - const authorizer = new apigateway.CognitoUserPoolsAuthorizer(this, "CognitoAuthorizer", { - cognitoUserPools: [props.userPool], + + const issuerUrl = `https://cognito-idp.${this.region}.amazonaws.com/${props.userPool.userPoolId}`; + const jwtAuthorizer = new apigatewayv2_authorizers.HttpJwtAuthorizer("CognitoJwt", issuerUrl, { + jwtAudience: [props.userPoolClient.userPoolClientId], + }); + + const lambdaIntegration = new apigatewayv2_integrations.HttpLambdaIntegration("ApiIntegration", apiLambda); + + httpApi.addRoutes({ + path: "/{proxy+}", + methods: [apigatewayv2.HttpMethod.ANY], + integration: lambdaIntegration, + authorizer: jwtAuthorizer, + }); + + // OPTIONS must bypass authorizer for CORS preflight to succeed (HTTP 204) + httpApi.addRoutes({ + path: "/{proxy+}", + methods: [apigatewayv2.HttpMethod.OPTIONS], + integration: lambdaIntegration, + authorizer: new apigatewayv2.HttpNoneAuthorizer(), }); - const integration = new apigateway.LambdaIntegration(apiLambda); - const auth = { authorizer, authorizationType: apigateway.AuthorizationType.COGNITO }; - - const decks = api.root.addResource("decks"); - decks.addMethod("GET", integration, auth); - decks.addResource("favorites").addMethod("GET", integration, auth); - decks.addResource("shared").addMethod("GET", integration, auth); - 
decks.addResource("public").addMethod("GET", integration, auth); - const deck = decks.addResource("{deck_id}"); - deck.addMethod("GET", integration, auth); - deck.addMethod("DELETE", integration, auth); - deck.addMethod("PATCH", integration, auth); - deck.addResource("favorite").addMethod("POST", integration, auth); - const uploads = api.root.addResource("uploads"); - uploads.addResource("presign").addMethod("POST", integration, auth); - const upload = uploads.addResource("{upload_id}"); - upload.addResource("process").addMethod("POST", integration, auth); - upload.addResource("status").addMethod("GET", integration, auth); - api.root.addResource("chat").addResource("{session_id}").addMethod("GET", integration, auth); - const slides = api.root.addResource("slides"); - slides.addResource("search").addMethod("GET", integration, auth); - const styles = api.root.addResource("styles"); - styles.addMethod("GET", integration, auth); - styles.addResource("{name}").addMethod("GET", integration, auth); // --- Deploy web-ui static files to S3 --- // Bundle the web-ui at synth time so changes are auto-picked up without a @@ -415,7 +420,7 @@ function handler(event) { ClientId: props.userPoolClient.userPoolClientId, SiteUrl: this.siteUrl, AgentRuntimeArn: props.agentRuntimeArn, - ApiBaseUrl: api.url, + ApiBaseUrl: `${httpApi.apiEndpoint}/`, McpScope: props.mcpCustomScope ? 
` ${props.mcpCustomScope}` : "",
    });

@@ -512,13 +517,13 @@ function handler(event) {
        allowedIpV6AddressRanges: props.allowedIpV6AddressRanges,
      });
      new CfnWebACLAssociation(this, "ApiWafAssociation", {
-        resourceArn: api.deploymentStage.stageArn,
+        resourceArn: `arn:aws:apigateway:${this.region}::/apis/${httpApi.httpApiId}/stages/$default`, // NOTE(review): WAFv2 regional web ACLs support REST APIs only — verify this association works for an HTTP API (v2) stage or it will fail at deploy
        webAclArn: regionalWaf.webAclArn,
      });
    }

    // --- Outputs ---
    new cdk.CfnOutput(this, "SiteUrl", { value: this.siteUrl });
-    new cdk.CfnOutput(this, "ApiUrl", { value: api.url });
+    new cdk.CfnOutput(this, "ApiUrl", { value: httpApi.apiEndpoint });
  }
}
diff --git a/mcp-local/.kiro/acp-agents/sdpm-composer.json b/mcp-local/.kiro/acp-agents/sdpm-composer.json
index 15bbee32..e15072fb 100644
--- a/mcp-local/.kiro/acp-agents/sdpm-composer.json
+++ b/mcp-local/.kiro/acp-agents/sdpm-composer.json
@@ -2,13 +2,51 @@
   "name": "sdpm-composer",
   "description": "Composer agent for spec-driven-presentation-maker. Writes slide JSON, builds PPTX, measures, and polishes autonomously.
No user interaction.", "tools": [ - "@sdpm", + "@sdpm/analyze_template", + "@sdpm/apply_style", + "@sdpm/code_to_slide", + "@sdpm/generate_pptx", + "@sdpm/get_preview", + "@sdpm/grid", + "@sdpm/import_attachment", + "@sdpm/init_presentation", + "@sdpm/list_asset_sources", + "@sdpm/list_guides", + "@sdpm/list_styles", + "@sdpm/list_templates", + "@sdpm/list_workflows", + "@sdpm/measure_slides", + "@sdpm/pptx_to_json", + "@sdpm/read_examples", + "@sdpm/read_guides", + "@sdpm/read_workflows", + "@sdpm/run_python", + "@sdpm/search_assets", "read", "glob", "grep" ], "allowedTools": [ - "@sdpm/*", + "@sdpm/analyze_template", + "@sdpm/apply_style", + "@sdpm/code_to_slide", + "@sdpm/generate_pptx", + "@sdpm/get_preview", + "@sdpm/grid", + "@sdpm/import_attachment", + "@sdpm/init_presentation", + "@sdpm/list_asset_sources", + "@sdpm/list_guides", + "@sdpm/list_styles", + "@sdpm/list_templates", + "@sdpm/list_workflows", + "@sdpm/measure_slides", + "@sdpm/pptx_to_json", + "@sdpm/read_examples", + "@sdpm/read_guides", + "@sdpm/read_workflows", + "@sdpm/run_python", + "@sdpm/search_assets", "read", "glob", "grep" diff --git a/mcp-local/.kiro/acp-agents/sdpm-single.json b/mcp-local/.kiro/acp-agents/sdpm-single.json index 4f05903d..b62ee7f7 100644 --- a/mcp-local/.kiro/acp-agents/sdpm-single.json +++ b/mcp-local/.kiro/acp-agents/sdpm-single.json @@ -14,7 +14,27 @@ }, "prompt": "file://../../acp-agent-prompts/single-agent.md", "tools": [ - "@sdpm", + "@sdpm/analyze_template", + "@sdpm/apply_style", + "@sdpm/code_to_slide", + "@sdpm/generate_pptx", + "@sdpm/get_preview", + "@sdpm/grid", + "@sdpm/hearing", + "@sdpm/import_attachment", + "@sdpm/init_presentation", + "@sdpm/list_asset_sources", + "@sdpm/list_guides", + "@sdpm/list_styles", + "@sdpm/list_templates", + "@sdpm/list_workflows", + "@sdpm/measure_slides", + "@sdpm/pptx_to_json", + "@sdpm/read_examples", + "@sdpm/read_guides", + "@sdpm/read_workflows", + "@sdpm/run_python", + "@sdpm/search_assets", "read", "glob", "grep", 
@@ -22,7 +42,27 @@ "web_fetch" ], "allowedTools": [ - "@sdpm/*", + "@sdpm/analyze_template", + "@sdpm/apply_style", + "@sdpm/code_to_slide", + "@sdpm/generate_pptx", + "@sdpm/get_preview", + "@sdpm/grid", + "@sdpm/hearing", + "@sdpm/import_attachment", + "@sdpm/init_presentation", + "@sdpm/list_asset_sources", + "@sdpm/list_guides", + "@sdpm/list_styles", + "@sdpm/list_templates", + "@sdpm/list_workflows", + "@sdpm/measure_slides", + "@sdpm/pptx_to_json", + "@sdpm/read_examples", + "@sdpm/read_guides", + "@sdpm/read_workflows", + "@sdpm/run_python", + "@sdpm/search_assets", "read", "glob", "grep", diff --git a/mcp-local/.kiro/acp-agents/sdpm-spec.json b/mcp-local/.kiro/acp-agents/sdpm-spec.json index 2456fa80..1874c1c8 100644 --- a/mcp-local/.kiro/acp-agents/sdpm-spec.json +++ b/mcp-local/.kiro/acp-agents/sdpm-spec.json @@ -17,7 +17,27 @@ "file://../../../skill/references/workflows/create-new-1-briefing.md" ], "tools": [ - "@sdpm", + "@sdpm/analyze_template", + "@sdpm/apply_style", + "@sdpm/code_to_slide", + "@sdpm/generate_pptx", + "@sdpm/get_preview", + "@sdpm/grid", + "@sdpm/hearing", + "@sdpm/import_attachment", + "@sdpm/init_presentation", + "@sdpm/list_asset_sources", + "@sdpm/list_guides", + "@sdpm/list_styles", + "@sdpm/list_templates", + "@sdpm/list_workflows", + "@sdpm/measure_slides", + "@sdpm/pptx_to_json", + "@sdpm/read_examples", + "@sdpm/read_guides", + "@sdpm/read_workflows", + "@sdpm/run_python", + "@sdpm/search_assets", "use_subagent", "read", "glob", @@ -26,7 +46,27 @@ "web_fetch" ], "allowedTools": [ - "@sdpm/*", + "@sdpm/analyze_template", + "@sdpm/apply_style", + "@sdpm/code_to_slide", + "@sdpm/generate_pptx", + "@sdpm/get_preview", + "@sdpm/grid", + "@sdpm/hearing", + "@sdpm/import_attachment", + "@sdpm/init_presentation", + "@sdpm/list_asset_sources", + "@sdpm/list_guides", + "@sdpm/list_styles", + "@sdpm/list_templates", + "@sdpm/list_workflows", + "@sdpm/measure_slides", + "@sdpm/pptx_to_json", + "@sdpm/read_examples", + 
"@sdpm/read_guides", + "@sdpm/read_workflows", + "@sdpm/run_python", + "@sdpm/search_assets", "read", "glob", "grep", diff --git a/mcp-local/.kiro/acp-agents/sdpm-style.json b/mcp-local/.kiro/acp-agents/sdpm-style.json new file mode 100644 index 00000000..7d34444f --- /dev/null +++ b/mcp-local/.kiro/acp-agents/sdpm-style.json @@ -0,0 +1,42 @@ +{ + "name": "sdpm-style", + "description": "Style creator agent. Creates reusable style guides through dialogue.", + "mcpServers": { + "sdpm": { + "command": "uv", + "args": [ + "run", + "python", + "server_acp.py" + ], + "timeout": 120000 + } + }, + "prompt": "file://../../acp-agent-prompts/style-creator.md", + "tools": [ + "@sdpm/run_style_python", + "@sdpm/list_styles", + "@sdpm/analyze_template", + "@sdpm/hearing", + "read", + "glob", + "grep", + "web_fetch", + "web_search" + ], + "allowedTools": [ + "@sdpm/run_style_python", + "@sdpm/list_styles", + "@sdpm/analyze_template", + "@sdpm/hearing", + "read", + "glob", + "grep", + "web_fetch", + "web_search" + ], + "resources": [ + "file://../../../skill/references/workflows/create-style.md" + ], + "welcomeMessage": "What style would you like to create?" 
+} diff --git a/mcp-local/.kiro/acp-agents/sdpm-vibe.json b/mcp-local/.kiro/acp-agents/sdpm-vibe.json index 3b9ba050..51f57c7a 100644 --- a/mcp-local/.kiro/acp-agents/sdpm-vibe.json +++ b/mcp-local/.kiro/acp-agents/sdpm-vibe.json @@ -14,7 +14,27 @@ }, "prompt": "file://../../acp-agent-prompts/vibe-agent.md", "tools": [ - "@sdpm", + "@sdpm/analyze_template", + "@sdpm/apply_style", + "@sdpm/code_to_slide", + "@sdpm/generate_pptx", + "@sdpm/get_preview", + "@sdpm/grid", + "@sdpm/hearing", + "@sdpm/import_attachment", + "@sdpm/init_presentation", + "@sdpm/list_asset_sources", + "@sdpm/list_guides", + "@sdpm/list_styles", + "@sdpm/list_templates", + "@sdpm/list_workflows", + "@sdpm/measure_slides", + "@sdpm/pptx_to_json", + "@sdpm/read_examples", + "@sdpm/read_guides", + "@sdpm/read_workflows", + "@sdpm/run_python", + "@sdpm/search_assets", "use_subagent", "read", "glob", @@ -23,7 +43,27 @@ "web_fetch" ], "allowedTools": [ - "@sdpm/*", + "@sdpm/analyze_template", + "@sdpm/apply_style", + "@sdpm/code_to_slide", + "@sdpm/generate_pptx", + "@sdpm/get_preview", + "@sdpm/grid", + "@sdpm/hearing", + "@sdpm/import_attachment", + "@sdpm/init_presentation", + "@sdpm/list_asset_sources", + "@sdpm/list_guides", + "@sdpm/list_styles", + "@sdpm/list_templates", + "@sdpm/list_workflows", + "@sdpm/measure_slides", + "@sdpm/pptx_to_json", + "@sdpm/read_examples", + "@sdpm/read_guides", + "@sdpm/read_workflows", + "@sdpm/run_python", + "@sdpm/search_assets", "read", "glob", "grep", diff --git a/mcp-local/acp-agent-prompts/style-creator.md b/mcp-local/acp-agent-prompts/style-creator.md new file mode 100644 index 00000000..997a87f0 --- /dev/null +++ b/mcp-local/acp-agent-prompts/style-creator.md @@ -0,0 +1,53 @@ +You are a style creator for spec-driven-presentation-maker. +You create reusable style guides (HTML files) through dialogue with the user. +Respond in the same language as the user. + +## Your Role + +Help users create presentation styles that capture their design preferences. 
+A style is an HTML file with CSS variables and example slides that an agent reads
+to understand how to design presentations.
+
+## Workflow
+
+Follow the `create-style` workflow loaded in your resources. It defines the full process:
+1. Gather preferences (analyze references, ask about design direction)
+2. Find the premise (the core idea tying preferences together)
+3. Design the style (tokens → composition → HTML)
+4. Review with user (iterate until confirmed)
+
+**Critical:** Do NOT skip to HTML generation. Gather preferences and confirm direction first.
+
+## Tools
+
+**Primary tool: `run_style_python`**
+
+Execute Python code with two sandbox functions:
+- `read_style(name)` — read existing styles for reference
+- `write_style(name, html)` — save HTML to user's style directory
+
+The user's first message contains `[Style: ]` — this is the file stem to use
+for all `write_style` calls.
+
+**Other tools:**
+- `list_styles` — see available styles (for reference)
+- `read` — read user-uploaded reference files (upload responses include the file path)
+- `analyze_template` — analyze reference PPTX themes
+
+## HTML Writing Strategy
+
+Write incrementally via `run_style_python`:
+1. First call: full HTML skeleton (head, :root variables, base CSS, first slide)
+2. Subsequent calls: read back with `read_style(name)`, add/modify slides
+3. Each `write_style` call saves immediately — the user sees live preview updates
+
+## Quality Standards
+
+- All design tokens in `:root` as CSS variables
+- All colors via `var()` references, never hardcoded in elements
+- Text style classes (`.t-title`, `.t-body`, etc.)
reference CSS variables +- Inline style only for position/size (`left`, `top`, `width`, `height`) +- Coordinate system: 1920×1080 absolute positioning +- Font sizes: pt units only +- `body { zoom: 0.7; }` for display scaling +- 5–6 slides maximum (cover + design areas) diff --git a/mcp-local/sandbox.py b/mcp-local/sandbox.py index a8ee0bb1..7f85beeb 100644 --- a/mcp-local/sandbox.py +++ b/mcp-local/sandbox.py @@ -113,3 +113,67 @@ def list_files(subdir="."): def make_runner(deck_id: str) -> str: """Return the runner script. Static template — code comes via stdin.""" return _RUNNER_WITH_DECK if deck_id else _RUNNER_NO_DECK + + +# --- Style runner --- + +_RUNNER_STYLE = '''\ +import json, os, sys, re +from pathlib import Path + +user_styles_dir = Path(sys.argv[1]).resolve() +styles_dirs = json.loads(sys.argv[2]) + +def _validate_name(name): + if not name or "/" in name or "\\\\" in name or ".." in name: + raise PermissionError(f"Invalid style name: {name}") + return name + +def read_style(name): + """Read an existing style HTML by name (searches all style dirs).""" + name = _validate_name(name) + filename = name + ".html" if not name.endswith(".html") else name + for d in styles_dirs: + p = Path(d) / filename + if p.exists(): + return p.read_text(encoding="utf-8") + available = [] + for d in styles_dirs: + dp = Path(d) + if dp.is_dir(): + available.extend(f.stem for f in dp.iterdir() if f.suffix == ".html") + raise FileNotFoundError(f"Style not found: {name}. Available: {sorted(set(available))}") + +def write_style(name, html): + """Save style HTML to user styles directory. 
name = file stem (no .html extension)."""
+    name = _validate_name(name)
+    filename = name + ".html" if not name.endswith(".html") else name
+    user_styles_dir.mkdir(parents=True, exist_ok=True)
+    dest = user_styles_dir / filename
+    dest.write_text(html, encoding="utf-8")
+    title_m = re.search(r"<title>(.+?)</title>", html, re.IGNORECASE)
+    title = title_m.group(1).strip() if title_m else name
+    # Signal save result to parent process via stderr JSON
+    sys.stderr.write("__STYLE_SAVED__" + json.dumps({"title": title, "filename": filename, "path": str(dest)}) + "\\n")
+
+_safe_builtins = {
+    "print": print, "len": len, "range": range, "enumerate": enumerate,
+    "sorted": sorted, "isinstance": isinstance, "type": type,
+    "str": str, "int": int, "float": float, "bool": bool,
+    "list": list, "dict": dict, "tuple": tuple, "set": set,
+    "min": min, "max": max, "sum": sum, "abs": abs, "round": round,
+    "any": any, "all": all, "zip": zip, "map": map, "filter": filter,
+    "reversed": reversed, "True": True, "False": False, "None": None,
+    "ValueError": ValueError, "PermissionError": PermissionError,
+    "FileNotFoundError": FileNotFoundError, "Exception": Exception,
+}
+
+code = sys.stdin.read()
+exec(code, {"__builtins__": _safe_builtins,
+            "read_style": read_style, "write_style": write_style})
+'''
+
+
+def make_style_runner() -> str:
+    """Return the style runner script. Expects sys.argv[1]=user_styles_dir, sys.argv[2]=styles_dirs JSON."""
+    return _RUNNER_STYLE
diff --git a/mcp-local/server.py b/mcp-local/server.py
index 6859355c..5196ed51 100644
--- a/mcp-local/server.py
+++ b/mcp-local/server.py
@@ -214,24 +214,24 @@ def search_assets(
 @mcp.tool()
 def list_templates() -> str:
-    """List all available PPTX templates with name.
+    """List all available PPTX templates with metadata.

     Includes user-local templates (via $SDPM_TEMPLATES_DIR or
     ~/.config/sdpm/templates/) in addition to the package-bundled ones.
     User-local templates shadow bundled templates with the same stem.
Returns: - JSON with list of template names. + JSON with list of templates including name, source, description, fonts, layout_count. """ - from sdpm.api import get_templates_dirs + from sdpm.api import get_templates_dirs, list_templates_with_metadata + from sdpm.config import get_state - seen: dict[str, str] = {} - for d in get_templates_dirs(): - if not d.exists(): - continue - for t in sorted(d.glob("*.pptx")): - seen.setdefault(t.stem, t.stem) - return json.dumps({"templates": sorted(seen)}) + templates_dirs = get_templates_dirs() + metadata = get_state().get("template_metadata", {}) + return json.dumps( + {"templates": list_templates_with_metadata(templates_dirs, metadata)}, + ensure_ascii=False, + ) @mcp.tool() @@ -245,15 +245,15 @@ def list_asset_sources() -> str: @mcp.tool() -def list_styles() -> str: +def list_styles(include_all: bool = False) -> str: """List available design styles for presentations. - Workflow equivalent: ``examples styles`` + Default returns pinned + user styles only. Pass include_all=True for all. Returns: - JSON with list of styles (name + description). + JSON with list of styles (name, description, pinned, source). """ - return json.dumps(_list_styles(skill_dir=_SKILL_DIR), ensure_ascii=False) + return json.dumps(_list_styles(skill_dir=_SKILL_DIR, include_all=include_all), ensure_ascii=False) @mcp.tool() diff --git a/mcp-local/server_acp.py b/mcp-local/server_acp.py index aba97f6e..25ccaed1 100644 --- a/mcp-local/server_acp.py +++ b/mcp-local/server_acp.py @@ -41,15 +41,16 @@ @mcp.tool() -def list_styles() -> str: +def list_styles(include_all: bool = False) -> str: """List available design styles for presentations. + Default returns pinned + user styles only. Pass include_all=True for all. + Returns: - JSON with list of styles (name + description). + JSON with list of styles (name, description, pinned, source). 
""" - from sdpm.reference import list_styles as _list_styles - styles_dir = _SKILL_DIR / "references" / "examples" / "styles" - return json.dumps({"styles": _list_styles(styles_dir)}, ensure_ascii=False) + from tools import list_styles as _list_styles + return json.dumps(_list_styles(skill_dir=_SKILL_DIR, include_all=include_all), ensure_ascii=False) # --------------------------------------------------------------------------- @@ -233,7 +234,8 @@ def init_presentation(name: str, template: str = "") -> str: from datetime import datetime from sdpm.analyzer import extract_fonts - base_dir = Path(os.environ.get("SDPM_DECK_ROOT", "")) or Path.home() / "Documents" / "SDPM-Presentations" + root = os.environ.get("SDPM_DECK_ROOT", "") + base_dir = Path(root) if root else Path.home() / "Documents" / "SDPM-Presentations" ts = datetime.now().strftime("%Y%m%d-%H%M") dir_name = f"{ts}-{name}" if name else ts out_dir = base_dir / dir_name @@ -328,10 +330,11 @@ def apply_style(deck_id: str, style: str) -> str: JSON with status and the copied file path. """ import shutil - styles_dir = _SKILL_DIR / "references" / "examples" / "styles" - src = styles_dir / f"{style}.html" - if not src.exists(): - return json.dumps({"error": f"Style not found: {style}. Available: {[s.stem for s in styles_dir.glob('*.html')]}"}) + from sdpm.api import get_styles_dirs, _find_style_in_dirs + src = _find_style_in_dirs(style, get_styles_dirs()) + if src is None: + available = [p.stem for d in get_styles_dirs() if d.is_dir() for p in d.glob("*.html")] + return json.dumps({"error": f"Style not found: {style}. 
Available: {sorted(set(available))}"}) deck_path = Path(deck_id) if not deck_path.is_dir(): return json.dumps({"error": f"Deck directory not found: {deck_id}"}) @@ -363,8 +366,8 @@ def _rejection_message(violations: list[str], has_deck: bool) -> str: @mcp.tool() -def run_python(code: str, deck_id: str = "", save: bool = False, - measure_slides: list[str] | None = None, purpose: str = "") -> str: +def run_python(purpose: str, code: str, deck_id: str = "", save: bool = False, + measure_slides: list[str] | None = None) -> str: """Execute Python code in a sandboxed environment. Code runs in a restricted subprocess. `import` statements and direct file @@ -425,11 +428,11 @@ def run_python(code: str, deck_id: str = "", save: bool = False, **Always specify measure_slides when editing slides.** Args: + purpose: Brief user-facing description of what this code does. Shown in UI. code: Python code to execute (no import statements allowed). deck_id: Deck output_dir path. Optional. save: When True, triggers PPTX build + preview + SVG compose after execution. measure_slides: Slide slugs to measure after execution (e.g. ["title", "feature-a"]). - purpose: Brief description shown in UI. Returns: JSON: {"output", "measure"?, "pptx"?, "preview"?, "compose"?} @@ -484,6 +487,31 @@ def run_python(code: str, deck_id: str = "", save: bool = False, "Read workflow `create-new-1-outline` for the correct format." 
) + # Lint and sanitize slide JSON (pre-save: before measure/build) + from sdpm.schema.lint import lint_and_sanitize + + slides_dir = deck_dir / "slides" + if slides_dir.is_dir(): + lint_diagnostics: list[dict] = [] + for slide_file in sorted(slides_dir.glob("*.json")): + try: + slide_data = json.loads(slide_file.read_text(encoding="utf-8")) + cleaned, diags = lint_and_sanitize(slide_data) + if diags: + slug = slide_file.stem + for d in diags: + d["slug"] = slug + lint_diagnostics.extend(diags) + slide_file.write_text( + json.dumps(cleaned, ensure_ascii=False, indent=2) + "\n", + encoding="utf-8", + ) + except (json.JSONDecodeError, TypeError): + pass + if lint_diagnostics: + errs = result.setdefault("errors", {}) + errs["lintDiagnostics"] = lint_diagnostics + # Build slug → page number mapping from outline.md (for slug-based measure_slides) def _slug_to_page() -> dict[str, int]: from sdpm.api import parse_outline_slugs @@ -686,33 +714,144 @@ def _fp(c: dict) -> str: # --------------------------------------------------------------------------- -# Upload / attachment tools (Local version) +# grid: compute CSS Grid layout coordinates # --------------------------------------------------------------------------- -from upload_tools import ( # noqa: E402 - read_uploaded_file as _read_uploaded_file, - import_attachment as _import_attachment, - cleanup_old_sessions as _cleanup_old_sessions, -) @mcp.tool() -def read_uploaded_file(upload_id: str, offset: int = 0, limit: int = 2000) -> list: - """Read the content of a file uploaded by the user. +def grid(purpose: str, spec: str) -> str: + """Compute CSS Grid layout coordinates from a grid specification. + Use before placing elements to calculate exact positions. + + Args: + purpose: Brief user-facing description (e.g. '3-column icon layout'). Shown in UI. + spec: JSON string with grid spec. Keys: + area: {"x", "y", "w", "h"} (required) + columns: track-list string, e.g. 
"1fr 2fr" (default "1fr") + rows: track-list string (default "1fr") + gap: str or int, e.g. "20" or "20 40" (row-gap col-gap) + areas: 2D list of area names (optional) + items: dict of item overrides (optional) + + Returns: + JSON with named rectangles containing x, y, w, h coordinates. + """ + from sdpm.layout.grid import compute_grid + + try: + grid_spec = json.loads(spec) + except (json.JSONDecodeError, TypeError) as e: + return json.dumps({"error": f"Invalid grid spec JSON: {e}"}) + result = compute_grid(grid_spec) + return json.dumps(result, ensure_ascii=False, indent=2) + + +# --------------------------------------------------------------------------- +# run_style_python: sandboxed execution for style creation/editing +# --------------------------------------------------------------------------- + + +@mcp.tool() +def run_style_python(purpose: str, code: str) -> str: + """Execute Python code in a sandboxed environment for style creation. + + ## Sandbox functions - Files are pre-processed at upload time (converted to Markdown/JSON for docs, - stored as-is for images/text). Output uses cat -n format (line numbers) for - citation and navigation. No deck_id required — works during hearing. + read_style(name) → str Read an existing style HTML (builtin or user) + write_style(name, html) → None Save HTML to user styles directory + + ## Rules + + - `name` is the file stem without .html (e.g. 
"corporate-executive", "style-20260505-1430") + - No import statements or direct file access allowed + - Use print() for computation output + + ## Examples + + # Read an existing style for reference + html = read_style("corporate-executive") + print(html[:200]) + + # Create a new style + html = ''' + My Custom Style + ...''' + write_style("style-20260505-1430", html) + + # Edit an existing user style + html = read_style("style-20260505-1430") + html = html.replace("old color", "new color") + write_style("style-20260505-1430", html) Args: - upload_id: The upload identifier from the [Attached: ...] message - (format: "sessionId/shortId_filename"). - offset: Starting line number (0-indexed). Default 0. - limit: Number of lines to read. Default 2000. + purpose: Brief user-facing description of what this code does. Shown in UI. + code: Python code to execute (no import statements allowed). Returns: - Text content with line numbers and/or image previews. + JSON: {"output", "saved"?} """ - return _read_uploaded_file(upload_id=upload_id, offset=offset, limit=limit) + from sandbox import check_code, make_style_runner + + result: dict = {} + + # AST inspection + violations = check_code(code) + if violations: + lines = ["Code rejected by sandbox:"] + lines.extend(f" {v}" for v in violations) + lines.append("") + lines.append("Use sandbox functions instead:") + lines.append(" read_style(name) → str (read existing style HTML)") + lines.append(" write_style(name, html) → None (save to user styles)") + result["output"] = "\n".join(lines) + return json.dumps(result, ensure_ascii=False) + + from sdpm.config import get_user_config_dir + from sdpm.api import get_styles_dirs + + user_styles_dir = str(get_user_config_dir() / "styles") + styles_dirs_json = json.dumps([str(d) for d in get_styles_dirs()]) + + try: + runner = make_style_runner() + proc = subprocess.run( + [sys.executable, "-c", runner, user_styles_dir, styles_dirs_json], + input=code, capture_output=True, text=True, 
timeout=120, + ) + output = proc.stdout + stderr = proc.stderr or "" + + # Extract save signal from stderr + save_lines = [] + other_stderr = [] + for line in stderr.splitlines(): + if line.startswith("__STYLE_SAVED__"): + save_lines.append(line[len("__STYLE_SAVED__"):]) + else: + other_stderr.append(line) + + if other_stderr: + output += "\n" + "\n".join(other_stderr) + result["output"] = output.strip() + + if save_lines: + result["saved"] = json.loads(save_lines[-1]) + + except subprocess.TimeoutExpired: + result["output"] = "Error: execution timed out (120s)" + except Exception as e: + result["output"] = f"Error: {e}" + + return json.dumps(result, ensure_ascii=False) + + +# --------------------------------------------------------------------------- +# Upload / attachment tools (Local version) +# --------------------------------------------------------------------------- +from upload_tools import ( # noqa: E402 + import_attachment as _import_attachment, + cleanup_old_sessions as _cleanup_old_sessions, +) @mcp.tool() diff --git a/mcp-local/tools.py b/mcp-local/tools.py index 6f153d30..98821a33 100644 --- a/mcp-local/tools.py +++ b/mcp-local/tools.py @@ -100,18 +100,21 @@ def list_asset_sources(skill_dir: Path) -> dict[str, Any]: return {"sources": list_sources()} -def list_styles(skill_dir: Path) -> dict[str, Any]: - """List available design styles and open gallery in browser. +def list_styles(skill_dir: Path, include_all: bool = False) -> dict[str, Any]: + """List available design styles with pin/source metadata. Searches user-local styles directory (``~/.config/sdpm/styles/``) in addition to the package-bundled styles. User-local entries shadow bundled ones with the same name. + + Default returns pinned + user styles only (or all if no pins). + Pass include_all=True to return everything. 
""" - from sdpm.api import get_styles_dirs - from sdpm.reference import list_styles_merged, open_styles_gallery + from sdpm.api import get_styles_dirs, list_styles_filtered + from sdpm.config import get_state styles_dirs = get_styles_dirs() - open_styles_gallery(styles_dirs) - return {"styles": list_styles_merged(styles_dirs)} + pinned = get_state().get("pinned_styles", []) + return {"styles": list_styles_filtered(styles_dirs, pinned, include_all)} def read_examples(names: list[str], skill_dir: Path) -> dict[str, Any]: diff --git a/mcp-local/upload_tools.py b/mcp-local/upload_tools.py index 5623c13c..44ccfee9 100644 --- a/mcp-local/upload_tools.py +++ b/mcp-local/upload_tools.py @@ -40,7 +40,8 @@ def _deck_root() -> Path: - return Path(os.environ.get("SDPM_DECK_ROOT", "")) or Path.home() / "Documents" / "SDPM-Presentations" + root = os.environ.get("SDPM_DECK_ROOT", "") + return Path(root) if root else Path.home() / "Documents" / "SDPM-Presentations" def _session_dir(session_id: str) -> Path: @@ -88,6 +89,39 @@ def cleanup_old_sessions(ttl_days: int = _SESSION_TTL_DAYS) -> int: return count +def _analyze_colors(file_path: Path) -> dict | None: + """Analyze dominant colors of an image. 
Returns None on failure.""" + try: + img = PILImage.open(file_path).convert("RGB") + small = img.resize((100, 100)) + pixels = list(small.getdata()) + + # Brightness (luminance average) + lum_sum = sum(r * 0.299 + g * 0.587 + b * 0.114 for r, g, b in pixels) + lum_avg = lum_sum / len(pixels) + brightness = "dark" if lum_avg < 100 else "light" if lum_avg > 155 else "mixed" + + # Saturation (HSV S channel average) + hsv = img.resize((100, 100)).convert("HSV") + hsv_pixels = list(hsv.getdata()) + s_avg = sum(s for _, s, _ in hsv_pixels) / len(hsv_pixels) + saturation = "monochrome" if s_avg < 30 else "muted" if s_avg < 100 else "vivid" + + # Palette: quantize to 5 colors + quantized = small.quantize(colors=5, method=PILImage.Quantize.MEDIANCUT) + palette_data = quantized.getpalette() + color_counts = sorted(quantized.getcolors(), reverse=True) + total = sum(count for count, _ in color_counts) + palette = [] + for count, idx in color_counts[:5]: + r, g, b = palette_data[idx * 3], palette_data[idx * 3 + 1], palette_data[idx * 3 + 2] + palette.append({"hex": f"#{r:02X}{g:02X}{b:02X}", "ratio": round(count / total, 2)}) + + return {"palette": palette, "brightness": brightness, "saturation": saturation} + except Exception: + return None + + def upload_file(session_id: str, file_path: str, filename: str = "") -> str: """Convert and store a file in session storage. @@ -97,7 +131,7 @@ def upload_file(session_id: str, file_path: str, filename: str = "") -> str: filename: Original filename (defaults to basename of file_path). Returns: - JSON with {uploadId, fileName, fileType, status, warnings?}. + JSON with {uploadId, fileName, fileType, status, filePath?, colorAnalysis?}. 
""" src = Path(file_path) if not src.exists(): @@ -140,7 +174,7 @@ def upload_file(session_id: str, file_path: str, filename: str = "") -> str: (upload_dir / "meta.json").write_text(json.dumps(meta, ensure_ascii=False), encoding="utf-8") upload_id = f"{session_id}/{short_id}_{original_name}" - response = { + response: dict = { "uploadId": upload_id, "fileName": original_name, "fileType": file_type, @@ -150,6 +184,28 @@ def upload_file(session_id: str, file_path: str, filename: str = "") -> str: response["warnings"] = meta["warnings"] if meta.get("error"): response["error"] = meta["error"] + + # Add filePath for direct agent access (Local/ACP only) + if meta["status"] == "completed": + response["filePath"] = str((upload_dir / original_name).resolve()) + elif meta["status"] == "converted": + stem = original_name.rsplit(".", 1)[0] if "." in original_name else original_name + md_path = upload_dir / f"{stem}.md" + json_path = upload_dir / "slides.json" + if md_path.exists(): + response["filePath"] = str(md_path.resolve()) + elif json_path.exists(): + response["filePath"] = str(json_path.resolve()) + images_dir = upload_dir / "images" + if images_dir.exists(): + response["imagesDir"] = str(images_dir.resolve()) + + # Color analysis for images + if file_type.startswith("image/") and meta["status"] == "completed": + colors = _analyze_colors(upload_dir / original_name) + if colors: + response["colorAnalysis"] = colors + return json.dumps(response, ensure_ascii=False) diff --git a/mcp-server/server.py b/mcp-server/server.py index 63716000..ec2f88b5 100644 --- a/mcp-server/server.py +++ b/mcp-server/server.py @@ -220,7 +220,7 @@ def analyze_template(template: str) -> str: if not template or not template.strip(): return json.dumps({"error": "template is required"}) return json.dumps( - template_mod.analyze_template(template_name=template, storage=_storage), + template_mod.analyze_template(template_name=template, storage=_storage, user_id=_get_user_id()), ensure_ascii=False, ) 
@@ -433,22 +433,26 @@ def list_asset_sources() -> str: @mcp.tool() -def list_styles() -> str: +def list_styles(include_all: bool = False) -> str: """List available design styles for presentations. - Workflow equivalent: ``examples styles`` + Default returns pinned + user styles only. Pass include_all=True for all. Returns: - JSON with list of styles (name + description). + JSON with list of styles (name, description, pinned, source). """ - return json.dumps(reference.list_styles(storage=_storage), ensure_ascii=False) + user_id = _get_user_id() + return json.dumps( + reference.list_styles(storage=_storage, user_id=user_id, include_all=include_all), + ensure_ascii=False, + ) @mcp.tool() def apply_style(deck_id: str, style: str) -> str: """Copy a style as the deck's art direction. Call during Art Direction phase. - Copies references/examples/styles/{style}.html → specs/art-direction.html. + Searches user styles first, then builtin styles. Args: deck_id: Deck ID. @@ -460,8 +464,22 @@ def apply_style(deck_id: str, style: str) -> str: _check_deck_access(deck_id) if not re.fullmatch(r"[a-zA-Z0-9_-]+", style): raise ValueError("Invalid style name") - html_key = f"references/examples/styles/{style}.html" - html_bytes = _storage.download_file(key=html_key) + + user_id = _get_user_id() + html_bytes = None + + # Try user style first + user_key = f"user-styles/{user_id}/{style}.html" + try: + html_bytes = _storage.download_file_from_pptx_bucket(key=user_key) + except Exception: + pass + + # Fall back to builtin + if html_bytes is None: + builtin_key = f"references/examples/styles/{style}.html" + html_bytes = _storage.download_file(key=builtin_key) + dest_key = f"decks/{deck_id}/specs/art-direction.html" _storage.upload_file(key=dest_key, data=html_bytes, content_type="text/html") return json.dumps({"applied": style, "path": "specs/art-direction.html"}) @@ -540,13 +558,13 @@ def read_guides(names: list[str]) -> str: @mcp.tool() def list_templates() -> str: - """List all 
available templates with name and description. + """List all available templates with name, source, and description. Returns: JSON with list of templates. """ return json.dumps( - template_mod.list_templates(storage=_storage), + template_mod.list_templates(storage=_storage, user_id=_get_user_id()), ) @@ -644,7 +662,7 @@ def run_python(purpose: str, code: str, deck_id: str | None = None, save: bool = if deck_id: _check_deck_access(deck_id, action="edit_slide" if save else "read") - output, outline_rejected = sandbox_mod.execute_in_sandbox( + output, outline_rejected, lint_diagnostics = sandbox_mod.execute_in_sandbox( code=code, storage=_storage, region=_region, @@ -662,7 +680,11 @@ def run_python(purpose: str, code: str, deck_id: str | None = None, save: bool = "Read workflow `create-new-1-outline` for the correct format." ) - # Post-processing: measure_slides triggers PPTX build → measure/lint/bias + if lint_diagnostics: + errs = result.setdefault("errors", {}) + errs["lintDiagnostics"] = lint_diagnostics + + # Post-processing: measure_slides triggers PPTX build → measure/bias if deck_id and (measure_slides or save): import shutil import traceback @@ -695,21 +717,10 @@ def run_python(purpose: str, code: str, deck_id: str | None = None, save: bool = except Exception as e: result["measure"] = json.dumps({"error": str(e)}) - # Lint (filter to measured slugs) - try: - from sdpm.schema.lint import lint as lint_slides - presentation = {"slides": slides} - page_set = set(page_numbers) - lint_diag = [d for d in lint_slides(presentation) if d.get("slide") + 1 in page_set] - if lint_diag: - result["errors"] = {"lintDiagnostics": lint_diag} - except Exception as e: - logger.warning("Lint failed: %s", e) - # Layout bias (filter to measured slides; bias uses 1-based) try: from sdpm.preview import check_layout_imbalance_data - layout_bias = [b for b in check_layout_imbalance_data(pptx_path, slide_defs=slides) if b.get("slide") in page_set] + layout_bias = [b for b in 
check_layout_imbalance_data(pptx_path, slide_defs=slides) if b.get("slide") in set(page_numbers)] if layout_bias: result["warnings"] = {"layoutBias": layout_bias} except Exception as e: @@ -933,6 +944,145 @@ def grid(spec: str, purpose: str = "") -> str: return json.dumps(result, ensure_ascii=False, indent=2) +# --- Style Execution (Code Interpreter) --- + + +@mcp.tool() +def run_style_python(purpose: str, code: str, style_name: str | None = None, + save: bool = False, ref_styles: list[str] | None = None) -> str: + """Execute Python code in a secure sandbox for style creation/editing. + + If style_name is provided, the style HTML is loaded as style.html. + The code can read/write it via normal file I/O (open, read, write). + If save=True, style.html is written back to the user's style storage. + + If ref_styles are provided, they are downloaded and available as ref/{name}.html. + Use list_styles to discover available style names. + + Import statements are allowed — PIL, colorsys, numpy, etc. are available + for color computation, palette extraction, and contrast calculation. + + Workspace layout: + style.html — target style (read/write, saved back when save=True) + ref/{name}.html — reference styles (read-only) + + Examples: + Read reference: run_style_python(code="html = open('ref/corporate-executive.html').read(); print(html[:200])", + ref_styles=["corporate-executive"]) + Create new: run_style_python(code="open('style.html','w').write('...')", + style_name="style-20260506-1430", save=True) + Edit existing: run_style_python(code="html = open('style.html').read(); html = html.replace('old','new'); open('style.html','w').write(html)", + style_name="style-20260506-1430", save=True) + Compute colors: run_style_python(code="from colorsys import rgb_to_hls; print(rgb_to_hls(0.2, 0.4, 0.6))") + + Args: + purpose: Brief user-facing description of what this code does, + written in the user's language. Shown in the UI. + code: Python code to execute. 
+ style_name: Style name to load as style.html. Optional. + save: If True, save style.html back to storage. Requires style_name. + ref_styles: Style names to load as ref/{name}.html. Optional. + + Returns: + JSON string: {"output", "saved"?} + """ + if save and not style_name: + return json.dumps({"error": "save=True requires style_name"}) + + user_id = _get_user_id() + + client = boto3.client("bedrock-agentcore", region_name=_region) + session = client.start_code_interpreter_session( + codeInterpreterIdentifier="aws.codeinterpreter.v1", + name=f"style-{user_id[:8]}", + sessionTimeoutSeconds=300, + ) + session_id = session["sessionId"] + + try: + file_contents: list[dict[str, str]] = [] + + # Load target style + if style_name: + html = _load_style_html(user_id, style_name) + if html: + file_contents.append({"path": "style.html", "text": html}) + + # Load reference styles + if ref_styles: + for ref_name in ref_styles: + ref_html = _load_style_html(user_id, ref_name) + if ref_html: + file_contents.append({"path": f"ref/{ref_name}.html", "text": ref_html}) + + # Ensure directories exist + setup_code = "import os\nos.makedirs('ref', exist_ok=True)\n" + client.invoke_code_interpreter( + codeInterpreterIdentifier="aws.codeinterpreter.v1", + sessionId=session_id, name="executeCode", + arguments={"language": "python", "code": setup_code}, + ) + + # Write files into sandbox + if file_contents: + client.invoke_code_interpreter( + codeInterpreterIdentifier="aws.codeinterpreter.v1", + sessionId=session_id, name="writeFiles", + arguments={"content": file_contents}, + ) + + # Execute user code + response = client.invoke_code_interpreter( + codeInterpreterIdentifier="aws.codeinterpreter.v1", + sessionId=session_id, name="executeCode", + arguments={"language": "python", "code": code}, + ) + output = sandbox_mod._collect_stream(response) + + result: dict = {"output": output} + + # Save style.html back to S3 + if save and style_name: + read_code = "import sys\ntry:\n 
print(open('style.html').read())\nexcept FileNotFoundError:\n print('__NOT_FOUND__')\n" + read_resp = client.invoke_code_interpreter( + codeInterpreterIdentifier="aws.codeinterpreter.v1", + sessionId=session_id, name="executeCode", + arguments={"language": "python", "code": read_code}, + ) + style_html = sandbox_mod._collect_stream(read_resp) + if style_html and style_html.strip() != "__NOT_FOUND__": + key = f"user-styles/{user_id}/{style_name}.html" + _storage.upload_file(key=key, data=style_html.encode("utf-8"), content_type="text/html") + result["saved"] = {"filename": f"{style_name}.html", "key": key} + + return json.dumps(result, ensure_ascii=False) + + finally: + client.stop_code_interpreter_session( + codeInterpreterIdentifier="aws.codeinterpreter.v1", + sessionId=session_id, + ) + + +def _load_style_html(user_id: str, name: str) -> str | None: + """Load style HTML from S3 (user styles first, then builtin).""" + if not re.fullmatch(r"[a-zA-Z0-9_-]+", name): + return None + # Try user style + user_key = f"user-styles/{user_id}/{name}.html" + try: + return _storage.download_file_from_pptx_bucket(key=user_key).decode("utf-8") + except Exception: + pass + # Try builtin + builtin_key = f"references/examples/styles/{name}.html" + try: + return _storage.download_file(key=builtin_key).decode("utf-8") + except Exception: + pass + return None + + # --- Search + KB Sync (optional, requires KB) --- _kb_sync = None diff --git a/mcp-server/storage/__init__.py b/mcp-server/storage/__init__.py index 96c4d793..8af5120f 100644 --- a/mcp-server/storage/__init__.py +++ b/mcp-server/storage/__init__.py @@ -178,3 +178,102 @@ def deck_exists(self, deck_id: str, user_id: str) -> bool: True if deck exists and is owned by user. """ return self.get_deck(deck_id, user_id) is not None + + # --- Style Pins --- + + @abstractmethod + def get_style_pins(self, user_id: str) -> list[str]: + """Get pinned style names for a user. + + Args: + user_id: User identifier. 
+ + Returns: + List of pinned style names. + """ + + @abstractmethod + def put_style_pins(self, user_id: str, pins: list[str]) -> None: + """Save pinned style names for a user. + + Args: + user_id: User identifier. + pins: List of style names to pin. + """ + + # --- User Templates --- + + @abstractmethod + def list_user_templates(self, user_id: str) -> list[dict]: + """List user-uploaded templates metadata from DDB. + + Args: + user_id: User identifier. + + Returns: + List of template metadata dicts. + """ + + @abstractmethod + def get_user_template_metadata(self, user_id: str, name: str) -> dict | None: + """Get metadata for a single user template. + + Args: + user_id: User identifier. + name: Template name (stem). + + Returns: + Metadata dict or None if not found. + """ + + @abstractmethod + def put_user_template(self, user_id: str, name: str, data: bytes, metadata: dict) -> None: + """Upload a user template file and store metadata. + + Args: + user_id: User identifier. + name: Template name (stem). + data: .pptx file bytes. + metadata: Analysis metadata (description, theme_colors, fonts, etc.). + """ + + @abstractmethod + def delete_user_template(self, user_id: str, name: str) -> None: + """Delete a user template (file + metadata). + + Args: + user_id: User identifier. + name: Template name (stem). + """ + + @abstractmethod + def download_user_template(self, user_id: str, name: str) -> bytes: + """Download a user template .pptx file. + + Args: + user_id: User identifier. + name: Template name (stem). + + Returns: + File content as bytes. + """ + + @abstractmethod + def rename_user_template(self, user_id: str, old_name: str, new_name: str) -> None: + """Rename a user template (file + metadata). + + Args: + user_id: User identifier. + old_name: Current template name. + new_name: New template name. 
+ """ + + @abstractmethod + def update_user_template_metadata(self, user_id: str, name: str, updates: dict) -> None: + """Update specific fields in user template metadata. + + Args: + user_id: User identifier. + name: Template name. + updates: Fields to update (e.g. {"description": "new desc"}). + """ diff --git a/mcp-server/storage/aws.py b/mcp-server/storage/aws.py index 3baefefd..27f4d77d 100644 --- a/mcp-server/storage/aws.py +++ b/mcp-server/storage/aws.py @@ -170,12 +170,40 @@ def put_slide_json(self, deck_id: str, slug: str, data: dict) -> None: # --- Template --- def list_templates(self) -> list[dict]: - """List all templates from DDB.""" - resp = self._table.scan( - FilterExpression="begins_with(PK, :prefix) AND SK = :sk", - ExpressionAttributeValues={":prefix": "TEMPLATE#", ":sk": "META"}, + """List builtin templates. S3 is source of truth for existence, DDB is metadata cache.""" + # S3: what exists + resp = self._s3.list_objects_v2(Bucket=self._resource_bucket, Prefix="templates/") + s3_templates = {} + for obj in resp.get("Contents", []): + key = obj["Key"] + if key.endswith(".pptx"): + name = key.removeprefix("templates/").removesuffix(".pptx") + s3_templates[name] = {"s3Key": key, "s3ETag": obj["ETag"]} + + if not s3_templates: + return [] + + # DDB: cached metadata + keys = [{"PK": f"TEMPLATE#{n}", "SK": "META"} for n in s3_templates] + ddb_resp = self._table.meta.client.batch_get_item( + RequestItems={self._table.name: {"Keys": keys}} ) - return resp.get("Items", []) + ddb_cache = {} + for item in ddb_resp.get("Responses", {}).get(self._table.name, []): + ddb_cache[item["name"]] = item + + # Merge: S3 existence + DDB metadata + results = [] + for name, s3_info in s3_templates.items(): + cached = ddb_cache.get(name, {}) + results.append({ + "name": name, + "s3Key": s3_info["s3Key"], + "description": cached.get("description", ""), + "fonts": cached.get("fonts", {}), + "analysisJson": cached.get("analysisJson", "{}") if cached.get("s3ETag") == 
s3_info["s3ETag"] else "{}", + }) + return results # --- File I/O --- @@ -231,6 +259,100 @@ def list_files(self, prefix: str, bucket: str = "") -> list[str]: keys.append(obj["Key"]) return sorted(keys) + # --- Style Pins --- + + def get_style_pins(self, user_id: str) -> list[str]: + """Get pinned style names from DDB.""" + resp = self._table.get_item(Key={"PK": f"USER#{user_id}", "SK": "STYLE_PINS"}) + item = resp.get("Item") + if not item: + return [] + return item.get("pinned_styles", []) + + def put_style_pins(self, user_id: str, pins: list[str]) -> None: + """Save pinned style names to DDB.""" + self._table.put_item(Item={ + "PK": f"USER#{user_id}", + "SK": "STYLE_PINS", + "pinned_styles": pins, + }) + + # --- User Templates --- + + def list_user_templates(self, user_id: str) -> list[dict]: + """List user templates from DDB.""" + resp = self._table.query( + KeyConditionExpression="PK = :pk AND begins_with(SK, :prefix)", + ExpressionAttributeValues={":pk": f"USER#{user_id}", ":prefix": "TEMPLATE#"}, + ) + return resp.get("Items", []) + + def get_user_template_metadata(self, user_id: str, name: str) -> dict | None: + """Get single user template metadata.""" + resp = self._table.get_item(Key={"PK": f"USER#{user_id}", "SK": f"TEMPLATE#{name}"}) + return resp.get("Item") + + def put_user_template(self, user_id: str, name: str, data: bytes, metadata: dict) -> None: + """Upload user template to S3 and metadata to DDB.""" + s3_key = f"user-templates/{user_id}/{name}.pptx" + self._s3.put_object(Bucket=self._pptx_bucket, Key=s3_key, Body=data) + self._table.put_item(Item={ + "PK": f"USER#{user_id}", + "SK": f"TEMPLATE#{name}", + "name": name, + "s3Key": s3_key, + **metadata, + }) + + def delete_user_template(self, user_id: str, name: str) -> None: + """Delete user template from S3 and DDB.""" + s3_key = f"user-templates/{user_id}/{name}.pptx" + self._s3.delete_object(Bucket=self._pptx_bucket, Key=s3_key) + self._table.delete_item(Key={"PK": f"USER#{user_id}", "SK": 
f"TEMPLATE#{name}"}) + + def download_user_template(self, user_id: str, name: str) -> bytes: + """Download user template from S3.""" + s3_key = f"user-templates/{user_id}/{name}.pptx" + resp = self._s3.get_object(Bucket=self._pptx_bucket, Key=s3_key) + return resp["Body"].read() + + def rename_user_template(self, user_id: str, old_name: str, new_name: str) -> None: + """Rename user template (copy S3 + update DDB + delete old).""" + old_key = f"user-templates/{user_id}/{old_name}.pptx" + new_key = f"user-templates/{user_id}/{new_name}.pptx" + # Copy S3 object + self._s3.copy_object( + Bucket=self._pptx_bucket, + CopySource={"Bucket": self._pptx_bucket, "Key": old_key}, + Key=new_key, + ) + self._s3.delete_object(Bucket=self._pptx_bucket, Key=old_key) + # Move DDB item + old_item = self._table.get_item(Key={"PK": f"USER#{user_id}", "SK": f"TEMPLATE#{old_name}"}).get("Item", {}) + self._table.delete_item(Key={"PK": f"USER#{user_id}", "SK": f"TEMPLATE#{old_name}"}) + old_item["SK"] = f"TEMPLATE#{new_name}" + old_item["name"] = new_name + old_item["s3Key"] = new_key + self._table.put_item(Item=old_item) + + def update_user_template_metadata(self, user_id: str, name: str, updates: dict) -> None: + """Update fields in user template DDB item.""" + expr_parts = [] + attr_values = {} + attr_names = {} + for i, (k, v) in enumerate(updates.items()): + alias = f"#k{i}" + val_alias = f":v{i}" + expr_parts.append(f"{alias} = {val_alias}") + attr_names[alias] = k + attr_values[val_alias] = v + self._table.update_item( + Key={"PK": f"USER#{user_id}", "SK": f"TEMPLATE#{name}"}, + UpdateExpression="SET " + ", ".join(expr_parts), + ExpressionAttributeNames=attr_names, + ExpressionAttributeValues=attr_values, + ) + # --- Auth --- # deck_exists is inherited from Storage base class diff --git a/mcp-server/tools/reference.py b/mcp-server/tools/reference.py index 4d0138f6..6b9d2838 100644 --- a/mcp-server/tools/reference.py +++ b/mcp-server/tools/reference.py @@ -200,41 +200,75 @@ def 
_render_pptx_from_bytes(pptx_bytes: bytes, pages: list[int] | None = None) - tmp_path.unlink(missing_ok=True) -def list_styles(storage: Storage) -> dict[str, Any]: - """List available design styles from references/examples/styles/. +def list_styles(storage: Storage, user_id: str = "", include_all: bool = False) -> dict[str, Any]: + """List available design styles with pin/source metadata. - Extracts name and description from each HTML file's <title> tag. + Combines builtin styles (references/examples/styles/) and user styles + (user-styles/{user_id}/). Uses Engine filter_styles() for filtering. Args: storage: Storage backend instance. + user_id: User ID for fetching user styles and pins. Empty = builtin only. + include_all: If True, return all styles. If False, return pinned + user only + (falls back to all if no pins exist). Returns: - Dict with styles list (name + description). + Dict with styles list (name, description, pinned, source). """ + from sdpm.reference import filter_styles + + # 1. 
Builtin styles from resource bucket cache_key = "list:styles" cached = _cache.get(cache_key) if cached and (time.time() - cached[1]) < CACHE_TTL: - return {"styles": cached[0]} - - prefix = "references/examples/styles/" - files = storage.list_files(prefix=prefix) - styles: list[dict[str, str]] = [] - for f in files: - if not f.endswith(".html"): - continue - name = f.removeprefix(prefix).removesuffix(".html") - description = "" - try: - content = storage.download_file(key=f).decode("utf-8") - m = re.search(r"<title>(.*?)</title>", content, re.IGNORECASE) - if m: - description = m.group(1).strip() - except Exception: - pass - styles.append({"name": name, "description": description}) - - _cache[cache_key] = (styles, time.time()) - return {"styles": styles} + builtin_styles = cached[0] + else: + prefix = "references/examples/styles/" + files = storage.list_files(prefix=prefix) + builtin_styles: list[dict[str, str]] = [] + for f in files: + if not f.endswith(".html"): + continue + name = f.removeprefix(prefix).removesuffix(".html") + description = "" + try: + content = storage.download_file(key=f).decode("utf-8") + m = re.search(r"<title>(.*?)</title>", content, re.IGNORECASE) + if m: + description = m.group(1).strip() + except Exception: + pass + builtin_styles.append({"name": name, "description": description, "source": "builtin"}) + _cache[cache_key] = (builtin_styles, time.time()) + + # 2. 
User styles from pptx bucket + user_styles: list[dict[str, str]] = [] + if user_id: + user_prefix = f"user-styles/{user_id}/" + user_files = storage.list_files(prefix=user_prefix, bucket=storage.pptx_bucket) + for f in user_files: + if not f.endswith(".html"): + continue + name = f.removeprefix(user_prefix).removesuffix(".html") + description = "" + try: + content = storage.download_file_from_pptx_bucket(key=f).decode("utf-8") + m = re.search(r"<title>(.*?)</title>", content, re.IGNORECASE) + if m: + description = m.group(1).strip() + except Exception: + pass + user_styles.append({"name": name, "description": description, "source": "user"}) + + # 3. Get pins + pinned_names: list[str] = [] + if user_id: + pinned_names = storage.get_style_pins(user_id) + + # 4. Filter via Engine + all_styles = user_styles + builtin_styles + filtered = filter_styles(all_styles, pinned_names, include_all) + return {"styles": filtered} def read_examples(names: list[str], storage: Storage) -> dict[str, Any]: diff --git a/mcp-server/tools/sandbox.py b/mcp-server/tools/sandbox.py index 3ea665be..4ae89ef6 100644 --- a/mcp-server/tools/sandbox.py +++ b/mcp-server/tools/sandbox.py @@ -106,13 +106,14 @@ def execute_in_sandbox( # Save modified workspace files back to S3 outline_rejected = False + lint_diagnostics: list[dict] = [] if save and deck_id: - outline_rejected = _save_deck_workspace( + outline_rejected, lint_diagnostics = _save_deck_workspace( client, session_id, storage, deck_id, ) logger.info("Deck workspace saved for deck %s", deck_id) - return output, outline_rejected + return output, outline_rejected, lint_diagnostics finally: client.stop_code_interpreter_session( @@ -235,6 +236,24 @@ def _save_deck_workspace( outline_rejected = True logger.warning("outline.md lint failed for deck %s — not saved", deck_id) + # Lint and sanitize slide JSON before saving + lint_diagnostics: list[dict] = [] + from sdpm.schema.lint import lint_and_sanitize + + for rel_path in list(file_map.keys()): + if 
rel_path.startswith("slides/") and rel_path.endswith(".json"): + try: + slide_data = json.loads(file_map[rel_path]) + cleaned, diags = lint_and_sanitize(slide_data) + if diags: + slug = rel_path.removeprefix("slides/").removesuffix(".json") + for d in diags: + d["slug"] = slug + lint_diagnostics.extend(diags) + file_map[rel_path] = json.dumps(cleaned, ensure_ascii=False) + except (json.JSONDecodeError, TypeError): + pass + # Write back to S3 prefix = f"decks/{deck_id}/" for rel_path, text in file_map.items(): @@ -245,7 +264,7 @@ def _save_deck_workspace( content_type=_content_type(rel_path), ) - return outline_rejected + return outline_rejected, lint_diagnostics def _write_files(client: Any, session_id: str, content: list[dict[str, str]]) -> None: diff --git a/mcp-server/tools/template.py b/mcp-server/tools/template.py index 90f6df1b..bd271dcc 100644 --- a/mcp-server/tools/template.py +++ b/mcp-server/tools/template.py @@ -12,34 +12,59 @@ from storage import Storage -def list_templates(storage: Storage) -> dict[str, Any]: - """List all available templates. +def list_templates(storage: Storage, user_id: str = "") -> dict[str, Any]: + """List all available templates (builtin + user). Args: storage: Storage backend instance. + user_id: User identifier (for user templates). Returns: - Dict with list of templates (name, description, isDefault). + Dict with list of templates (name, source, description, fonts, layout_count). 
""" - templates = storage.list_templates() - return { - "templates": [ - { + templates = [] + + # Builtin templates + for t in storage.list_templates(): + analysis = {} + raw = t.get("analysisJson", "") + if raw and raw != "{}": + analysis = json.loads(raw) if isinstance(raw, str) else raw + templates.append({ + "name": t.get("name", ""), + "source": "builtin", + "description": t.get("description", ""), + "fonts": t.get("fonts", {}), + "layout_count": len(analysis.get("layouts", [])), + }) + + # User templates + if user_id: + for t in storage.list_user_templates(user_id): + analysis = {} + raw = t.get("analysisJson", "") + if raw and raw != "{}": + analysis = json.loads(raw) if isinstance(raw, str) else raw + templates.append({ "name": t.get("name", ""), + "source": "user", "description": t.get("description", ""), - "isDefault": t.get("isDefault", False), - } - for t in templates - ] - } + "fonts": t.get("fonts", {}), + "layout_count": len(analysis.get("layouts", [])), + }) + return {"templates": templates} -def analyze_template(template_name: str, storage: Storage) -> dict[str, Any]: + +def analyze_template(template_name: str, storage: Storage, user_id: str = "") -> dict[str, Any]: """Return pre-analyzed template information from DDB. + Searches user templates first, then builtin. + Args: template_name: Template name from list_templates. storage: Storage backend instance. + user_id: User identifier (for user template lookup). Returns: Dict with layouts, theme_colors, fonts from pre-analysis. @@ -47,8 +72,31 @@ def analyze_template(template_name: str, storage: Storage) -> dict[str, Any]: Raises: ValueError: If template not found or analysis not available. 
""" - templates = storage.list_templates() normalized = template_name.removesuffix(".pptx") + + # Check user templates first + if user_id: + user_meta = storage.get_user_template_metadata(user_id, normalized) + if user_meta: + analysis_raw = user_meta.get("analysisJson", "") + if analysis_raw and analysis_raw != "{}": + analysis = json.loads(analysis_raw) if isinstance(analysis_raw, str) else analysis_raw + else: + # Analyze on the fly + import tempfile + from pathlib import Path + from sdpm.analyzer import analyze_template as _analyze + data = storage.download_user_template(user_id, normalized) + tmp = Path(tempfile.mkdtemp()) + tpl_path = tmp / "template.pptx" + tpl_path.write_bytes(data) + analysis = _analyze(tpl_path) + analysis["fonts"] = user_meta.get("fonts", {}) + analysis["templateName"] = template_name + return analysis + + # Builtin templates + templates = storage.list_templates() tmpl = None for t in templates: if t.get("name") == normalized: @@ -56,16 +104,17 @@ def analyze_template(template_name: str, storage: Storage) -> dict[str, Any]: break if not tmpl: available = [t.get("name", "") for t in templates] + if user_id: + user_names = [t.get("name", "") for t in storage.list_user_templates(user_id)] + available.extend(user_names) raise ValueError( f"Template '{template_name}' not found. Available: {', '.join(available)}" ) analysis_raw = tmpl.get("analysisJson", "") if not analysis_raw or analysis_raw == "{}": - # No pre-analysis — run analysis on the fly import tempfile from pathlib import Path - from sdpm.analyzer import analyze_template as _analyze s3_key = tmpl.get("s3Key", "") diff --git a/mcp-server/tools/upload.py b/mcp-server/tools/upload.py index 1a99a623..deee8f00 100644 --- a/mcp-server/tools/upload.py +++ b/mcp-server/tools/upload.py @@ -16,6 +16,35 @@ _TEXT_TYPES = {"text/plain", "text/markdown", "application/json"} +def _analyze_colors(data: bytes) -> dict | None: + """Analyze dominant colors of an image. 
Returns palette + brightness + saturation.""" + try: + img = PILImage.open(io.BytesIO(data)).convert("RGB") + small = img.resize((100, 100)) + pixels = list(small.getdata()) + + lum_sum = sum(r * 0.299 + g * 0.587 + b * 0.114 for r, g, b in pixels) + lum_avg = lum_sum / len(pixels) + brightness = "dark" if lum_avg < 100 else "light" if lum_avg > 155 else "mixed" + + hsv = small.convert("HSV") + s_avg = sum(s for _, s, _ in hsv.getdata()) / (100 * 100) + saturation = "monochrome" if s_avg < 30 else "muted" if s_avg < 100 else "vivid" + + quantized = small.quantize(colors=5, method=PILImage.Quantize.MEDIANCUT) + palette_data = quantized.getpalette() + color_counts = sorted(quantized.getcolors(), reverse=True) + total = sum(c for c, _ in color_counts) + palette = [] + for count, idx in color_counts[:5]: + r, g, b = palette_data[idx * 3], palette_data[idx * 3 + 1], palette_data[idx * 3 + 2] + palette.append({"hex": f"#{r:02X}{g:02X}{b:02X}", "ratio": round(count / total, 2)}) + + return {"palette": palette, "brightness": brightness, "saturation": saturation} + except Exception: + return None + + def _to_jpeg(data: bytes) -> bytes: """Resize image to fit within max edge and convert to JPEG.""" img = PILImage.open(io.BytesIO(data)) @@ -96,6 +125,9 @@ def read_uploaded_file( parts.append(Image(data=jpeg, format="jpeg")) except Exception: parts.append("(preview unavailable)") + colors = _analyze_colors(data) + if colors: + parts.append(f"Color analysis: {colors}") return parts # --- PPTX (completed, lazy conversion via Engine on MCP Server) --- diff --git a/skill/sdpm/__init__.py b/skill/sdpm/__init__.py index 74809c7e..f259c948 100644 --- a/skill/sdpm/__init__.py +++ b/skill/sdpm/__init__.py @@ -2,4 +2,4 @@ # SPDX-License-Identifier: MIT-0 """sdpm - Generate PowerPoint from JSON using template.""" -__version__ = "0.2.1" +__version__ = "0.3.0" \ No newline at end of file diff --git a/skill/sdpm/analyzer/__init__.py b/skill/sdpm/analyzer/__init__.py index 468bd53f..c992c137 
100644 --- a/skill/sdpm/analyzer/__init__.py +++ b/skill/sdpm/analyzer/__init__.py @@ -1,13 +1,14 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 """Template analysis: extract layout info, theme colors, fonts, and color usage.""" +import zipfile from pathlib import Path -from sdpm.builder import PPTXBuilder -from sdpm.utils.io import write_json, read_json - +from lxml import etree from pptx import Presentation +from sdpm.utils.io import write_json, read_json + def analyze_template(template_path: Path): """Analyze template and return structured result. @@ -130,9 +131,79 @@ def extract_fonts(template_path: Path) -> dict: return {"halfwidth": None, "fullwidth": None} +def _extract_theme_colors_raw(template_path: Path): + """Extract theme colors and dark flag from template's clrScheme + clrMap. + + Returns (colors_dict, is_dark). Standalone — no PPTXBuilder dependency. + """ + ns_a = "http://schemas.openxmlformats.org/drawingml/2006/main" + ns_p = "http://schemas.openxmlformats.org/presentationml/2006/main" + + scheme = {} + with zipfile.ZipFile(str(template_path)) as z: + theme_target = 'ppt/theme/theme1.xml' + for name in z.namelist(): + if 'slideMaster1.xml.rels' in name: + rels = etree.fromstring(z.read(name)) + for rel in rels: + if 'theme' in rel.get('Target', ''): + target = rel.get('Target').replace('..', 'ppt') + if not target.startswith('ppt/'): + target = 'ppt/theme/' + target.split('/')[-1] + theme_target = target + break + break + + if theme_target in z.namelist(): + tree = etree.fromstring(z.read(theme_target)) + else: + tree = None + for name in sorted(z.namelist()): + if 'theme' in name and name.endswith('.xml'): + tree = etree.fromstring(z.read(name)) + break + + if tree is not None: + cs = tree.find(f'.//{{{ns_a}}}clrScheme') + if cs is not None: + for child in cs: + tag = child.tag.split('}')[1] + val_el = child[0] if len(child) > 0 else None + if val_el is not None: + hex_val = 
val_el.get('lastClr') or val_el.get('val') or '000000' + try: + int(hex_val, 16) + except ValueError: + hex_val = val_el.get('lastClr') or '000000' + scheme[tag] = hex_val + + prs = Presentation(str(template_path)) + master = prs.slide_masters[0] + clr_map = master.element.find(f'.//{{{ns_p}}}clrMap') + bg1_ref = clr_map.get('bg1', 'lt1') if clr_map is not None else 'lt1' + tx1_ref = clr_map.get('tx1', 'dk1') if clr_map is not None else 'dk1' + bg2_ref = clr_map.get('bg2', 'lt2') if clr_map is not None else 'lt2' + tx2_ref = clr_map.get('tx2', 'dk2') if clr_map is not None else 'dk2' + + colors = { + "text": f"#{scheme.get(tx1_ref, '000000')}", + "background": f"#{scheme.get(bg1_ref, 'FFFFFF')}", + "text2": f"#{scheme.get(tx2_ref, '000000')}", + "background2": f"#{scheme.get(bg2_ref, 'FFFFFF')}", + } + for i in range(1, 7): + colors[f"accent{i}"] = f"#{scheme.get(f'accent{i}', '4A90D9')}" + + tx = colors["text"].lstrip("#") + r, g, b = int(tx[:2], 16), int(tx[2:4], 16), int(tx[4:6], 16) + is_dark = (0.299 * r + 0.587 * g + 0.114 * b) > 128 + + return colors, is_dark + + def extract_theme_colors(template_path: Path): """Extract theme colors from template (clrMap-aware).""" - colors, _ = PPTXBuilder._extract_theme_colors(template_path) + colors, _ = _extract_theme_colors_raw(template_path) result = {} role_map = {"text": "text", "background": "background", "text2": "text2", "background2": "background2"} for key, role in role_map.items(): diff --git a/skill/sdpm/api.py b/skill/sdpm/api.py index 4ef76d5a..6abffe17 100644 --- a/skill/sdpm/api.py +++ b/skill/sdpm/api.py @@ -41,6 +41,104 @@ def get_styles_dirs() -> list[Path]: return _get_resource_dirs("SDPM_STYLES_DIR", "styles", BUNDLED_STYLES_DIR) +def list_styles_filtered( + styles_dirs: list[Path], + pinned_names: list[str], + include_all: bool = False, +) -> list[dict]: + """List styles with pin/source metadata, optionally filtered. + + Filesystem-based entry point for MCP Local / CLI. 
+ Determines source ("user" vs "builtin") by checking whether each style + lives in the user-local directory. + + Args: + styles_dirs: Ordered directories from get_styles_dirs(). + pinned_names: Pinned style names from state.json. + include_all: Pass through to filter_styles(). + + Returns: + Filtered list with pinned/source metadata. + """ + from sdpm.config import get_user_config_dir + from sdpm.reference import filter_styles, list_styles_merged + + user_dir = get_user_config_dir() / "styles" + raw = list_styles_merged(styles_dirs) + + # Tag source based on whether the style file exists in user dir + for s in raw: + if (user_dir / f"{s['name']}.html").exists(): + s["source"] = "user" + else: + s["source"] = "builtin" + + return filter_styles(raw, pinned_names, include_all) + + +def list_templates_with_metadata( + templates_dirs: list[Path], + metadata: dict[str, dict], +) -> list[dict]: + """List templates with source and metadata. + + Pure function — no I/O beyond filesystem glob. + + Args: + templates_dirs: From get_templates_dirs(). Last entry is bundled. + metadata: {name: {description, theme_colors, fonts, layout_count}} from state.json or DDB. + + Returns: + Sorted list of template dicts with name, source, description, theme_colors, fonts, layout_count. 
+ """ + bundled_dir = templates_dirs[-1] if templates_dirs else None + seen: dict[str, dict] = {} + for d in templates_dirs: + if not d.exists(): + continue + for t in sorted(d.glob("*.pptx")): + name = t.stem + if name in seen: + continue + source = "builtin" if d == bundled_dir else "user" + meta = metadata.get(name, {}) + seen[name] = { + "name": name, + "source": source, + "description": meta.get("description", ""), + "theme_colors": meta.get("theme_colors", {}), + "fonts": meta.get("fonts", {}), + "layout_count": meta.get("layout_count", 0), + } + return sorted(seen.values(), key=lambda x: (x["source"] != "user", x["name"])) + + +def analyze_and_store_template(template_path: Path, description: str = "") -> dict: + """Analyze a template and return metadata for storage. + + Calls the existing analyze_template() and reshapes the result. + Persistence is the caller's responsibility (state.json or DDB). + + Args: + template_path: Path to .pptx file. + description: User-provided description. + + Returns: + Dict with name, description, theme_colors, fonts, layout_count, layouts. + """ + from sdpm.analyzer import analyze_template as _analyze + + result = _analyze(template_path) + return { + "name": template_path.stem, + "description": description, + "theme_colors": result.get("theme_colors", {}), + "fonts": result.get("fonts", {}), + "layout_count": len(result.get("layouts", [])), + "layouts": result.get("layouts", []), + } + + def _find_style_in_dirs(name: str, styles_dirs: list[Path]) -> Path | None: """Search for a style HTML by name across the given directories. @@ -263,7 +361,7 @@ def _resolve_config(json_path: str | Path) -> BuildConfig: Raises FileNotFoundError, ValueError on missing template/icons. 
""" - from sdpm.builder import PPTXBuilder, resolve_override, validate_icons_in_json + from sdpm.builder import resolve_override, validate_icons_in_json from sdpm.utils.io import read_json input_path = Path(json_path) @@ -286,6 +384,7 @@ def _resolve_config(json_path: str | Path) -> BuildConfig: # Auto-fill fonts from sdpm.analyzer import extract_fonts as _extract_fonts + from sdpm.analyzer import _extract_theme_colors_raw fonts = data.get("fonts") if not fonts or not fonts.get("fullwidth"): @@ -295,7 +394,7 @@ def _resolve_config(json_path: str | Path) -> BuildConfig: # Auto-fill defaultTextColor dtc = data.get("defaultTextColor") if not dtc: - _, is_dark = PPTXBuilder._extract_theme_colors(template_file) + _, is_dark = _extract_theme_colors_raw(template_file) dtc = "#FFFFFF" if is_dark else "#333333" warnings.append(f"defaultTextColor auto-set to {dtc}") diff --git a/skill/sdpm/builder/__init__.py b/skill/sdpm/builder/__init__.py index fb49f8cc..2f8b6e61 100644 --- a/skill/sdpm/builder/__init__.py +++ b/skill/sdpm/builder/__init__.py @@ -116,76 +116,8 @@ def __init__(self, template_path: Path, custom_template: bool = False, @staticmethod def _extract_theme_colors(template_path): """Extract theme colors from template's clrScheme + clrMap.""" - import zipfile - from lxml import etree - ns_a = "http://schemas.openxmlformats.org/drawingml/2006/main" - ns_p = "http://schemas.openxmlformats.org/presentationml/2006/main" - - # Find theme XML linked to slide master 1 - scheme = {} - with zipfile.ZipFile(str(template_path)) as z: - # Determine which theme file slideMaster1 references - theme_target = 'ppt/theme/theme1.xml' # fallback - for name in z.namelist(): - if 'slideMaster1.xml.rels' in name: - rels = etree.fromstring(z.read(name)) - for rel in rels: - if 'theme' in rel.get('Target', ''): - target = rel.get('Target').replace('..', 'ppt') - if not target.startswith('ppt/'): - target = 'ppt/theme/' + target.split('/')[-1] - theme_target = target - break - break - - if 
theme_target in z.namelist(): - tree = etree.fromstring(z.read(theme_target)) - else: - # Fallback: first theme file - tree = None - for name in sorted(z.namelist()): - if 'theme' in name and name.endswith('.xml'): - tree = etree.fromstring(z.read(name)) - break - - if tree is not None: - cs = tree.find(f'.//{{{ns_a}}}clrScheme') - if cs is not None: - for child in cs: - tag = child.tag.split('}')[1] - val_el = child[0] if len(child) > 0 else None - if val_el is not None: - hex_val = val_el.get('lastClr') or val_el.get('val') or '000000' - try: - int(hex_val, 16) - except ValueError: - hex_val = val_el.get('lastClr') or '000000' - scheme[tag] = hex_val - - # Parse clrMap from slide master - prs = Presentation(str(template_path)) - master = prs.slide_masters[0] - clr_map = master.element.find(f'.//{{{ns_p}}}clrMap') - bg1_ref = clr_map.get('bg1', 'lt1') if clr_map is not None else 'lt1' - tx1_ref = clr_map.get('tx1', 'dk1') if clr_map is not None else 'dk1' - bg2_ref = clr_map.get('bg2', 'lt2') if clr_map is not None else 'lt2' - tx2_ref = clr_map.get('tx2', 'dk2') if clr_map is not None else 'dk2' - - colors = { - "text": f"#{scheme.get(tx1_ref, '000000')}", - "background": f"#{scheme.get(bg1_ref, 'FFFFFF')}", - "text2": f"#{scheme.get(tx2_ref, '000000')}", - "background2": f"#{scheme.get(bg2_ref, 'FFFFFF')}", - } - for i in range(1, 7): - colors[f"accent{i}"] = f"#{scheme.get(f'accent{i}', '4A90D9')}" - - # is_dark: text color luminance > 128 means dark background - tx = colors["text"].lstrip("#") - r, g, b = int(tx[:2], 16), int(tx[2:4], 16), int(tx[4:6], 16) - is_dark = (0.299 * r + 0.587 * g + 0.114 * b) > 128 - - return colors, is_dark + from sdpm.analyzer import _extract_theme_colors_raw + return _extract_theme_colors_raw(template_path) def _build_layout_map(self): """Build layout name → index mapping from template's slide layouts.""" diff --git a/skill/sdpm/builder/elements/shape.py b/skill/sdpm/builder/elements/shape.py index b4478b16..7386ab1e 100644 --- 
a/skill/sdpm/builder/elements/shape.py +++ b/skill/sdpm/builder/elements/shape.py @@ -228,7 +228,10 @@ def _add_shape(self, slide, elem): txBody.insert(1, new_lst) tf.word_wrap = True - tf.auto_size = None if elem.get("_noAutofit") else (MSO_AUTO_SIZE.SHAPE_TO_FIT_TEXT if elem.get("_spAutoFit") else MSO_AUTO_SIZE.TEXT_TO_FIT_SHAPE) + if elem.get("_spAutoFit"): + tf.auto_size = MSO_AUTO_SIZE.SHAPE_TO_FIT_TEXT + else: + tf.auto_size = None tf.clear() # Apply margins (px input → EMU) @@ -648,7 +651,10 @@ def px_to_emu(v): from pptx.enum.text import MSO_AUTO_SIZE tf = shape.text_frame tf.word_wrap = True - tf.auto_size = None if elem.get("_noAutofit") else (MSO_AUTO_SIZE.SHAPE_TO_FIT_TEXT if elem.get("_spAutoFit") else MSO_AUTO_SIZE.TEXT_TO_FIT_SHAPE) + if elem.get("_spAutoFit"): + tf.auto_size = MSO_AUTO_SIZE.SHAPE_TO_FIT_TEXT + else: + tf.auto_size = None tf.clear() if elem.get("marginLeft") is not None: tf.margin_left = self._px_to_emu(elem["marginLeft"]) diff --git a/skill/sdpm/builder/elements/textbox.py b/skill/sdpm/builder/elements/textbox.py index 8ae792db..4dfe59ed 100644 --- a/skill/sdpm/builder/elements/textbox.py +++ b/skill/sdpm/builder/elements/textbox.py @@ -14,7 +14,7 @@ class TextboxMixin: """Mixin providing textbox element methods.""" def _add_textbox(self, slide, elem): - from pptx.enum.text import MSO_AUTO_SIZE, PP_ALIGN + from pptx.enum.text import PP_ALIGN align = elem.get("align", _DEFAULTS["align"]) x_pct = elem.get("x", 3) @@ -58,19 +58,16 @@ def _add_textbox(self, slide, elem): tf = textbox.text_frame tf.margin_left = 0 tf.margin_right = 0 - if elem.get("_noAutofit"): - # Clean bodyPr to match original (no autofit, no wrap override) - from pptx.oxml.ns import qn - bodyPr = tf._txBody.find(qn('a:bodyPr')) - for attr in list(bodyPr.attrib): - if 'wrap' in attr: - del bodyPr.attrib[attr] - for child in list(bodyPr): - tag = child.tag.split('}')[1] - if tag in ('spAutoFit', 'noAutofit', 'normAutofit'): - bodyPr.remove(child) - else: - tf.auto_size 
= MSO_AUTO_SIZE.SHAPE_TO_FIT_TEXT + # Clean bodyPr: no autofit by default + from pptx.oxml.ns import qn + bodyPr = tf._txBody.find(qn('a:bodyPr')) + for attr in list(bodyPr.attrib): + if 'wrap' in attr: + del bodyPr.attrib[attr] + for child in list(bodyPr): + tag = child.tag.split('}')[1] + if tag in ('spAutoFit', 'noAutofit', 'normAutofit'): + bodyPr.remove(child) if not elem.get("_noAutofit"): tf.word_wrap = not auto_width diff --git a/skill/sdpm/config.py b/skill/sdpm/config.py index 0e81eb2b..c43ca767 100644 --- a/skill/sdpm/config.py +++ b/skill/sdpm/config.py @@ -104,3 +104,36 @@ def get_output_dir() -> Path: def get_extra_sources() -> list[dict]: """Extra asset sources list.""" return get_config().get("extra_sources", []) + + +# ── State (app-managed, separate from user-editable config) ── + + +def get_state() -> dict: + """Load app state from state.json. Returns empty dict if missing. + + state.json stores app-managed data (pinned styles, etc.) separately + from config.json which is user-editable settings. + """ + state_path = get_user_config_dir() / "state.json" + if state_path.exists(): + with open(state_path) as f: + return json.load(f) + return {} + + +def update_state(key: str, value: object) -> None: + """Update a single key in state.json (read-modify-write). + + Creates the file and parent directory if they don't exist. 
+ """ + config_dir = get_user_config_dir() + config_dir.mkdir(parents=True, exist_ok=True) + state_path = config_dir / "state.json" + state = {} + if state_path.exists(): + with open(state_path) as f: + state = json.load(f) + state[key] = value + with open(state_path, "w") as f: + json.dump(state, f, ensure_ascii=False, indent=2) diff --git a/skill/sdpm/reference/__init__.py b/skill/sdpm/reference/__init__.py index 7f277c8d..967c8901 100644 --- a/skill/sdpm/reference/__init__.py +++ b/skill/sdpm/reference/__init__.py @@ -394,3 +394,37 @@ def list_styles_merged(styles_dirs: list[Path]) -> list[dict[str, str]]: seen.add(item["name"]) result.append(item) return result + + +def filter_styles( + styles: list[dict], + pinned_names: list[str], + include_all: bool = False, +) -> list[dict]: + """Add pinned/source metadata and optionally filter styles. + + Pure function — no I/O. Usable by both MCP Local (filesystem) and + MCP Server (S3) since style retrieval is the caller's responsibility. + + Args: + styles: List of style dicts. Each must have ``name``. May already + have ``source``; defaults to ``"builtin"`` if absent. + pinned_names: List of pinned style names. + include_all: If True, return all styles. If False, return only + pinned + user styles (or all if no pins exist). + + Returns: + Styles with ``pinned: bool`` and ``source: "builtin"|"user"`` added. 
+ """ + pin_set = set(pinned_names) + result = [] + for s in styles: + enriched = {**s, "pinned": s["name"] in pin_set} + if "source" not in enriched: + enriched["source"] = "builtin" + result.append(enriched) + + if include_all or not pin_set: + return result + + return [s for s in result if s["pinned"] or s["source"] == "user"] diff --git a/skill/sdpm/schema/lint.py b/skill/sdpm/schema/lint.py index f4cc03f7..446d42af 100644 --- a/skill/sdpm/schema/lint.py +++ b/skill/sdpm/schema/lint.py @@ -31,6 +31,30 @@ def lint(data: list | dict) -> list[dict]: return diagnostics +def lint_and_sanitize(slide: dict) -> tuple[dict, list[dict]]: + """Validate slide JSON and remove deprecated properties. + + Called before persisting slide JSON (S3 write-back or local file save). + Text-based only — no PPTX build needed. + + Args: + slide: Single slide dict (with "elements" key). + + Returns: + (sanitized_slide, diagnostics) — cleaned dict and list of issues found. + """ + import copy + cleaned = copy.deepcopy(slide) + diagnostics: list[dict] = [] + for ei, elem in enumerate(cleaned.get("elements") or []): + diagnostics.extend(_lint_element(0, ei, elem)) + if elem.pop("_spAutoFit", None): + diagnostics.append(_diag(0, ei, "deprecated-autofit", + "_spAutoFit is deprecated and was removed. 
" + "Use measure to detect overflow instead.")) + return cleaned, diagnostics + + def _diag(slide: int, element: int, rule: str, message: str) -> dict: return {"slide": slide, "element": element, "rule": rule, "message": message} diff --git a/tests/test_styles_resolution.py b/tests/test_styles_resolution.py index 07341cde..a6205e3a 100644 --- a/tests/test_styles_resolution.py +++ b/tests/test_styles_resolution.py @@ -149,3 +149,116 @@ def test_list_styles_single_dir_still_works(temp_styles_dir: Path) -> None: names = [s["name"] for s in result] assert "elegant-dark" in names assert "custom-brand" in names + + +# --------------------------------------------------------------------------- +# filter_styles +# --------------------------------------------------------------------------- + + +from sdpm.reference import filter_styles + + +def test_filter_styles_adds_pinned_metadata() -> None: + styles = [{"name": "a", "description": ""}, {"name": "b", "description": ""}] + result = filter_styles(styles, pinned_names=["a"], include_all=True) + a = next(s for s in result if s["name"] == "a") + b = next(s for s in result if s["name"] == "b") + assert a["pinned"] is True + assert b["pinned"] is False + + +def test_filter_styles_defaults_source_to_builtin() -> None: + styles = [{"name": "a", "description": ""}] + result = filter_styles(styles, pinned_names=[]) + assert result[0]["source"] == "builtin" + + +def test_filter_styles_preserves_existing_source() -> None: + styles = [{"name": "a", "description": "", "source": "user"}] + result = filter_styles(styles, pinned_names=[]) + assert result[0]["source"] == "user" + + +def test_filter_styles_include_all_returns_everything() -> None: + styles = [ + {"name": "a", "description": "", "source": "builtin"}, + {"name": "b", "description": "", "source": "user"}, + ] + result = filter_styles(styles, pinned_names=["a"], include_all=True) + assert len(result) == 2 + + +def test_filter_styles_no_pins_returns_all() -> None: + styles = [ + 
{"name": "a", "description": ""}, + {"name": "b", "description": ""}, + ] + result = filter_styles(styles, pinned_names=[], include_all=False) + assert len(result) == 2 + + +def test_filter_styles_with_pins_filters_to_pinned_and_user() -> None: + styles = [ + {"name": "a", "description": "", "source": "builtin"}, + {"name": "b", "description": "", "source": "user"}, + {"name": "c", "description": "", "source": "builtin"}, + ] + result = filter_styles(styles, pinned_names=["a"], include_all=False) + names = [s["name"] for s in result] + assert "a" in names # pinned + assert "b" in names # user + assert "c" not in names # neither pinned nor user + + +# --------------------------------------------------------------------------- +# list_styles_filtered (filesystem integration) +# --------------------------------------------------------------------------- + + +from sdpm.api import list_styles_filtered + + +def test_list_styles_filtered_tags_user_source(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("XDG_CONFIG_HOME", str(tmp_path)) + monkeypatch.setenv("APPDATA", str(tmp_path)) + + user_dir = tmp_path / "sdpm" / "styles" + user_dir.mkdir(parents=True) + (user_dir / "my-style.html").write_text("My Style") + + bundled = tmp_path / "bundled" + bundled.mkdir() + (bundled / "default.html").write_text("Default") + + result = list_styles_filtered([user_dir, bundled], pinned_names=[], include_all=True) + my = next(s for s in result if s["name"] == "my-style") + default = next(s for s in result if s["name"] == "default") + assert my["source"] == "user" + assert default["source"] == "builtin" + + +# --------------------------------------------------------------------------- +# get_state / update_state +# --------------------------------------------------------------------------- + + +from sdpm.config import get_state, update_state + + +def test_get_state_returns_empty_when_no_file(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + 
monkeypatch.setenv("XDG_CONFIG_HOME", str(tmp_path)) + monkeypatch.setenv("APPDATA", str(tmp_path)) + assert get_state() == {} + + +def test_update_state_creates_and_updates(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("XDG_CONFIG_HOME", str(tmp_path)) + monkeypatch.setenv("APPDATA", str(tmp_path)) + update_state("pinned_styles", ["a", "b"]) + state = get_state() + assert state["pinned_styles"] == ["a", "b"] + + update_state("pinned_styles", ["a"]) + state = get_state() + assert state["pinned_styles"] == ["a"] diff --git a/tests/test_templates_resolution.py b/tests/test_templates_resolution.py index a5185199..d57a7b4e 100644 --- a/tests/test_templates_resolution.py +++ b/tests/test_templates_resolution.py @@ -76,3 +76,57 @@ def test_resolve_template_raises_when_not_found(tmp_path: Path) -> None: input_path = tmp_path / "presentation.json" with pytest.raises(FileNotFoundError, match="No template specified"): _resolve_template(data, str(input_path), [tmp_path]) + + +# ── list_templates_with_metadata ── + + +from sdpm.api import list_templates_with_metadata, analyze_and_store_template + + +def test_list_templates_with_metadata_source_detection(tmp_path: Path) -> None: + user_dir = tmp_path / "user" + bundled_dir = tmp_path / "bundled" + user_dir.mkdir() + bundled_dir.mkdir() + (user_dir / "custom.pptx").write_bytes(b"dummy") + (bundled_dir / "builtin.pptx").write_bytes(b"dummy") + + metadata = {"custom": {"description": "My template", "theme_colors": {}, "fonts": {}, "layout_count": 3}} + result = list_templates_with_metadata([user_dir, bundled_dir], metadata) + + assert len(result) == 2 + custom = next(r for r in result if r["name"] == "custom") + builtin = next(r for r in result if r["name"] == "builtin") + assert custom["source"] == "user" + assert custom["description"] == "My template" + assert custom["layout_count"] == 3 + assert builtin["source"] == "builtin" + assert builtin["description"] == "" + + +def 
test_list_templates_with_metadata_user_shadows_builtin(tmp_path: Path) -> None: + user_dir = tmp_path / "user" + bundled_dir = tmp_path / "bundled" + user_dir.mkdir() + bundled_dir.mkdir() + (user_dir / "shared.pptx").write_bytes(b"user-ver") + (bundled_dir / "shared.pptx").write_bytes(b"bundled-ver") + + result = list_templates_with_metadata([user_dir, bundled_dir], {}) + assert len(result) == 1 + assert result[0]["source"] == "user" + + +def test_analyze_and_store_template() -> None: + template_path = Path(__file__).parent.parent / "skill" / "templates" / "blank-dark.pptx" + if not template_path.exists(): + pytest.skip("blank-dark.pptx not available") + + result = analyze_and_store_template(template_path, description="Dark theme") + assert result["name"] == "blank-dark" + assert result["description"] == "Dark theme" + assert isinstance(result["theme_colors"], dict) + assert isinstance(result["fonts"], dict) + assert result["layout_count"] > 0 + assert len(result["layouts"]) == result["layout_count"] diff --git a/web-ui/next-env.d.ts b/web-ui/next-env.d.ts index e89b6845..9edff1c7 100644 --- a/web-ui/next-env.d.ts +++ b/web-ui/next-env.d.ts @@ -1,6 +1,6 @@ /// /// -import "./build/dev/types/routes.d.ts"; +import "./.next/types/routes.d.ts"; // NOTE: This file should not be edited // see https://nextjs.org/docs/app/api-reference/config/typescript for more information. 
diff --git a/web-ui/src/app/(authenticated)/decks/page.tsx b/web-ui/src/app/(authenticated)/decks/page.tsx index e629eb43..3a337bff 100644 --- a/web-ui/src/app/(authenticated)/decks/page.tsx +++ b/web-ui/src/app/(authenticated)/decks/page.tsx @@ -25,7 +25,7 @@ import { AppShell } from "@/components/AppShell" import { DeckListView } from "@/components/deck/DeckListView" import { SlideCarousel } from "@/components/deck/SlideCarousel" import { DeckActions } from "@/components/deck/DeckActions" -import { DeleteDeckModal } from "@/components/deck/DeleteDeckModal" +import { ConfirmDialog } from "@/components/ConfirmDialog" import { ChatPanelShell } from "@/components/chat/ChatPanelShell" import { ChatPanelHandle } from "@/components/chat/ChatPanel" import { updateVisibility, shareDeck } from "@/services/deckService" @@ -118,7 +118,6 @@ export default function DecksPage() { deckId={ws.isWorkspace && !ws.isNew ? ws.activeDeckId : null} deckName={ws.deck?.name || null} chatSessionId={ws.deck?.chatSessionId} - slidePreviewUrls={ws.deck?.slides.map(s => s.previewUrl) || []} slideSlugs={ws.deck?.slides.map(s => s.slug || "") || []} onDeckCreated={ws.handleDeckCreated} onPreviewInvalidated={() => ws.setPptxRequested(true)} onWorkflowPhase={setWorkflowPhase} @@ -144,20 +143,6 @@ export default function DecksPage() { workflowPhase={workflowPhase} onStyleSelect={handleStyleSelect} idToken={idToken} - onSlideClick={(page) => { - const dName = ws.deck?.name || "Deck" - const mention = ws.activeDeckId - ? `@${dName}(#${ws.activeDeckId}):Page ${page} ` - : `@Page ${page} ` - const insert = () => chatRef.current?.insertAtCursor(mention) - if (ws.chatOpen) { - insert() - } else { - ws.setChatTab("deck") - ws.setChatOpen(true) - setTimeout(insert, 400) - } - }} ownerAlias={!ws.isOwner ? ws.deck?.ownerAlias : undefined} headerActions={ ws.activeDeckId && !ws.isNew ? ( @@ -242,7 +227,6 @@ export default function DecksPage() { deckId={ws.isWorkspace && !ws.isNew ? 
ws.activeDeckId : null} deckName={ws.deck?.name || null} chatSessionId={ws.deck?.chatSessionId} - slidePreviewUrls={ws.deck?.slides.map(s => s.previewUrl) || []} slideSlugs={ws.deck?.slides.map(s => s.slug || "") || []} onDeckCreated={ws.handleDeckCreated} onPreviewInvalidated={() => ws.setPptxRequested(true)} onWorkflowPhase={setWorkflowPhase} @@ -250,13 +234,15 @@ export default function DecksPage() { )}
- {list.deleteTarget && ( - list.setDeleteTarget(null)} - /> - )} + { if (!open) list.setDeleteTarget(null) }} + title="Delete this deck?" + description={<>{list.deleteTarget?.name} will be permanently deleted after 30 days. This action cannot be undone.} + confirmLabel="Delete" + variant="destructive" + onConfirm={list.confirmDelete} + /> {isMobile && !ws.isWorkspace && (
diff --git a/web-ui/src/app/(authenticated)/styles/page.tsx b/web-ui/src/app/(authenticated)/styles/page.tsx new file mode 100644 index 00000000..1727c0a1 --- /dev/null +++ b/web-ui/src/app/(authenticated)/styles/page.tsx @@ -0,0 +1,506 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT-0 +/** + * Styles page — Browse, manage, and create presentation styles. + * + * Uses URL hash routing (useStyleWorkspace): + * - No hash: style list grid + * - #create: new style creation (Phase 3 agent chat) + * - #{name}: style preview (user styles get chat panel in Phase 3) + */ + +"use client" + +import { useState, useEffect, useRef, useCallback } from "react" +import { useAuth } from "@/hooks/useAuth" +import { useStyleWorkspace } from "@/hooks/useStyleWorkspace" +import { AppShell } from "@/components/AppShell" +import { ConfirmDialog } from "@/components/ConfirmDialog" +import { fetchStyles, fetchStyleHtml, pinStyle, saveUserStyle, deleteUserStyle, renameUserStyle, type StyleEntry } from "@/services/deckService" +import { StyleSlidePreview } from "@/components/StyleSlidePreview" +import { StyleChatShell } from "@/components/chat/StyleChatShell" +import { Star, Trash2, Palette, Download, Sparkles, Copy, MessageSquare, Pencil, MoreHorizontal } from "lucide-react" + +export default function StylesPage() { + const auth = useAuth() + const idToken = auth.user?.id_token + const ws = useStyleWorkspace(idToken) + + const [styles, setStyles] = useState([]) + const [loading, setLoading] = useState(true) + const fileInputRef = useRef(null) + const [deleteConfirm, setDeleteConfirm] = useState(null) + const [inlineRename, setInlineRename] = useState(null) + const [inlineRenameValue, setInlineRenameValue] = useState("") + const [inlineRenameError, setInlineRenameError] = useState("") + const [toast, setToast] = useState<{ message: string; type: "success" | "error" } | null>(null) + const [chatOpen, setChatOpen] = useState(false) + + 
const showToast = (message: string, type: "success" | "error" = "success") => { + setToast({ message, type }) + setTimeout(() => setToast(null), 3000) + } + + const refreshStyles = useCallback(async () => { + if (!idToken) return + const s = await fetchStyles(idToken) + setStyles(s) + setLoading(false) + }, [idToken]) + + useEffect(() => { refreshStyles() }, [refreshStyles]) + + // When #create is visited, show name dialog and go back to list view + useEffect(() => { + if (ws.view.mode === "create") { + ws.navigateToList() + handleCreateStyle() + } + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [ws.view.mode]) + + // Reset chat state when navigating away from preview + useEffect(() => { + if (ws.view.mode !== "preview") { + setChatOpen(false) + } + }, [ws.view.mode]) + + const handlePin = async (name: string) => { + const style = styles.find(s => s.name === name) + const newPinned = !style?.pinned + setStyles(prev => prev.map(s => s.name === name ? { ...s, pinned: newPinned } : s)) + if (idToken) pinStyle(name, newPinned, idToken) + } + + const handleImport = async (file: File) => { + if (!idToken) return + const html = await file.text() + const name = file.name.replace(/\.html?$/i, "").replace(/[^a-zA-Z0-9_-]/g, "-") + const result = await saveUserStyle(name, html, idToken) + if (result.error) { showToast(result.error, "error"); return } + await refreshStyles() + showToast(`Imported "${name}"`) + } + + const handleDelete = async (name: string) => { + if (!idToken) return + const result = await deleteUserStyle(name, idToken) + if (result.error) { showToast(result.error, "error"); return } + setStyles(prev => prev.filter(s => s.name !== name)) + if (ws.styleName === name) ws.navigateToList() + setDeleteConfirm(null) + showToast(`Deleted "${name}"`) + } + + const handleExport = async (name: string) => { + if (!idToken) return + const html = await fetchStyleHtml(name, idToken) + if (!html) return + const blob = new Blob([html], { type: "text/html" }) + 
const url = URL.createObjectURL(blob) + const a = document.createElement("a") + a.href = url + a.download = `${name}.html` + a.click() + URL.revokeObjectURL(url) + } + + const handleCopyToMyStyles = async (name: string) => { + if (!idToken) return + const html = await fetchStyleHtml(name, idToken) + if (!html) return + // Replace with "Copy of {original}" + const originalTitle = html.match(/<title>(.*?)<\/title>/i)?.[1] || name + const newHtml = html.replace(/<title>.*?<\/title>/i, `<title>Copy of ${originalTitle}`) + const now = new Date() + const pad = (n: number) => String(n).padStart(2, "0") + const filename = `style-${now.getFullYear()}${pad(now.getMonth() + 1)}${pad(now.getDate())}-${pad(now.getHours())}${pad(now.getMinutes())}` + const result = await saveUserStyle(filename, newHtml, idToken) + if (result.error) { showToast(result.error, "error"); return } + await refreshStyles() + showToast(`Copied as "Copy of ${originalTitle}"`) + } + + const handleCreateStyle = async () => { + if (!idToken) return + const now = new Date() + const pad = (n: number) => String(n).padStart(2, "0") + const name = `style-${now.getFullYear()}${pad(now.getMonth() + 1)}${pad(now.getDate())}-${pad(now.getHours())}${pad(now.getMinutes())}` + const html = `Untitled Style

Untitled Style

` + const result = await saveUserStyle(name, html, idToken) + if (result.error) { showToast(result.error, "error"); return } + await refreshStyles() + ws.navigateToStyle(name) + setChatOpen(true) + } + + const handleInlineRenameSubmit = async () => { + if (!idToken || !inlineRename) return + const newName = inlineRenameValue.trim() + if (!newName || newName === inlineRename) { setInlineRename(null); setInlineRenameError(""); return } + if (!/^[a-zA-Z0-9_-]+$/.test(newName)) { setInlineRenameError("Letters, numbers, hyphens, underscores only"); return } + if (styles.some(s => s.name === newName)) { setInlineRenameError("Name already exists"); return } + const result = await renameUserStyle(inlineRename, newName, idToken) + if (result.error) { setInlineRenameError(result.error); return } + setInlineRename(null) + setInlineRenameError("") + await refreshStyles() + } + + const currentStyle = ws.styleName ? styles.find(s => s.name === ws.styleName) : null + + const userStyles = styles.filter(s => s.source === "user") + const builtinStyles = styles.filter(s => s.source === "builtin") + + return ( + +
+
+ {loading ? ( + /* ── Loading skeleton ── */ +
+
+
+

Styles

+

Manage and preview presentation styles

+
+
+
+ {[...Array(8)].map((_, i) => ( +
+ ))} +
+
+ ) : ws.view.mode === "preview" && ws.styleName ? ( + /* ── Style preview ── */ +
+
+ +

{ws.styleName}

+ + {currentStyle?.source === "user" && ( + <> + + handleExport(ws.styleName!)} + onDelete={() => setDeleteConfirm(ws.styleName!)} + /> + + )} + {currentStyle?.source === "builtin" && ( + + )} +
+ +
+ ) : ws.view.mode === "create" ? ( + /* ── Create new style → creates Untitled Style and navigates ── */ +
+
+ +

Create with AI

+

Creating your new style…

+ +
+
+ ) : ( + /* ── Style grid ── */ +
+
+
+

Styles

+

Manage and preview presentation styles

+
+
+
+ {/* User styles */} +
+

My Styles

+
+ {userStyles.map(style => ( + setDeleteConfirm(name)} + onExport={handleExport} + onRename={name => { setInlineRename(name); setInlineRenameValue(name); setInlineRenameError("") }} + isRenaming={inlineRename === style.name} + renameValue={inlineRenameValue} + renameError={inlineRename === style.name ? inlineRenameError : ""} + onRenameChange={v => { setInlineRenameValue(v); setInlineRenameError("") }} + onRenameSubmit={handleInlineRenameSubmit} + onRenameCancel={() => { setInlineRename(null); setInlineRenameError("") }} + /> + ))} + {/* Create with AI card + Import link */} +
+ + +
+ { + const file = e.target.files?.[0] + if (file) handleImport(file) + e.target.value = "" + }} + /> +
+
+ + {/* Built-in styles */} +
+

Built-in Styles

+
+ {builtinStyles.map(style => ( + + ))} +
+
+
+
+ )} +
+ + {/* Style Chat Panel (side panel) */} + {ws.view.mode === "preview" && currentStyle?.source === "user" && ( + setChatOpen(false)} + styleId={ws.styleName!} + styleName={ws.styleName!} + onStyleWritten={() => ws.refreshPreview()} + onStyleSaved={async (saved) => { + showToast(`Style saved: ${saved.title}`, "success") + await refreshStyles() + }} + /> + )} +
+ + {/* Delete confirmation dialog */} + { if (!open) setDeleteConfirm(null) }} + title="Delete style" + description={<>Are you sure you want to delete {deleteConfirm}? This cannot be undone.} + confirmLabel="Delete" + variant="destructive" + onConfirm={() => deleteConfirm && handleDelete(deleteConfirm)} + /> + + {/* Toast notification */} + {toast && ( +
+ {toast.message} +
+ )} + + ) +} + +/** Style card for the /styles list page. */ +function StyleListCard({ style, onPreview, onPin, onDelete, onExport, onRename, isRenaming, renameValue, renameError, onRenameChange, onRenameSubmit, onRenameCancel }: { + style: StyleEntry + onPreview: (name: string) => void + onPin: (name: string) => void + onDelete?: (name: string) => void + onExport?: (name: string) => void + onRename?: (name: string) => void + isRenaming?: boolean + renameValue?: string + renameError?: string + onRenameChange?: (v: string) => void + onRenameSubmit?: () => void + onRenameCancel?: () => void +}) { + const cardRef = useRef(null) + const [scale, setScale] = useState(0.15) + + useEffect(() => { + const el = cardRef.current + if (!el) return + const ro = new ResizeObserver(([entry]) => setScale(entry.contentRect.width / 1920)) + ro.observe(el) + return () => ro.disconnect() + }, []) + + return ( +
onPreview(style.name)} + > + {/* Cover preview */} +
+ {style.coverHtml ? ( +