diff --git a/.github/workflows/mirror-sync.yml b/.github/workflows/mirror-sync.yml new file mode 100644 index 00000000..62a4e139 --- /dev/null +++ b/.github/workflows/mirror-sync.yml @@ -0,0 +1,64 @@ +name: Mirror Monorepo Components + +on: + workflow_dispatch: + inputs: + source_ref: + description: "Monorepo ref to mirror (branch, tag, or SHA)" + required: true + default: "main" + dry_run: + description: "Preview only (no pushes)" + required: true + default: true + type: boolean + +permissions: + contents: read + +jobs: + mirror: + runs-on: ubuntu-latest + steps: + - name: Checkout source ref + uses: actions/checkout@v4 + with: + fetch-depth: 0 + ref: ${{ inputs.source_ref }} + + - name: Configure mirror remotes + env: + MIRROR_PUSH_TOKEN: ${{ secrets.MIRROR_PUSH_TOKEN }} + run: | + set -euo pipefail + if [ -z "${MIRROR_PUSH_TOKEN}" ]; then + echo "MIRROR_PUSH_TOKEN is required" + exit 1 + fi + + git remote add mirror-cli "https://x-access-token:${MIRROR_PUSH_TOKEN}@github.com/kernel/hypeman-cli.git" + git remote add mirror-sdk-go "https://x-access-token:${MIRROR_PUSH_TOKEN}@github.com/kernel/hypeman-go.git" + git remote add mirror-sdk-ts "https://x-access-token:${MIRROR_PUSH_TOKEN}@github.com/onkernel/hypeman-ts.git" + + - name: Build subtree branches + run: | + set -euo pipefail + git subtree split --prefix=apps/cli -b mirror-cli-branch + git subtree split --prefix=sdks/go -b mirror-sdk-go-branch + git subtree split --prefix=sdks/ts -b mirror-sdk-ts-branch + + - name: Preview mirror pushes + if: ${{ inputs.dry_run == true }} + run: | + echo "Dry run enabled. 
Would push:" + echo " mirror-cli-branch -> kernel/hypeman-cli:main" + echo " mirror-sdk-go-branch -> kernel/hypeman-go:main" + echo " mirror-sdk-ts-branch -> onkernel/hypeman-ts:main" + + - name: Push mirrors + if: ${{ inputs.dry_run != true }} + run: | + set -euo pipefail + git push mirror-cli mirror-cli-branch:main --force-with-lease + git push mirror-sdk-go mirror-sdk-go-branch:main --force-with-lease + git push mirror-sdk-ts mirror-sdk-ts-branch:main --force-with-lease diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 22b499b0..4c8a53ab 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,4 +1,4 @@ -name: Release Hypeman API +name: Release Hypeman Monorepo on: push: @@ -9,7 +9,7 @@ permissions: contents: write jobs: - release: + release-server: runs-on: [self-hosted, linux, x64, kvm] steps: - name: Checkout @@ -34,3 +34,41 @@ jobs: args: release --clean env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + release-components: + needs: release-server + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Go + uses: actions/setup-go@v6 + with: + go-version: '1.25.4' + + - name: Verify CLI and Go SDK release build + run: go test ./... + working-directory: ./apps/cli + + - name: Verify Go SDK tests + run: go test ./... 
+ working-directory: ./sdks/go + + - name: Set up Node + uses: actions/setup-node@v4 + with: + node-version: '20' + + - name: Set up pnpm + uses: pnpm/action-setup@v4 + with: + version: '10.24.0' + + - name: Verify TypeScript SDK build + run: | + ./scripts/bootstrap + ./scripts/build + working-directory: ./sdks/ts diff --git a/.github/workflows/stainless-sdks.yml b/.github/workflows/stainless-sdks.yml index b0173fc4..126d672f 100644 --- a/.github/workflows/stainless-sdks.yml +++ b/.github/workflows/stainless-sdks.yml @@ -1,4 +1,4 @@ -name: Stainless SDK preview on PRs +name: Stainless SDK Sync on PRs on: pull_request: @@ -6,60 +6,198 @@ on: - opened - synchronize - reopened - - closed + paths: + - openapi.yaml + - stainless.yaml + - .github/workflows/stainless-sdks.yml concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number }} cancel-in-progress: true env: - STAINLESS_ORG: ${{ vars.STAINLESS_ORG }} STAINLESS_PROJECT: ${{ vars.STAINLESS_PROJECT }} OAS_PATH: openapi.yaml CONFIG_PATH: stainless.yaml jobs: - preview: - if: github.event.action != 'closed' + build-and-sync: + if: github.event.pull_request.head.repo.full_name == github.repository runs-on: ubuntu-latest permissions: - contents: read + contents: write pull-requests: write + id-token: write steps: - name: Checkout repository uses: actions/checkout@v4 with: - fetch-depth: 2 + fetch-depth: 0 - - name: Run preview builds - uses: stainless-api/upload-openapi-spec-action/preview@v1 + - name: Run Stainless build + id: stainless_build + uses: stainless-api/upload-openapi-spec-action/build@v1 with: stainless_api_key: ${{ secrets.STAINLESS_API_KEY }} - org: ${{ env.STAINLESS_ORG }} project: ${{ env.STAINLESS_PROJECT }} oas_path: ${{ env.OAS_PATH }} config_path: ${{ env.CONFIG_PATH }} - make_comment: true - github_token: ${{ secrets.GITHUB_TOKEN }} + branch: ${{ github.event.pull_request.head.ref }} + commit_message: "chore(stainless): update SDKs for PR #${{ github.event.pull_request.number 
}}" - merge: - if: github.event.action == 'closed' && github.event.pull_request.merged == true - runs-on: ubuntu-latest - permissions: - contents: read - pull-requests: write - steps: - - name: Checkout repository - uses: actions/checkout@v4 + - name: Sync generated SDKs into monorepo + id: sync_sdks + env: + OUTCOMES_JSON: ${{ steps.stainless_build.outputs.outcomes }} + STAINLESS_SYNC_GITHUB_TOKEN: ${{ secrets.STAINLESS_SYNC_GITHUB_TOKEN }} + run: | + set -euo pipefail + + if [ -z "${OUTCOMES_JSON}" ] || [ "${OUTCOMES_JSON}" = "null" ] || [ "${OUTCOMES_JSON}" = "{}" ]; then + echo "No Stainless outcomes available (likely no SDK changes)." + echo "changed=false" >> "$GITHUB_OUTPUT" + exit 0 + fi + + printf '%s' "${OUTCOMES_JSON}" > /tmp/stainless-outcomes.json + GO_SHA="$(jq -r '.go.commit.completed.commit.sha // .go.commit.commit.sha // empty' /tmp/stainless-outcomes.json)" + GO_OWNER="$(jq -r '.go.commit.completed.commit.repo.owner // .go.commit.commit.repo.owner // empty' /tmp/stainless-outcomes.json)" + GO_REPO="$(jq -r '.go.commit.completed.commit.repo.name // .go.commit.commit.repo.name // empty' /tmp/stainless-outcomes.json)" + TS_SHA="$(jq -r '.typescript.commit.completed.commit.sha // .typescript.commit.commit.sha // empty' /tmp/stainless-outcomes.json)" + TS_OWNER="$(jq -r '.typescript.commit.completed.commit.repo.owner // .typescript.commit.commit.repo.owner // empty' /tmp/stainless-outcomes.json)" + TS_REPO="$(jq -r '.typescript.commit.completed.commit.repo.name // .typescript.commit.commit.repo.name // empty' /tmp/stainless-outcomes.json)" + TS_INSTALL_URL="$(jq -r '.typescript.install_url // empty' /tmp/stainless-outcomes.json)" + + if [ -z "${GO_SHA}" ] || [ -z "${GO_OWNER}" ] || [ -z "${GO_REPO}" ]; then + echo "No Go commit coordinates found in outcomes; skipping sync." + jq . 
/tmp/stainless-outcomes.json + echo "changed=false" >> "$GITHUB_OUTPUT" + exit 0 + fi + + if [ -z "${TS_INSTALL_URL}" ] && { [ -z "${TS_SHA}" ] || [ -z "${TS_OWNER}" ] || [ -z "${TS_REPO}" ]; }; then + echo "No TypeScript source coordinates found in outcomes; skipping sync." + jq . /tmp/stainless-outcomes.json + echo "changed=false" >> "$GITHUB_OUTPUT" + exit 0 + fi + + if [ -z "${STAINLESS_SYNC_GITHUB_TOKEN:-}" ]; then + echo "STAINLESS_SYNC_GITHUB_TOKEN is required to fetch generated SDK commits." + exit 1 + fi + + sync_from_install_url() { + local install_url="$1" + local destination="$2" + local tmp_archive + local tmp_extract + + tmp_archive="$(mktemp)" + tmp_extract="$(mktemp -d)" + + curl --fail --location --silent --show-error "${install_url}" --output "${tmp_archive}" + tar -xzf "${tmp_archive}" -C "${tmp_extract}" + + rm -rf "${destination}" + mkdir -p "${destination}" + + if [ -d "${tmp_extract}/package" ]; then + cp -a "${tmp_extract}/package/." "${destination}/" + else + cp -a "${tmp_extract}/." 
"${destination}/" + fi + } + + sync_target() { + local owner="$1" + local repo="$2" + local sha="$3" + local monorepo_subpath="$4" + local destination="$5" + local source_url="https://x-access-token:${STAINLESS_SYNC_GITHUB_TOKEN}@github.com/${owner}/${repo}.git" + + local tmp_repo + tmp_repo="$(mktemp -d)" + + git -C "${tmp_repo}" init --quiet + git -C "${tmp_repo}" remote add origin "${source_url}" + git -C "${tmp_repo}" fetch --depth=1 origin "${sha}" + + rm -rf "${destination}" + mkdir -p "${destination}" + + if git -C "${tmp_repo}" cat-file -e "FETCH_HEAD:${monorepo_subpath}" 2>/dev/null; then + local strip_components + strip_components="$(awk -F/ '{print NF}' <<< "${monorepo_subpath}")" + git -C "${tmp_repo}" archive FETCH_HEAD "${monorepo_subpath}" | tar -x -C "${destination}" --strip-components="${strip_components}" + else + git -C "${tmp_repo}" archive FETCH_HEAD | tar -x -C "${destination}" + fi + } + + sync_target "${GO_OWNER}" "${GO_REPO}" "${GO_SHA}" "sdks/go" "sdks/go" + + if [ -n "${TS_INSTALL_URL}" ]; then + sync_from_install_url "${TS_INSTALL_URL}" "sdks/ts" + else + if [ "${TS_OWNER}" = "stainless-sdks" ] && [ "${TS_REPO}" = "hypeman-typescript" ]; then + TS_REPO="hypeman-ts" + fi + sync_target "${TS_OWNER}" "${TS_REPO}" "${TS_SHA}" "sdks/ts" "sdks/ts" + fi + + if git diff --quiet -- sdks/go sdks/ts; then + echo "changed=false" >> "$GITHUB_OUTPUT" + else + echo "changed=true" >> "$GITHUB_OUTPUT" + fi + + - name: Set up Go + if: steps.sync_sdks.outputs.changed == 'true' + uses: actions/setup-go@v5 with: - fetch-depth: 2 + go-version: '1.25.4' + + - name: Run Go SDK tests + if: steps.sync_sdks.outputs.changed == 'true' + run: GOWORK=off go test ./... + working-directory: ./sdks/go + + - name: Run CLI tests + if: steps.sync_sdks.outputs.changed == 'true' + env: + GOWORK: ${{ github.workspace }}/go.work + run: go test ./... 
+ working-directory: ./apps/cli - - name: Run merge build - uses: stainless-api/upload-openapi-spec-action/merge@v1 + - name: Set up Node + if: steps.sync_sdks.outputs.changed == 'true' + uses: actions/setup-node@v4 with: - stainless_api_key: ${{ secrets.STAINLESS_API_KEY }} - org: ${{ env.STAINLESS_ORG }} - project: ${{ env.STAINLESS_PROJECT }} - oas_path: ${{ env.OAS_PATH }} - make_comment: true - github_token: ${{ secrets.GITHUB_TOKEN }} + node-version: '20' + + - name: Set up pnpm + if: steps.sync_sdks.outputs.changed == 'true' + uses: pnpm/action-setup@v4 + with: + version: '10.24.0' + + - name: Run TypeScript SDK tests + if: steps.sync_sdks.outputs.changed == 'true' + run: | + ./scripts/bootstrap + ./scripts/test + working-directory: ./sdks/ts + + - name: Commit synced SDK changes + if: steps.sync_sdks.outputs.changed == 'true' + run: | + git config user.name "github-actions[bot]" + git config user.email "41898282+github-actions[bot]@users.noreply.github.com" + git add sdks/go sdks/ts + git commit -m "chore(stainless): sync SDKs for PR #${{ github.event.pull_request.number }}" + + - name: Push synced SDK changes + if: steps.sync_sdks.outputs.changed == 'true' + run: git push origin "HEAD:${{ github.event.pull_request.head.ref }}" diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 1b418ad1..39fe2443 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -2,34 +2,89 @@ name: Test on: push: {} + pull_request: {} jobs: + changes: + runs-on: ubuntu-latest + outputs: + server: ${{ steps.filter.outputs.server }} + cli: ${{ steps.filter.outputs.cli }} + sdk_go: ${{ steps.filter.outputs.sdk_go }} + sdk_ts: ${{ steps.filter.outputs.sdk_ts }} + steps: + - uses: actions/checkout@v4 + - uses: dorny/paths-filter@v3 + id: filter + with: + filters: | + server: + - 'cmd/**' + - 'lib/**' + - 'integration/**' + - 'scripts/**' + - 'openapi.yaml' + - 'go.mod' + - 'go.sum' + - 'Makefile' + - '.github/workflows/test.yml' + - 
'!apps/cli/**' + - '!sdks/**' + cli: + - 'apps/cli/**' + - 'sdks/go/**' + - 'openapi.yaml' + - 'stainless.yaml' + - 'go.work' + - '.github/workflows/stainless-sdks.yml' + - '.github/workflows/test.yml' + sdk_go: + - 'sdks/go/**' + - 'openapi.yaml' + - 'stainless.yaml' + - '.github/workflows/stainless-sdks.yml' + - '.github/workflows/test.yml' + sdk_ts: + - 'sdks/ts/**' + - 'openapi.yaml' + - 'stainless.yaml' + - '.github/workflows/stainless-sdks.yml' + - '.github/workflows/test.yml' + test: + needs: changes + if: needs.changes.outputs.server == 'true' runs-on: [self-hosted, linux, x64, kvm] steps: - uses: actions/checkout@v4 - + - name: Set up Go uses: actions/setup-go@v6 with: - # Not necessary to upload cache on self-hosted runner(s) - # ~/go/pkg/mod and ~/.cache/go-build stay on disk between runs automatically. cache: false go-version: '1.25.4' - + - name: Install dependencies run: | set -xe + apt_retry() { + local attempts=0 + until sudo "$@"; do + attempts=$((attempts + 1)) + if [ "$attempts" -ge 10 ]; then + return 1 + fi + sleep 10 + done + } if ! command -v mkfs.erofs &> /dev/null || \ ! command -v mkfs.ext4 &> /dev/null || \ ! 
command -v iptables &> /dev/null; then - sudo apt-get update - sudo apt-get install -y erofs-utils e2fsprogs iptables + apt_retry apt-get update + apt_retry apt-get install -y erofs-utils e2fsprogs iptables fi go mod download - - # Avoids rate limits when running the tests - # Tests includes pulling, then converting to disk images + - name: Login to Docker Hub uses: docker/login-action@v3 with: @@ -48,7 +103,7 @@ jobs: - name: Check gofmt run: | set -euo pipefail - go_files="$(git ls-files '*.go')" + go_files="$(git ls-files '*.go' | grep -Ev '^(apps/cli|sdks/)' || true)" if [ -z "$go_files" ]; then exit 0 fi @@ -58,12 +113,10 @@ jobs: echo "$unformatted" exit 1 fi - + - name: Run tests env: - # Docker auth for tests running as root (sudo) DOCKER_CONFIG: /home/debianuser/.docker - # TLS/ACME testing (optional - tests will skip if not configured) ACME_EMAIL: ${{ secrets.ACME_EMAIL }} ACME_DNS_PROVIDER: "cloudflare" ACME_CA: "https://acme-staging-v02.api.letsencrypt.org/directory" @@ -73,6 +126,8 @@ jobs: run: make test test-darwin: + needs: changes + if: needs.changes.outputs.server == 'true' runs-on: [self-hosted, macos, arm64] concurrency: group: macos-ci-test-${{ github.ref }} @@ -101,11 +156,10 @@ jobs: run: make oapi-generate - name: Build run: make build - - name: Check gofmt run: | set -euo pipefail - go_files="$(git ls-files '*.go')" + go_files="$(git ls-files '*.go' | grep -Ev '^(apps/cli|sdks/)' || true)" if [ -z "$go_files" ]; then exit 0 fi @@ -115,7 +169,6 @@ jobs: echo "$unformatted" exit 1 fi - - name: Run tests env: DEFAULT_HYPERVISOR: vz @@ -129,8 +182,9 @@ jobs: make clean e2e-install: + needs: [changes, test-darwin] + if: needs.changes.outputs.server == 'true' runs-on: [self-hosted, macos, arm64] - needs: test-darwin concurrency: group: macos-ci-e2e-${{ github.ref }} cancel-in-progress: true @@ -147,3 +201,55 @@ jobs: - name: Cleanup on failure if: failure() run: bash scripts/uninstall.sh || true + + test-cli: + needs: changes + if: 
needs.changes.outputs.cli == 'true' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Set up Go + uses: actions/setup-go@v6 + with: + cache: true + go-version: '1.25.4' + - name: Run CLI tests + env: + GOWORK: ${{ github.workspace }}/go.work + run: go test ./... + working-directory: ./apps/cli + + test-sdk-go: + needs: changes + if: needs.changes.outputs.sdk_go == 'true' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Set up Go + uses: actions/setup-go@v6 + with: + cache: true + go-version: '1.25.4' + - name: Run Go SDK tests + run: GOWORK=off go test ./... + working-directory: ./sdks/go + + test-sdk-ts: + needs: changes + if: needs.changes.outputs.sdk_ts == 'true' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Set up Node + uses: actions/setup-node@v4 + with: + node-version: '20' + - name: Set up pnpm + uses: pnpm/action-setup@v4 + with: + version: '10.24.0' + - name: Run TypeScript SDK tests + run: | + ./scripts/bootstrap + ./scripts/test + working-directory: ./sdks/ts diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 06807042..466dd2de 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -2,6 +2,38 @@ This document covers development setup, configuration, and contributing to Hypeman. +## Monorepo Migration Notes + +The monorepo now includes: + +- `cmd/`, `lib/`, `integration/` for server code +- `apps/cli` for CLI code +- `sdks/go` and `sdks/ts` for Stainless SDK code + +History import workflow (path-native, blame-correct): + +1. Rewrite source history into a target subdirectory using `git filter-repo` (preferred) or `git filter-branch` fallback. +2. Prefix imported tags with a namespace (for example `legacy/cli/`, `legacy/sdk-go/`, `legacy/sdk-ts/`). +3. Merge rewritten histories with `git merge --allow-unrelated-histories --no-ff`. +4. 
Anchor non-main imported refs with `git merge -s ours --no-ff ` so all source commits are reachable from the migration branch without changing tree contents. +5. Verify with: + - `git log -- /` + - `git blame /` + +Rollback commands during migration: + +```bash +# Abort in-progress merge +git merge --abort + +# Reset working tree to last commit on migration branch +git reset --hard HEAD + +# Remove imported refs created during staging +git for-each-ref --format='%(refname)' refs/heads/import/ | while read -r ref; do git update-ref -d "$ref"; done +git for-each-ref --format='%(refname)' refs/tags/legacy/ | while read -r ref; do git update-ref -d "$ref"; done +``` + ## Prerequisites ### Linux (Default) diff --git a/Makefile b/Makefile index db88e71b..8b3e1541 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ SHELL := /bin/bash -.PHONY: oapi-generate generate-vmm-client generate-wire generate-all dev build build-linux test test-linux test-darwin install-tools gen-jwt download-ch-binaries download-ch-spec ensure-ch-binaries build-caddy-binaries build-caddy ensure-caddy-binaries release-prep clean build-embedded +.PHONY: oapi-generate generate-vmm-client generate-wire generate-all dev build build-linux test test-linux test-darwin test-cli test-sdk-go test-sdk-ts test-monorepo install-tools gen-jwt download-ch-binaries download-ch-spec ensure-ch-binaries build-caddy-binaries build-caddy ensure-caddy-binaries release-prep clean build-embedded # Directory where local binaries will be installed BIN_DIR ?= $(CURDIR)/bin @@ -226,6 +226,21 @@ else $(MAKE) test-linux endif +# Run monorepo component tests +test-monorepo: test test-cli test-sdk-go test-sdk-ts + +# Run CLI tests against local workspace dependencies +test-cli: + cd apps/cli && go test ./... + +# Run Go SDK tests +test-sdk-go: + cd sdks/go && go test ./... 
+ +# Run TypeScript SDK tests +test-sdk-ts: + cd sdks/ts && pnpm install --frozen-lockfile && pnpm test + # Linux tests (as root for network capabilities) test-linux: ensure-ch-binaries ensure-caddy-binaries build-embedded @VERBOSE_FLAG=""; \ diff --git a/README.md b/README.md index 3a1a449f..05d3ade1 100644 --- a/README.md +++ b/README.md @@ -60,6 +60,7 @@ curl -fsSL https://get.hypeman.sh/cli | bash ```bash go install 'github.com/kernel/hypeman-cli/cmd/hypeman@latest' ``` +The CLI and SDK source now lives in this monorepo; external package/release identities are mirrored for compatibility. Then create a CLI config file at `~/.config/hypeman/cli.yaml`: @@ -183,6 +184,13 @@ For all available commands, run `hypeman --help`. ## Development +Monorepo layout: + +- `cmd/`, `lib/`, `integration/`: Hypeman server and API +- `apps/cli`: Hypeman CLI source +- `sdks/go`: Stainless-generated Go SDK source +- `sdks/ts`: Stainless-generated TypeScript SDK source + See [DEVELOPMENT.md](DEVELOPMENT.md) for build instructions, configuration options, and contributing guidelines. 
## License diff --git a/apps/cli/.github/workflows/ci.yml b/apps/cli/.github/workflows/ci.yml new file mode 100644 index 00000000..697b7ff6 --- /dev/null +++ b/apps/cli/.github/workflows/ci.yml @@ -0,0 +1,34 @@ +name: CI +on: + push: + branches-ignore: + - 'generated' + - 'codegen/**' + - 'integrated/**' + - 'stl-preview-head/**' + - 'stl-preview-base/**' + pull_request: + branches-ignore: + - 'stl-preview-head/**' + - 'stl-preview-base/**' + +jobs: + lint: + timeout-minutes: 10 + name: lint + runs-on: ${{ github.repository == 'stainless-sdks/hypeman-cli' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + if: github.event_name == 'push' || github.event.pull_request.head.repo.fork + + steps: + - uses: actions/checkout@v4 + + - name: Setup go + uses: actions/setup-go@v5 + with: + go-version-file: ./go.mod + + - name: Bootstrap + run: ./scripts/bootstrap + + - name: Run lints + run: ./scripts/lint diff --git a/apps/cli/.github/workflows/publish-release.yml b/apps/cli/.github/workflows/publish-release.yml new file mode 100644 index 00000000..5088ad07 --- /dev/null +++ b/apps/cli/.github/workflows/publish-release.yml @@ -0,0 +1,40 @@ +--- +name: Publish Release +permissions: + contents: write + +concurrency: + group: publish + +on: + push: + tags: + - "v*" +jobs: + goreleaser: + runs-on: ubuntu-latest + steps: + - name: Generate app token + id: app-token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ secrets.ADMIN_APP_ID }} + private-key: ${{ secrets.ADMIN_APP_PRIVATE_KEY }} + owner: kernel + repositories: homebrew-tap + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version-file: "go.mod" + - name: Run GoReleaser + uses: goreleaser/goreleaser-action@v6.1.0 + with: + version: latest + args: release --clean + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + HOMEBREW_TAP_GITHUB_TOKEN: ${{ steps.app-token.outputs.token }} \ No newline at end of file diff --git 
a/apps/cli/.github/workflows/release-doctor.yml b/apps/cli/.github/workflows/release-doctor.yml new file mode 100644 index 00000000..b2931090 --- /dev/null +++ b/apps/cli/.github/workflows/release-doctor.yml @@ -0,0 +1,19 @@ +name: Release Doctor +on: + pull_request: + branches: + - main + workflow_dispatch: + +jobs: + release_doctor: + name: release doctor + runs-on: ubuntu-latest + if: github.repository == 'kernel/hypeman-cli' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next') + + steps: + - uses: actions/checkout@v4 + + - name: Check release environment + run: | + bash ./bin/check-release-environment diff --git a/apps/cli/.gitignore b/apps/cli/.gitignore new file mode 100644 index 00000000..12c45084 --- /dev/null +++ b/apps/cli/.gitignore @@ -0,0 +1,6 @@ +.prism.log +dist/ +/hypeman +.env +hypeman/** +bin/hypeman diff --git a/apps/cli/.goreleaser.yml b/apps/cli/.goreleaser.yml new file mode 100644 index 00000000..b2f3fb97 --- /dev/null +++ b/apps/cli/.goreleaser.yml @@ -0,0 +1,98 @@ +project_name: hypeman +version: 2 + +before: + hooks: + - mkdir -p completions + - sh -c "go run ./cmd/hypeman/main.go @completion bash > completions/hypeman.bash" + - sh -c "go run ./cmd/hypeman/main.go @completion zsh > completions/hypeman.zsh" + - sh -c "go run ./cmd/hypeman/main.go @completion fish > completions/hypeman.fish" + - sh -c "go run ./cmd/hypeman/main.go @manpages -o man" + +builds: + - id: macos + goos: [darwin] + goarch: [amd64, arm64] + binary: '{{ .ProjectName }}' + main: ./cmd/hypeman/main.go + mod_timestamp: '{{ .CommitTimestamp }}' + ldflags: + - '-s -w -X github.com/kernel/hypeman-cli/pkg/cmd.version={{.Version}}' + + - id: linux + goos: [linux] + goarch: ['386', arm, amd64, arm64] + env: + - CGO_ENABLED=0 + binary: '{{ .ProjectName }}' + main: ./cmd/hypeman/main.go + mod_timestamp: '{{ .CommitTimestamp }}' + ldflags: + - '-s -w -X 
github.com/kernel/hypeman-cli/pkg/cmd.version={{.Version}}' + + - id: windows + goos: [windows] + goarch: ['386', amd64, arm64] + binary: '{{ .ProjectName }}' + main: ./cmd/hypeman/main.go + mod_timestamp: '{{ .CommitTimestamp }}' + ldflags: + - '-s -w -X github.com/kernel/hypeman-cli/pkg/cmd.version={{.Version}}' + +archives: + - id: linux-archive + ids: [linux] + name_template: '{{ .ProjectName }}_{{ .Version }}_linux_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}' + formats: [tar.gz] + files: + - completions/* + - man/*/* + - id: macos-archive + ids: [macos] + name_template: '{{ .ProjectName }}_{{ .Version }}_macos_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}' + formats: [zip] + files: + - completions/* + - man/*/* + - id: windows-archive + ids: [windows] + name_template: '{{ .ProjectName }}_{{ .Version }}_windows_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}' + formats: [zip] + files: + - completions/* + - man/*/* + +snapshot: + version_template: '{{ .Tag }}-next' + +nfpms: + - license: Apache-2.0 + maintainer: Kernel + bindir: /usr + formats: + - apk + - deb + - rpm + - termux.deb + - archlinux + contents: + - src: man/man1/*.1.gz + dst: /usr/share/man/man1/ +brews: + - name: hypeman + repository: + owner: kernel + name: homebrew-tap + token: "{{ .Env.HOMEBREW_TAP_GITHUB_TOKEN }}" + homepage: https://github.com/kernel/hypeman + description: orchestrate cloud-hypervisor VMs + license: Apache-2.0 + directory: Formula + install: | + bin.install "hypeman" + bash_completion.install "completions/hypeman.bash" => "hypeman" + zsh_completion.install "completions/hypeman.zsh" => "_hypeman" + fish_completion.install "completions/hypeman.fish" + man1.install Dir["man/man1/*.1.gz"] + test: | + system "#{bin}/hypeman", "--version" diff --git a/apps/cli/.release-please-manifest.json b/apps/cli/.release-please-manifest.json new file mode 100644 index 00000000..1b77f506 --- /dev/null +++ b/apps/cli/.release-please-manifest.json @@ -0,0 +1,3 @@ +{ + ".": "0.7.0" +} \ No newline at 
end of file diff --git a/apps/cli/CHANGELOG.md b/apps/cli/CHANGELOG.md new file mode 100644 index 00000000..e1e0a6bf --- /dev/null +++ b/apps/cli/CHANGELOG.md @@ -0,0 +1,119 @@ +# Changelog + +## 0.7.0 (2025-12-23) + +Full Changelog: [v0.6.1...v0.7.0](https://github.com/onkernel/hypeman-cli/compare/v0.6.1...v0.7.0) + +### Features + +* add cp command for file copy to/from instances ([#18](https://github.com/onkernel/hypeman-cli/issues/18)) ([f67ad7b](https://github.com/onkernel/hypeman-cli/commit/f67ad7bcb6fbbe0a9409574fababab862da87840)) + + +### Chores + +* **internal:** codegen related update ([a6c6588](https://github.com/onkernel/hypeman-cli/commit/a6c6588d42a6981b65f5144d033f040afc29a959)) + +## 0.6.1 (2025-12-11) + +Full Changelog: [v0.6.0...v0.6.1](https://github.com/onkernel/hypeman-cli/compare/v0.6.0...v0.6.1) + +## 0.6.0 (2025-12-06) + +Full Changelog: [v0.5.1...v0.6.0](https://github.com/onkernel/hypeman-cli/compare/v0.5.1...v0.6.0) + +### Features + +* **cli:** automatic streaming for paginated endpoints ([9af6924](https://github.com/onkernel/hypeman-cli/commit/9af69246d62010c32d39583c8b1eba39a663d3fa)) + +## 0.5.1 (2025-12-05) + +Full Changelog: [v0.5.0...v0.5.1](https://github.com/onkernel/hypeman-cli/compare/v0.5.0...v0.5.1) + +### Features + +* **api:** manual updates ([a3f2ec1](https://github.com/onkernel/hypeman-cli/commit/a3f2ec15101a6afd6feb1da1addcb3a2589acb53)) +* fix edge cases for sending request data and add YAML support ([3e740a9](https://github.com/onkernel/hypeman-cli/commit/3e740a94698f4704e79cc5c3b6434cbb1bfcb935)) +* Ingress ([bfb79c5](https://github.com/onkernel/hypeman-cli/commit/bfb79c5a160a3b92cac3793ea49da49ddcc7c8c6)) +* Initialize volume with data ([ef9997c](https://github.com/onkernel/hypeman-cli/commit/ef9997cc2c6d0fc14531bdf9d1238f3447e3a454)) +* **push:** add hypeman push command for local image upload ([e120ec6](https://github.com/onkernel/hypeman-cli/commit/e120ec6d96531ab49909a3d55895f5fcc4d43dc2)) +* respect 
HYPEMAN_BASE_URL environment variable ([17122d7](https://github.com/onkernel/hypeman-cli/commit/17122d7b2d6041c57d4e2d341b52f18697aef5d4)) + + +### Bug Fixes + +* fix for default flag values ([812e009](https://github.com/onkernel/hypeman-cli/commit/812e0091f73ab5e8992adab5ca1c2cef76b60c63)) +* **run:** wait for image to be ready before creating instance ([048ee73](https://github.com/onkernel/hypeman-cli/commit/048ee7311c39d6c3c7efad9c662fa2a1993ced97)) +* use correct user agent value ([580e468](https://github.com/onkernel/hypeman-cli/commit/580e468e95a11c8c57016954464039af3b0586f1)) + + +### Chores + +* add scripts ([c3e4955](https://github.com/onkernel/hypeman-cli/commit/c3e4955f932edc7567d929f22f3e93f22ae69e1a)) +* update dependencies ([4ed31f6](https://github.com/onkernel/hypeman-cli/commit/4ed31f6294c1b94ef764bb7959dc99e89af62cfb)) + +## 0.5.0 (2025-11-26) + +Full Changelog: [v0.4.0...v0.5.0](https://github.com/onkernel/hypeman-cli/compare/v0.4.0...v0.5.0) + +### Features + +* Generate log streaming ([31951c5](https://github.com/onkernel/hypeman-cli/commit/31951c5caf65c008f9811ffd023f54a10c3f1474)) + +## 0.4.0 (2025-11-26) + +Full Changelog: [v0.3.0...v0.4.0](https://github.com/onkernel/hypeman-cli/compare/v0.3.0...v0.4.0) + +### Features + +* Remove exec from openapi spec ([6bde031](https://github.com/onkernel/hypeman-cli/commit/6bde031264de6cd6b17afe32f73a70bf14c2f36d)) + +## 0.3.0 (2025-11-26) + +Full Changelog: [v0.2.0...v0.3.0](https://github.com/onkernel/hypeman-cli/compare/v0.2.0...v0.3.0) + +### Features + +* **api:** add exec ([cc1d174](https://github.com/onkernel/hypeman-cli/commit/cc1d17479467b19436346b30256f92d99474d9ed)) + +## 0.2.0 (2025-11-26) + +Full Changelog: [v0.1.2...v0.2.0](https://github.com/onkernel/hypeman-cli/compare/v0.1.2...v0.2.0) + +### Features + +* Network manager ([cece9ba](https://github.com/onkernel/hypeman-cli/commit/cece9ba6e801a9b29512357060e5642976c8e3ec)) + + +### Chores + +* **client:** change name of underlying 
properties for models and params ([27fd97a](https://github.com/onkernel/hypeman-cli/commit/27fd97aa3faa8b436a625783232d36250bbd191a)) + +## 0.1.2 (2025-11-20) + +Full Changelog: [v0.1.1...v0.1.2](https://github.com/onkernel/hypeman-cli/compare/v0.1.1...v0.1.2) + +### ⚠ BREAKING CHANGES + +* new logic for parsing arguments + +### Features + +* new logic for parsing arguments ([de05b62](https://github.com/onkernel/hypeman-cli/commit/de05b6274cb3d3c27dcfe9784a331a9762a8dca5)) + +## 0.1.1 (2025-11-14) + +Full Changelog: [v0.1.0...v0.1.1](https://github.com/onkernel/hypeman-cli/compare/v0.1.0...v0.1.1) + +### Features + +* **api:** manual updates ([1133f94](https://github.com/onkernel/hypeman-cli/commit/1133f94ebcc7e53162d26aa03a265e3520806ebb)) + +## 0.1.0 (2025-11-14) + +Full Changelog: [v0.0.1...v0.1.0](https://github.com/onkernel/hypeman-cli/compare/v0.0.1...v0.1.0) + +### Features + +* **api:** add homebrew ([489dbc8](https://github.com/onkernel/hypeman-cli/commit/489dbc83126ed1f9c506ec64d7f5291f3adfc0ac)) +* **api:** make public ([a42708e](https://github.com/onkernel/hypeman-cli/commit/a42708e4e7f906d338b2db8da0ef56355e2b6ba8)) +* **api:** manual updates ([2aa453f](https://github.com/onkernel/hypeman-cli/commit/2aa453f3c0cc4191329ade9a8c8b2b328eca97e1)) diff --git a/apps/cli/LICENSE b/apps/cli/LICENSE new file mode 100644 index 00000000..5e9bf84c --- /dev/null +++ b/apps/cli/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2025 Hypeman + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/apps/cli/README.md b/apps/cli/README.md new file mode 100644 index 00000000..d0a8e905 --- /dev/null +++ b/apps/cli/README.md @@ -0,0 +1,255 @@ +# Hypeman CLI + +The official CLI for [Hypeman](https://github.com/kernel/hypeman/). 
+ +## Installation + +### Installing with Homebrew + +```sh +brew install kernel/tap/hypeman +``` + +### Installing with Go + + + +```sh +go install 'github.com/kernel/hypeman-cli/cmd/hypeman@latest' +``` + +### Running Locally + + + +```sh +go run cmd/hypeman/main.go +``` + + + +## Usage + +```sh +# Pull an image +hypeman pull nginx:alpine + +# Boot a new VM (auto-pulls image if needed) +hypeman run --name my-app nginx:alpine + +# List running VMs +hypeman ps +# show all VMs +hypeman ps -a + +# View logs of your app +# All commands support using VM name, ID, or partial ID +hypeman logs my-app +hypeman logs -f my-app + +# Execute a command in a running VM +hypeman exec my-app whoami +# Shell into the VM +hypeman exec -it my-app /bin/sh + +# VM lifecycle +# Turn off the VM +hypeman stop my-app +# Boot the VM that was turned off +hypeman start my-app +# Put the VM to sleep (paused) +hypeman standby my-app +# Awaken the VM (resumed) +hypeman restore my-app + +# Create a reverse proxy ("ingress") from the host to your VM +hypeman ingress create --name my-ingress my-app --hostname my-nginx-app --port 80 --host-port 8081 + +# List ingresses +hypeman ingress list + +# Curl nginx through your ingress +curl --header "Host: my-nginx-app" http://127.0.0.1:8081 + +# Delete an ingress +hypeman ingress delete my-ingress + +# Delete all VMs +hypeman rm --force --all +``` + +More ingress features: +- Automatic certs +- Subdomain-based routing + +```bash +# Make your VM if not already present +hypeman run --name my-app nginx:alpine + +# This requires configuring the Hypeman server with DNS credentials +# Change --hostname to a domain you own +hypeman ingress create --name my-tls-ingress my-app --hostname hello.hypeman-development.com -p 80 --host-port 7443 --tls + +# Curl through your TLS-terminating reverse proxy configuration +curl \ + --resolve hello.hypeman-development.com:7443:127.0.0.1 \ + https://hello.hypeman-development.com:7443 + +# OR... 
Ingress also supports subdomain-based routing +hypeman ingress create --name my-tls-subdomain-ingress '{instance}' --hostname '{instance}.hypeman-development.com' -p 80 --host-port 8443 --tls + +# Curling through the subdomain-based routing +curl \ + --resolve my-app.hypeman-development.com:8443:127.0.0.1 \ + https://my-app.hypeman-development.com:8443 + +# Delete all ingress +hypeman ingress delete --all +``` + +More logging features: +- Cloud Hypervisor logs +- Hypeman operational logs + +```bash +# View Cloud Hypervisor logs for your VM +hypeman logs --source vmm my-app +# View Hypeman logs for your VM +hypeman logs --source hypeman my-app +``` + +For details about specific commands, use the `--help` flag. + +The CLI also provides resource-based commands for more advanced usage: + +```sh +hypeman [resource] [command] [flags] +``` + +## Resource Management + +### Viewing Server Resources + +Check available server capacity, current allocations, and GPU availability: + +```bash +# Show server resource status (CPU, memory, disk, network, GPU) +hypeman resources + +# Show resources as JSON +hypeman resources --format json + +# Show only GPU information +hypeman resources --transform gpu +``` + +### Per-VM Resource Limits + +Control resource allocation for instances: + +```bash +# Set disk I/O limit +hypeman run --disk-io 100MB/s --name io-limited myimage:latest + +# Set network bandwidth limits +hypeman run --bandwidth-down 1Gbps --bandwidth-up 500Mbps --name bw-limited myimage:latest + +# Combine multiple resource options +hypeman run \ + --cpus 4 \ + --memory 8GB \ + --gpu-profile L40S-2Q \ + --disk-io 200MB/s \ + --bandwidth-down 10Gbps \ + --name ml-training \ + pytorch:latest +``` + +## GPU support + + +### GPU Passthrough + +For full GPU passthrough (entire GPU dedicated to one VM): + +```bash +# Discover available passthrough-capable devices +hypeman device available + +# Register a GPU for passthrough +hypeman device register --pci-address 0000:a2:00.0 --name 
my-gpu + +# List registered devices +hypeman device list + +# Run an instance with the GPU attached +hypeman run --device my-gpu --hypervisor qemu --name gpu-workload cuda:12.0 + +# When done, unregister the device +hypeman device delete my-gpu +``` + +### Nvidia vGPU + +Use NVIDIA vGPU to share a physical GPU across multiple VMs: + +```bash +# Run with a vGPU profile +hypeman run --gpu-profile L40S-1Q --name ml-workload pytorch:latest + +# Run with more vGPU resources +hypeman run --gpu-profile L40S-4Q --cpus 8 --memory 32GB --name training-job tensorflow:latest +``` + +### Hypervisor Selection + +Choose between Cloud Hypervisor (default) and QEMU: + +```bash +# Run with QEMU (more compatible with some features like vGPU) +hypeman run --hypervisor qemu --name qemu-vm myimage:latest + +# Run with Cloud Hypervisor (default, faster boot) +hypeman run --hypervisor cloud-hypervisor --name ch-vm myimage:latest +``` + +## Global Flags + +- `--debug` - Enable debug logging (includes HTTP request/response details) +- `--version`, `-v` - Show the CLI version + +## Development + +### Testing Preview Branches + +When developing features in the main [hypeman](https://github.com/kernel/hypeman) repo, Stainless automatically creates preview branches in `stainless-sdks/hypeman-cli` with your API changes. You can check out these branches locally to test the CLI changes: + +```bash +# Checkout preview/ (e.g., if working on "devices" branch in hypeman) +./scripts/checkout-preview devices + +# Checkout an exact branch name +./scripts/checkout-preview -b main +./scripts/checkout-preview -b preview/my-feature +``` + +The script automatically adds the `stainless` remote if needed and also updates `go.mod` to point the `hypeman-go` SDK dependency to the corresponding preview branch in `stainless-sdks/hypeman-go`. + +> **Warning:** The `go.mod` and `go.sum` changes from `checkout-preview` are for local testing only. Do not commit these changes. 
+ +After checking out a preview branch, you can build and test the CLI: + +```bash +go build -o hypeman ./cmd/hypeman +./hypeman --help +``` + +You can also point the SDK dependency independently: + +```bash +# Point hypeman-go to a specific branch +./scripts/use-sdk-preview preview/my-feature + +# Point to a specific commit +./scripts/use-sdk-preview abc1234def567 +``` diff --git a/apps/cli/SECURITY.md b/apps/cli/SECURITY.md new file mode 100644 index 00000000..94a5b008 --- /dev/null +++ b/apps/cli/SECURITY.md @@ -0,0 +1,23 @@ +# Security Policy + +## Reporting Security Issues + +This SDK is generated by [Stainless Software Inc](http://stainless.com). Stainless takes security seriously, and encourages you to report any security vulnerability promptly so that appropriate action can be taken. + +To report a security issue, please contact the Stainless team at security@stainless.com. + +## Responsible Disclosure + +We appreciate the efforts of security researchers and individuals who help us maintain the security of +SDKs we generate. If you believe you have found a security vulnerability, please adhere to responsible +disclosure practices by allowing us a reasonable amount of time to investigate and address the issue +before making any information public. + +## Reporting Non-SDK Related Security Issues + +If you encounter security issues that are not directly related to SDKs but pertain to the services +or products provided by Hypeman, please follow the respective company's security reporting guidelines. + +--- + +Thank you for helping us keep the SDKs and systems they interact with secure. 
diff --git a/apps/cli/bin/check-release-environment b/apps/cli/bin/check-release-environment new file mode 100644 index 00000000..1e951e9a --- /dev/null +++ b/apps/cli/bin/check-release-environment @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +errors=() + +lenErrors=${#errors[@]} + +if [[ lenErrors -gt 0 ]]; then + echo -e "Found the following errors in the release environment:\n" + + for error in "${errors[@]}"; do + echo -e "- $error\n" + done + + exit 1 +fi + +echo "The environment is ready to push releases!" diff --git a/apps/cli/cmd/hypeman/main.go b/apps/cli/cmd/hypeman/main.go new file mode 100644 index 00000000..23c43b24 --- /dev/null +++ b/apps/cli/cmd/hypeman/main.go @@ -0,0 +1,39 @@ +package main + +import ( + "context" + "errors" + "fmt" + "net/http" + "os" + + "github.com/kernel/hypeman-cli/pkg/cmd" + "github.com/kernel/hypeman-go" + "github.com/tidwall/gjson" +) + +func main() { + app := cmd.Command + if err := app.Run(context.Background(), os.Args); err != nil { + // An ExecExitError wraps the exit status of a command run via `exec`; propagate that code verbatim so shell callers see the real status. + var execErr *cmd.ExecExitError + if errors.As(err, &execErr) { + os.Exit(execErr.Code) + } + + var apierr *hypeman.Error + if errors.As(err, &apierr) { + fmt.Fprintf(os.Stderr, "%s %q: %d %s\n", apierr.Request.Method, apierr.Request.URL, apierr.Response.StatusCode, http.StatusText(apierr.Response.StatusCode)) + format := app.String("format-error") + json := gjson.Parse(apierr.RawJSON()) + show_err := cmd.ShowJSON(os.Stdout, "Error", json, format, app.String("transform-error")) + if show_err != nil { + // Structured rendering failed; fall back to printing the original error text: + fmt.Fprintf(os.Stderr, "%s\n", err.Error()) + } + } else { + fmt.Fprintf(os.Stderr, "%s\n", err.Error()) + } + os.Exit(1) + } +} diff --git a/apps/cli/go.mod b/apps/cli/go.mod new file mode 100644 index 00000000..f5910038 --- /dev/null +++ b/apps/cli/go.mod @@ -0,0 +1,89 @@ +module github.com/kernel/hypeman-cli + +go 1.25 + +require ( + github.com/charmbracelet/bubbles v0.21.0 + 
github.com/charmbracelet/bubbletea v1.3.6 + github.com/charmbracelet/lipgloss v1.1.0 + github.com/charmbracelet/x/term v0.2.1 + github.com/google/go-containerregistry v0.20.7 + github.com/gorilla/websocket v1.5.3 + github.com/itchyny/json2yaml v0.1.4 + github.com/kernel/hypeman-go v0.11.0 + github.com/knadh/koanf/parsers/yaml v1.1.0 + github.com/knadh/koanf/providers/file v1.2.1 + github.com/knadh/koanf/v2 v2.3.2 + github.com/muesli/reflow v0.3.0 + github.com/stretchr/testify v1.11.1 + github.com/tidwall/gjson v1.18.0 + github.com/tidwall/pretty v1.2.1 + github.com/tidwall/sjson v1.2.5 + github.com/urfave/cli-docs/v3 v3.0.0-alpha6 + github.com/urfave/cli/v3 v3.3.2 + golang.org/x/sys v0.38.0 + golang.org/x/term v0.37.0 +) + +require ( + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect + github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect + github.com/charmbracelet/x/ansi v0.9.3 // indirect + github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect + github.com/containerd/errdefs v1.0.0 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/cli v29.0.3+incompatible // indirect + github.com/docker/distribution v2.8.3+incompatible // indirect + github.com/docker/docker v28.5.2+incompatible // indirect + github.com/docker/docker-credential-helpers v0.9.3 // indirect + github.com/docker/go-connections v0.5.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + 
github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect + github.com/klauspost/compress v1.18.1 // indirect + github.com/knadh/koanf/maps v0.1.2 // indirect + github.com/knadh/koanf/providers/env v1.1.0 // indirect + github.com/lucasb-eyer/go-colorful v1.2.0 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-localereader v0.0.1 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect + github.com/muesli/cancelreader v0.2.2 // indirect + github.com/muesli/termenv v0.16.0 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/rivo/uniseg v0.4.7 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/vbatts/tar-split v0.12.2 // indirect + github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect + go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/sync v0.18.0 // indirect + golang.org/x/text v0.31.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/genproto/googleapis/rpc 
v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/grpc v1.75.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/apps/cli/go.sum b/apps/cli/go.sum new file mode 100644 index 00000000..9c04859f --- /dev/null +++ b/apps/cli/go.sum @@ -0,0 +1,224 @@ +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= +github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= +github.com/aymanbagabas/go-udiff v0.2.0 h1:TK0fH4MteXUDspT88n8CKzvK0X9O2xu9yQjWpi6yML8= +github.com/aymanbagabas/go-udiff v0.2.0/go.mod h1:RE4Ex0qsGkTAJoQdQQCA0uG+nAzJO/pI/QwceO5fgrA= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/charmbracelet/bubbles v0.21.0 h1:9TdC97SdRVg/1aaXNVWfFH3nnLAwOXr8Fn6u6mfQdFs= +github.com/charmbracelet/bubbles v0.21.0/go.mod h1:HF+v6QUR4HkEpz62dx7ym2xc71/KBHg+zKwJtMw+qtg= +github.com/charmbracelet/bubbletea v1.3.6 h1:VkHIxPJQeDt0aFJIsVxw8BQdh/F/L2KKZGsK6et5taU= +github.com/charmbracelet/bubbletea v1.3.6/go.mod h1:oQD9VCRQFF8KplacJLo28/jofOI2ToOfGYeFgBBxHOc= +github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs= +github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk= +github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY= 
+github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30= +github.com/charmbracelet/x/ansi v0.9.3 h1:BXt5DHS/MKF+LjuK4huWrC6NCvHtexww7dMayh6GXd0= +github.com/charmbracelet/x/ansi v0.9.3/go.mod h1:3RQDQ6lDnROptfpWuUVIUG64bD2g2BgntdxH0Ya5TeE= +github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd h1:vy0GVL4jeHEwG5YOXDmi86oYw2yuYUGqz6a8sLwg0X8= +github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs= +github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 h1:payRxjMjKgx2PaCWLZ4p3ro9y97+TVLZNaRZgJwSVDQ= +github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U= +github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ= +github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8= +github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q= +github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo= +github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/cli v29.0.3+incompatible h1:8J+PZIcF2xLd6h5sHPsp5pvvJA+Sr2wGQxHkRl53a1E= +github.com/docker/cli v29.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= +github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM= +github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= +github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4= +github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsnotify v1.9.0 
h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I= +github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= +github.com/itchyny/json2yaml v0.1.4 h1:/pErVOXGG5iTyXHi/QKR4y3uzhLjGTEmmJIy97YT+k8= +github.com/itchyny/json2yaml v0.1.4/go.mod h1:6iudhBZdarpjLFRNj+clWLAkGft+9uCcjAZYXUH9eGI= +github.com/kernel/hypeman-go v0.11.0 h1:hCXNUHtrhGKswJapzyWyozBOXhKK/oreKvm0AXHuE6c= +github.com/kernel/hypeman-go v0.11.0/go.mod 
h1:guRrhyP9QW/ebUS1UcZ0uZLLJeGAAhDNzSi68U4M9hI= +github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= +github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= +github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpbo= +github.com/knadh/koanf/maps v0.1.2/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= +github.com/knadh/koanf/parsers/yaml v1.1.0 h1:3ltfm9ljprAHt4jxgeYLlFPmUaunuCgu1yILuTXRdM4= +github.com/knadh/koanf/parsers/yaml v1.1.0/go.mod h1:HHmcHXUrp9cOPcuC+2wrr44GTUB0EC+PyfN3HZD9tFg= +github.com/knadh/koanf/providers/env v1.1.0 h1:U2VXPY0f+CsNDkvdsG8GcsnK4ah85WwWyJgef9oQMSc= +github.com/knadh/koanf/providers/env v1.1.0/go.mod h1:QhHHHZ87h9JxJAn2czdEl6pdkNnDh/JS1Vtsyt65hTY= +github.com/knadh/koanf/providers/file v1.2.1 h1:bEWbtQwYrA+W2DtdBrQWyXqJaJSG3KrP3AESOJYp9wM= +github.com/knadh/koanf/providers/file v1.2.1/go.mod h1:bp1PM5f83Q+TOUu10J/0ApLBd9uIzg+n9UgthfY+nRA= +github.com/knadh/koanf/v2 v2.3.2 h1:Ee6tuzQYFwcZXQpc2MiVeC6qHMandf5SMUJJNoFp/c4= +github.com/knadh/koanf/v2 v2.3.2/go.mod h1:gRb40VRAbd4iJMYYD5IxZ6hfuopFcXBpc9bbQpZwo28= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= +github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4= +github.com/mattn/go-localereader v0.0.1/go.mod 
h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88= +github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= +github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= +github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= +github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI= +github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod 
h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo= +github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA= +github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo= +github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= +github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= +github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= +github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod 
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/urfave/cli-docs/v3 v3.0.0-alpha6 h1:w/l/N0xw1rO/aHRIGXJ0lDwwYFOzilup1qGvIytP3BI= +github.com/urfave/cli-docs/v3 v3.0.0-alpha6/go.mod h1:p7Z4lg8FSTrPB9GTaNyTrK3ygffHZcK3w0cU2VE+mzU= +github.com/urfave/cli/v3 v3.3.2 h1:BYFVnhhZ8RqT38DxEYVFPPmGFTEf7tJwySTXsVRrS/o= +github.com/urfave/cli/v3 v3.3.2/go.mod h1:FJSKtM/9AiiTOJL4fJ6TbMUkxBXn7GO9guZqoZtpYpo= +github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4= +github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/xo/terminfo 
v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= 
+go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A= +go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561 h1:MDc5xs78ZrZr3HMQugiXOAkSZtfTpbJLDr/lwfgO53E= +golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= 
+google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= +google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= +google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= diff --git a/apps/cli/pkg/cmd/build.go b/apps/cli/pkg/cmd/build.go new file mode 100644 index 00000000..21deedb0 --- /dev/null +++ b/apps/cli/pkg/cmd/build.go @@ -0,0 +1,492 @@ +package cmd + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "context" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/kernel/hypeman-go" + "github.com/kernel/hypeman-go/option" + "github.com/tidwall/gjson" + "github.com/urfave/cli/v3" +) + +var buildCmd = cli.Command{ + Name: "build", + Usage: "Build an image from a Dockerfile", + 
ArgsUsage: "[path]", + Description: `Build an image from a Dockerfile and source context. + +The path argument specifies the build context directory containing the +source code and Dockerfile. If not specified, the current directory is used. + +Subcommands are available for managing builds: + hypeman build list List builds + hypeman build get Get build details + hypeman build cancel Cancel a build + +Examples: + # Build from current directory + hypeman build + + # Build from a specific directory + hypeman build ./myapp + + # Build with a specific Dockerfile + hypeman build -f Dockerfile.prod ./myapp + + # Build with custom timeout + hypeman build --timeout 1200 ./myapp`, + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "file", + Aliases: []string{"f"}, + Usage: "Path to Dockerfile (relative to context or absolute)", + }, + &cli.IntFlag{ + Name: "timeout", + Usage: "Build timeout in seconds", + Value: 600, + }, + &cli.StringFlag{ + Name: "base-image-digest", + Usage: "Pinned base image digest for reproducible builds", + }, + &cli.StringFlag{ + Name: "cache-scope", + Usage: "Tenant-specific cache key prefix", + }, + &cli.StringFlag{ + Name: "global-cache-key", + Usage: `Global cache identifier (e.g., "node", "python", "ubuntu")`, + }, + &cli.StringFlag{ + Name: "is-admin-build", + Usage: `Set to "true" to grant push access to global cache (operator-only)`, + }, + &cli.StringFlag{ + Name: "secrets", + Usage: `JSON array of secret references to inject during build (e.g., '[{"id":"npm_token"}]')`, + }, + &cli.StringFlag{ + Name: "image-name", + Usage: `Custom image name for the build output (pushed to {registry}/{image_name} instead of {registry}/builds/{id})`, + }, + }, + Commands: []*cli.Command{ + &buildListCmd, + &buildGetCmd, + &buildCancelCmd, + }, + Action: handleBuild, + HideHelpCommand: true, +} + +func handleBuild(ctx context.Context, cmd *cli.Command) error { + // Get build context path (default to current directory) + contextPath := "." 
+ args := cmd.Args().Slice() + if len(args) > 0 { + contextPath = args[0] + } + + // Resolve to absolute path + absContextPath, err := filepath.Abs(contextPath) + if err != nil { + return fmt.Errorf("failed to resolve path: %w", err) + } + + // Check if context directory exists + info, err := os.Stat(absContextPath) + if err != nil { + return fmt.Errorf("cannot access build context: %w", err) + } + if !info.IsDir() { + return fmt.Errorf("build context must be a directory: %s", absContextPath) + } + + // Get Dockerfile path + dockerfilePath := cmd.String("file") + var dockerfileContent string + + if dockerfilePath != "" { + // If dockerfile is specified, read it + if !filepath.IsAbs(dockerfilePath) { + dockerfilePath = filepath.Join(absContextPath, dockerfilePath) + } + content, err := os.ReadFile(dockerfilePath) + if err != nil { + return fmt.Errorf("cannot read Dockerfile: %w", err) + } + dockerfileContent = string(content) + } + + timeout := cmd.Int("timeout") + + fmt.Fprintf(os.Stderr, "Building from %s...\n", contextPath) + + // Create source tarball + tarball, err := createSourceTarball(absContextPath) + if err != nil { + return fmt.Errorf("failed to create source archive: %w", err) + } + + // Create client with options + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) 
+ + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + // Build params + params := hypeman.BuildNewParams{ + Source: bytes.NewReader(tarball.Bytes()), + TimeoutSeconds: hypeman.Opt(int64(timeout)), + } + + if dockerfileContent != "" { + params.Dockerfile = hypeman.Opt(dockerfileContent) + } + + if v := cmd.String("base-image-digest"); v != "" { + params.BaseImageDigest = hypeman.Opt(v) + } + if v := cmd.String("cache-scope"); v != "" { + params.CacheScope = hypeman.Opt(v) + } + if v := cmd.String("global-cache-key"); v != "" { + params.GlobalCacheKey = hypeman.Opt(v) + } + if v := cmd.String("is-admin-build"); v != "" { + params.IsAdminBuild = hypeman.Opt(v) + } + if v := cmd.String("secrets"); v != "" { + params.Secrets = hypeman.Opt(v) + } + if v := cmd.String("image-name"); v != "" { + params.ImageName = hypeman.Opt(v) + } + + // Start build + build, err := client.Builds.New(ctx, params, opts...) + if err != nil { + return fmt.Errorf("failed to start build: %w", err) + } + + fmt.Fprintf(os.Stderr, "Build started: %s\n", build.ID) + + // Stream build events + err = streamBuildEventsSDK(ctx, client, build.ID, opts) + if err != nil { + return fmt.Errorf("build failed: %w", err) + } + + return nil +} + +// streamBuildEventsSDK streams build events using the SDK +func streamBuildEventsSDK(ctx context.Context, client hypeman.Client, buildID string, opts []option.RequestOption) error { + stream := client.Builds.EventsStreaming( + ctx, + buildID, + hypeman.BuildEventsParams{ + Follow: hypeman.Opt(true), + }, + opts..., + ) + defer stream.Close() + + var finalStatus hypeman.BuildStatus + var buildError string + + for stream.Next() { + event := stream.Current() + + switch event.Type { + case hypeman.BuildEventTypeLog: + // Print log content + fmt.Println(event.Content) + + case hypeman.BuildEventTypeStatus: + finalStatus = event.Status + switch event.Status { + case hypeman.BuildStatusQueued: + 
fmt.Fprintf(os.Stderr, "Build queued...\n") + case hypeman.BuildStatusBuilding: + fmt.Fprintf(os.Stderr, "Building...\n") + case hypeman.BuildStatusPushing: + fmt.Fprintf(os.Stderr, "Pushing image...\n") + case hypeman.BuildStatusReady: + fmt.Fprintf(os.Stderr, "Build complete!\n") + return nil + case hypeman.BuildStatusFailed: + buildError = "build failed" + case hypeman.BuildStatusCancelled: + return fmt.Errorf("build was cancelled") + } + + case hypeman.BuildEventTypeHeartbeat: + // Ignore heartbeat events + } + } + + if err := stream.Err(); err != nil { + return err + } + + // Check final status + if finalStatus == hypeman.BuildStatusFailed { + return fmt.Errorf("%s", buildError) + } + if finalStatus == hypeman.BuildStatusReady { + return nil + } + + return fmt.Errorf("build stream ended unexpectedly (status: %s)", finalStatus) +} + +// createSourceTarball creates a gzipped tar archive of the build context +func createSourceTarball(contextPath string) (*bytes.Buffer, error) { + buf := new(bytes.Buffer) + gzWriter := gzip.NewWriter(buf) + tarWriter := tar.NewWriter(gzWriter) + + err := filepath.Walk(contextPath, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Get relative path + relPath, err := filepath.Rel(contextPath, path) + if err != nil { + return err + } + + // Skip root directory + if relPath == "." 
{ + return nil + } + + // Skip common build artifacts and version control + base := filepath.Base(path) + if base == ".git" || base == "node_modules" || base == "__pycache__" || + base == ".venv" || base == "venv" || base == "target" || + base == ".docker" || base == ".dockerignore" { + if info.IsDir() { + return filepath.SkipDir + } + return nil + } + + // Create tar header + header, err := tar.FileInfoHeader(info, "") + if err != nil { + return err + } + + // Use forward slashes for tar paths + header.Name = filepath.ToSlash(relPath) + + // Handle symlinks + if info.Mode()&os.ModeSymlink != 0 { + linkTarget, err := os.Readlink(path) + if err != nil { + return err + } + header.Linkname = linkTarget + } + + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + + // Write file content for regular files + if info.Mode().IsRegular() { + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + + if _, err := io.Copy(tarWriter, file); err != nil { + return err + } + } + + return nil + }) + + if err != nil { + return nil, err + } + + if err := tarWriter.Close(); err != nil { + return nil, err + } + if err := gzWriter.Close(); err != nil { + return nil, err + } + + return buf, nil +} + +var buildListCmd = cli.Command{ + Name: "list", + Usage: "List builds", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "quiet", + Aliases: []string{"q"}, + Usage: "Only display build IDs", + }, + }, + Action: handleBuildList, + HideHelpCommand: true, +} + +var buildGetCmd = cli.Command{ + Name: "get", + Usage: "Get build details", + ArgsUsage: "", + Action: handleBuildGet, + HideHelpCommand: true, +} + +var buildCancelCmd = cli.Command{ + Name: "cancel", + Usage: "Cancel a build", + ArgsUsage: "", + Action: handleBuildCancel, + HideHelpCommand: true, +} + +func handleBuildList(ctx context.Context, cmd *cli.Command) error { + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) 
+ + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + format := cmd.Root().String("format") + transform := cmd.Root().String("transform") + + if format != "auto" { + var res []byte + opts = append(opts, option.WithResponseBodyInto(&res)) + _, err := client.Builds.List(ctx, opts...) + if err != nil { + return err + } + obj := gjson.ParseBytes(res) + return ShowJSON(os.Stdout, "build list", obj, format, transform) + } + + builds, err := client.Builds.List(ctx, opts...) + if err != nil { + return err + } + + quietMode := cmd.Bool("quiet") + + if quietMode { + for _, b := range *builds { + fmt.Println(b.ID) + } + return nil + } + + if len(*builds) == 0 { + fmt.Fprintln(os.Stderr, "No builds found.") + return nil + } + + table := NewTableWriter(os.Stdout, "ID", "STATUS", "IMAGE", "DURATION", "CREATED") + table.TruncOrder = []int{2, 0, 4} // IMAGE first, then ID, CREATED + for _, b := range *builds { + imageRef := b.ImageRef + if imageRef == "" { + imageRef = "-" + } + + duration := "-" + if b.DurationMs > 0 { + secs := b.DurationMs / 1000 + if secs < 60 { + duration = fmt.Sprintf("%ds", secs) + } else { + duration = fmt.Sprintf("%dm%ds", secs/60, secs%60) + } + } + + table.AddRow( + TruncateID(b.ID), + string(b.Status), + imageRef, + duration, + FormatTimeAgo(b.CreatedAt), + ) + } + table.Render() + + return nil +} + +func handleBuildGet(ctx context.Context, cmd *cli.Command) error { + args := cmd.Args().Slice() + if len(args) < 1 { + return fmt.Errorf("build ID required\nUsage: hypeman build get ") + } + + id := args[0] + + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) + + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + var res []byte + opts = append(opts, option.WithResponseBodyInto(&res)) + _, err := client.Builds.Get(ctx, id, opts...) 
+ if err != nil { + return err + } + + format := cmd.Root().String("format") + transform := cmd.Root().String("transform") + + obj := gjson.ParseBytes(res) + return ShowJSON(os.Stdout, "build get", obj, format, transform) +} + +func handleBuildCancel(ctx context.Context, cmd *cli.Command) error { + args := cmd.Args().Slice() + if len(args) < 1 { + return fmt.Errorf("build ID required\nUsage: hypeman build cancel ") + } + + id := args[0] + + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) + + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + err := client.Builds.Cancel(ctx, id, opts...) + if err != nil { + return err + } + + fmt.Fprintf(os.Stderr, "Cancelled build %s\n", id) + return nil +} diff --git a/apps/cli/pkg/cmd/cmd.go b/apps/cli/pkg/cmd/cmd.go new file mode 100644 index 00000000..e2614ca3 --- /dev/null +++ b/apps/cli/pkg/cmd/cmd.go @@ -0,0 +1,159 @@ +package cmd + +import ( + "compress/gzip" + "context" + "fmt" + "os" + "path/filepath" + "slices" + "strings" + + docs "github.com/urfave/cli-docs/v3" + "github.com/urfave/cli/v3" +) + +var ( + Command *cli.Command + OutputFormats = []string{"auto", "explore", "json", "jsonl", "pretty", "raw", "yaml"} +) + +func init() { + cli.VersionPrinter = func(cmd *cli.Command) { + fmt.Fprintf(os.Stdout, "Hypeman CLI version %s\n", cmd.Root().Version) + } + Command = &cli.Command{ + Name: "hypeman", + Usage: "CLI for the hypeman API", + Version: Version, + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "debug", + Usage: "Enable debug logging", + }, + &cli.StringFlag{ + Name: "base-url", + DefaultText: "url", + Usage: "Override the base URL for API requests", + }, + &cli.StringFlag{ + Name: "format", + Usage: "The format for displaying response data (one of: " + strings.Join(OutputFormats, ", ") + ")", + Value: "auto", + Validator: func(format string) error { + if !slices.Contains(OutputFormats, strings.ToLower(format)) { + return 
fmt.Errorf("format must be one of: %s", strings.Join(OutputFormats, ", ")) + } + return nil + }, + }, + &cli.StringFlag{ + Name: "format-error", + Usage: "The format for displaying error data (one of: " + strings.Join(OutputFormats, ", ") + ")", + Value: "auto", + Validator: func(format string) error { + if !slices.Contains(OutputFormats, strings.ToLower(format)) { + return fmt.Errorf("format must be one of: %s", strings.Join(OutputFormats, ", ")) + } + return nil + }, + }, + &cli.StringFlag{ + Name: "transform", + Usage: "The GJSON transformation for data output.", + }, + &cli.StringFlag{ + Name: "transform-error", + Usage: "The GJSON transformation for errors.", + }, + }, + Commands: []*cli.Command{ + &buildCmd, + &execCmd, + &cpCmd, + &pullCmd, + &pushCmd, + &runCmd, + &psCmd, + &logsCmd, + &rmCmd, + &stopCmd, + &startCmd, + &standbyCmd, + &restoreCmd, + &imageCmd, + &ingressCmd, + &volumeCmd, + &resourcesCmd, + &deviceCmd, + { + Name: "@manpages", + Usage: "Generate documentation for 'man'", + UsageText: "hypeman @manpages [-o hypeman.1] [--gzip]", + Hidden: true, + Action: generateManpages, + HideHelpCommand: true, + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "output", + Aliases: []string{"o"}, + Usage: "write manpages to the given folder", + Value: "man", + }, + &cli.BoolFlag{ + Name: "gzip", + Aliases: []string{"z"}, + Usage: "output gzipped manpage files to .gz", + Value: true, + }, + &cli.BoolFlag{ + Name: "text", + Aliases: []string{"z"}, + Usage: "output uncompressed text files", + Value: false, + }, + }, + }, + }, + EnableShellCompletion: true, + ShellCompletionCommandName: "@completion", + HideHelpCommand: true, + } +} + +func generateManpages(ctx context.Context, c *cli.Command) error { + manpage, err := docs.ToManWithSection(Command, 1) + if err != nil { + return err + } + dir := c.String("output") + err = os.MkdirAll(filepath.Join(dir, "man1"), 0755) + if err != nil { + // handle error + } + if c.Bool("text") { + file, err := 
os.Create(filepath.Join(dir, "man1", "hypeman.1")) + if err != nil { + return err + } + defer file.Close() + if _, err := file.WriteString(manpage); err != nil { + return err + } + } + if c.Bool("gzip") { + file, err := os.Create(filepath.Join(dir, "man1", "hypeman.1.gz")) + if err != nil { + return err + } + defer file.Close() + gzWriter := gzip.NewWriter(file) + defer gzWriter.Close() + _, err = gzWriter.Write([]byte(manpage)) + if err != nil { + return err + } + } + fmt.Printf("Wrote manpages to %s\n", dir) + return nil +} diff --git a/apps/cli/pkg/cmd/cmdutil.go b/apps/cli/pkg/cmd/cmdutil.go new file mode 100644 index 00000000..e50b2e0f --- /dev/null +++ b/apps/cli/pkg/cmd/cmdutil.go @@ -0,0 +1,228 @@ +package cmd + +import ( + "fmt" + "io" + "log" + "net/http" + "net/http/httputil" + "os" + "os/exec" + "os/signal" + "strings" + "syscall" + + "github.com/kernel/hypeman-cli/pkg/jsonview" + "github.com/kernel/hypeman-go/option" + + "github.com/itchyny/json2yaml" + "github.com/tidwall/gjson" + "github.com/tidwall/pretty" + "github.com/urfave/cli/v3" + "golang.org/x/term" +) + +func getDefaultRequestOptions(cmd *cli.Command) []option.RequestOption { + opts := []option.RequestOption{ + option.WithHeader("User-Agent", fmt.Sprintf("Hypeman/CLI %s", Version)), + } + + if baseURL := resolveBaseURL(cmd); baseURL != "" { + opts = append(opts, option.WithBaseURL(baseURL)) + } + + if apiKey := resolveAPIKey(); apiKey != "" { + opts = append(opts, option.WithAPIKey(apiKey)) + } + + return opts +} + +var debugMiddlewareOption = option.WithMiddleware( + func(r *http.Request, mn option.MiddlewareNext) (*http.Response, error) { + logger := log.Default() + + if reqBytes, err := httputil.DumpRequest(r, true); err == nil { + logger.Printf("Request Content:\n%s\n", reqBytes) + } + + resp, err := mn(r) + if err != nil { + return resp, err + } + + if respBytes, err := httputil.DumpResponse(resp, true); err == nil { + logger.Printf("Response Content:\n%s\n", respBytes) + } + + return 
resp, err + }, +) + +func isInputPiped() bool { + stat, _ := os.Stdin.Stat() + return (stat.Mode() & os.ModeCharDevice) == 0 +} + +func isTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + return term.IsTerminal(int(v.Fd())) + default: + return false + } +} + +func streamOutput(label string, generateOutput func(w *os.File) error) error { + // For non-tty output (probably a pipe), write directly to stdout + if !isTerminal(os.Stdout) { + return streamToStdout(generateOutput) + } + + pagerInput, outputFile, isSocketPair, err := createPagerFiles() + if err != nil { + return err + } + defer pagerInput.Close() + defer outputFile.Close() + + cmd, err := startPagerCommand(pagerInput, label, isSocketPair) + if err != nil { + return err + } + + if err := pagerInput.Close(); err != nil { + return err + } + + // If the pager exits before reading all input, then generateOutput() will + // produce a broken pipe error, which is fine and we don't want to propagate it. + if err := generateOutput(outputFile); err != nil && !strings.Contains(err.Error(), "broken pipe") { + return err + } + + return cmd.Wait() +} + +func streamToStdout(generateOutput func(w *os.File) error) error { + signal.Ignore(syscall.SIGPIPE) + err := generateOutput(os.Stdout) + if err != nil && strings.Contains(err.Error(), "broken pipe") { + return nil + } + return err +} + +func createPagerFiles() (*os.File, *os.File, bool, error) { + // We prefer sockets when available because they allow for smaller buffer + // sizes, preventing unnecessary data streaming from the backend. Pipes + // typically have large buffers but serve as a decent alternative when + // sockets aren't available (e.g., on Windows). 
+ pagerInput, outputFile, isSocketPair, err := createSocketPair() + if err == nil { + return pagerInput, outputFile, isSocketPair, nil + } + + r, w, err := os.Pipe() + return r, w, false, err +} + +// Start a subprocess running the user's preferred pager (or `less` if `$PAGER` is unset) +func startPagerCommand(pagerInput *os.File, label string, useSocketpair bool) (*exec.Cmd, error) { + pagerProgram := os.Getenv("PAGER") + if pagerProgram == "" { + pagerProgram = "less" + } + + if shouldUseColors(os.Stdout) { + os.Setenv("FORCE_COLOR", "1") + } + + var cmd *exec.Cmd + if useSocketpair { + cmd = exec.Command(pagerProgram, fmt.Sprintf("/dev/fd/%d", pagerInput.Fd())) + cmd.ExtraFiles = []*os.File{pagerInput} + } else { + cmd = exec.Command(pagerProgram) + cmd.Stdin = pagerInput + } + + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Env = append(os.Environ(), + "LESS=-r -f -P "+label, + "MORE=-r -f -P "+label, + ) + + if err := cmd.Start(); err != nil { + return nil, err + } + + return cmd, nil +} + +func shouldUseColors(w io.Writer) bool { + force, ok := os.LookupEnv("FORCE_COLOR") + if ok { + if force == "1" { + return true + } + if force == "0" { + return false + } + } + return isTerminal(w) +} + +func ShowJSON(out *os.File, title string, res gjson.Result, format string, transform string) error { + if format != "raw" && transform != "" { + transformed := res.Get(transform) + if transformed.Exists() { + res = transformed + } + } + switch strings.ToLower(format) { + case "auto": + return ShowJSON(out, title, res, "json", "") + case "explore": + return jsonview.ExploreJSON(title, res) + case "pretty": + _, err := out.WriteString(jsonview.RenderJSON(title, res) + "\n") + return err + case "json": + prettyJSON := pretty.Pretty([]byte(res.Raw)) + if shouldUseColors(out) { + _, err := out.Write(pretty.Color(prettyJSON, pretty.TerminalStyle)) + return err + } else { + _, err := out.Write(prettyJSON) + return err + } + case "jsonl": + // @ugly is gjson syntax for "no 
whitespace", so it fits on one line + oneLineJSON := res.Get("@ugly").Raw + if shouldUseColors(out) { + bytes := append(pretty.Color([]byte(oneLineJSON), pretty.TerminalStyle), '\n') + _, err := out.Write(bytes) + return err + } else { + _, err := out.Write([]byte(oneLineJSON + "\n")) + return err + } + case "raw": + if _, err := out.Write([]byte(res.Raw + "\n")); err != nil { + return err + } + return nil + case "yaml": + input := strings.NewReader(res.Raw) + var yaml strings.Builder + if err := json2yaml.Convert(&yaml, input); err != nil { + return err + } + _, err := out.Write([]byte(yaml.String())) + return err + default: + return fmt.Errorf("Invalid format: %s, valid formats are: %s", format, strings.Join(OutputFormats, ", ")) + } +} diff --git a/apps/cli/pkg/cmd/cmdutil_test.go b/apps/cli/pkg/cmd/cmdutil_test.go new file mode 100644 index 00000000..027f3d46 --- /dev/null +++ b/apps/cli/pkg/cmd/cmdutil_test.go @@ -0,0 +1,17 @@ +package cmd + +import ( + "os" + "testing" +) + +func TestStreamOutput(t *testing.T) { + t.Setenv("PAGER", "cat") + err := streamOutput("stream test", func(w *os.File) error { + _, writeErr := w.WriteString("Hello world\n") + return writeErr + }) + if err != nil { + t.Errorf("streamOutput failed: %v", err) + } +} diff --git a/apps/cli/pkg/cmd/cmdutil_unix.go b/apps/cli/pkg/cmd/cmdutil_unix.go new file mode 100644 index 00000000..6883c525 --- /dev/null +++ b/apps/cli/pkg/cmd/cmdutil_unix.go @@ -0,0 +1,33 @@ +//go:build !windows + +package cmd + +import ( + "os" + + "golang.org/x/sys/unix" +) + +// In order to avoid large buffers on pipes, this function create a pair of +// files for reading and writing through a barely buffered socket. 
+func createSocketPair() (*os.File, *os.File, bool, error) { + fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM, 0) + if err != nil { + return nil, nil, false, err + } + + parentSock, childSock := fds[0], fds[1] + + // Use small buffer sizes so we don't ask the server for more paginated + // values than we actually need. + if err := unix.SetsockoptInt(parentSock, unix.SOL_SOCKET, unix.SO_SNDBUF, 128); err != nil { + return nil, nil, false, err + } + if err := unix.SetsockoptInt(childSock, unix.SOL_SOCKET, unix.SO_RCVBUF, 128); err != nil { + return nil, nil, false, err + } + + pagerInput := os.NewFile(uintptr(childSock), "child_socket") + outputFile := os.NewFile(uintptr(parentSock), "parent_socket") + return pagerInput, outputFile, true, nil +} diff --git a/apps/cli/pkg/cmd/cmdutil_windows.go b/apps/cli/pkg/cmd/cmdutil_windows.go new file mode 100644 index 00000000..48a27b02 --- /dev/null +++ b/apps/cli/pkg/cmd/cmdutil_windows.go @@ -0,0 +1,14 @@ +//go:build windows + +package cmd + +import ( + "errors" + "os" +) + +// createSocketPair is not supported on Windows, so we return an error +// which causes createPagerFiles to fall back to using pipes. +func createSocketPair() (*os.File, *os.File, bool, error) { + return nil, nil, false, errors.New("socket pairs not supported on Windows") +} diff --git a/apps/cli/pkg/cmd/config.go b/apps/cli/pkg/cmd/config.go new file mode 100644 index 00000000..2cd4146c --- /dev/null +++ b/apps/cli/pkg/cmd/config.go @@ -0,0 +1,74 @@ +package cmd + +import ( + "os" + "path/filepath" + "strings" + + "github.com/knadh/koanf/parsers/yaml" + "github.com/knadh/koanf/providers/env" + "github.com/knadh/koanf/providers/file" + "github.com/knadh/koanf/v2" + "github.com/urfave/cli/v3" +) + +// CLIConfig holds CLI configuration loaded from cli.yaml +type CLIConfig struct { + BaseURL string `koanf:"base_url"` + APIKey string `koanf:"api_key"` +} + +// getCLIConfigPath returns the path to the CLI config file. 
+// The CLI uses ~/.config/hypeman/cli.yaml on all platforms.
+func getCLIConfigPath() string {
+	homeDir, err := os.UserHomeDir()
+	if err != nil {
+		// No home directory means there is no config file to look for.
+		return ""
+	}
+	return filepath.Join(homeDir, ".config", "hypeman", "cli.yaml")
+}
+
+// loadCLIConfig loads CLI configuration from the config file, then
+// overlays HYPEMAN_-prefixed environment variables (highest precedence).
+// HYPEMAN_BASE_URL -> base_url, HYPEMAN_API_KEY -> api_key.
+// Returns an empty config if the file doesn't exist or can't be parsed.
+func loadCLIConfig() *CLIConfig {
+	conf := koanf.New(".")
+
+	// Best-effort file load: a missing or malformed file simply yields defaults.
+	if path := getCLIConfigPath(); path != "" {
+		_ = conf.Load(file.Provider(path), yaml.Parser())
+	}
+
+	// Overlay HYPEMAN_-prefixed env vars: HYPEMAN_BASE_URL -> base_url
+	envMapper := func(key string, value string) (string, interface{}) {
+		if value == "" {
+			// Drop empty variables so they cannot mask file-provided values.
+			return "", nil
+		}
+		return strings.ToLower(strings.TrimPrefix(key, "HYPEMAN_")), value
+	}
+	_ = conf.Load(env.ProviderWithValue("HYPEMAN_", ".", envMapper), nil)
+
+	out := &CLIConfig{}
+	_ = conf.Unmarshal("", out)
+	return out
+}
+
+// resolveBaseURL returns the effective base URL with precedence:
+// CLI flag > HYPEMAN_BASE_URL env > config file > default.
+func resolveBaseURL(cmd *cli.Command) string {
+	if flagURL := cmd.Root().String("base-url"); flagURL != "" {
+		return flagURL
+	}
+	if cfg := loadCLIConfig(); cfg.BaseURL != "" {
+		return cfg.BaseURL
+	}
+	return "http://localhost:8080"
+}
+
+// resolveAPIKey returns the effective API key with precedence:
+// HYPEMAN_API_KEY env > config file.
+func resolveAPIKey() string { + cfg := loadCLIConfig() + return cfg.APIKey +} diff --git a/apps/cli/pkg/cmd/cp.go b/apps/cli/pkg/cmd/cp.go new file mode 100644 index 00000000..43a653b0 --- /dev/null +++ b/apps/cli/pkg/cmd/cp.go @@ -0,0 +1,984 @@ +package cmd + +import ( + "archive/tar" + "context" + "encoding/json" + "fmt" + "io" + "io/fs" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "strings" + "time" + + "github.com/gorilla/websocket" + "github.com/kernel/hypeman-go" + "github.com/kernel/hypeman-go/lib" + "github.com/urfave/cli/v3" +) + +// cpRequest represents the JSON body for cp requests +type cpRequest struct { + Direction string `json:"direction"` + GuestPath string `json:"guest_path"` + IsDir bool `json:"is_dir,omitempty"` + Mode uint32 `json:"mode,omitempty"` + FollowLinks bool `json:"follow_links,omitempty"` + Uid uint32 `json:"uid"` + Gid uint32 `json:"gid"` +} + +// cpFileHeader is received from the server when copying from guest +type cpFileHeader struct { + Type string `json:"type"` + Path string `json:"path"` + Mode uint32 `json:"mode"` + IsDir bool `json:"is_dir"` + IsSymlink bool `json:"is_symlink"` + LinkTarget string `json:"link_target"` + Size int64 `json:"size"` + Mtime int64 `json:"mtime"` + Uid uint32 `json:"uid,omitempty"` + Gid uint32 `json:"gid,omitempty"` +} + +// cpEndMarker signals end of file or transfer +type cpEndMarker struct { + Type string `json:"type"` + Final bool `json:"final"` +} + +// cpResult is the response from a copy-to operation +type cpResult struct { + Type string `json:"type"` + Success bool `json:"success"` + Error string `json:"error,omitempty"` + BytesWritten int64 `json:"bytes_written,omitempty"` +} + +// cpError is an error message from the server +type cpError struct { + Type string `json:"type"` + Message string `json:"message"` + Path string `json:"path,omitempty"` +} + +var cpCmd = cli.Command{ + Name: "cp", + Usage: "Copy files/folders between an instance and the local filesystem", + 
ArgsUsage: " ", + Description: `Copy files between the local filesystem and an instance. + +The path format is: + - Local path: /path/to/file or ./relative/path + - Instance path: :/path/in/instance + +Examples: + # Copy file to instance + hypeman cp ./local-file.txt myinstance:/app/file.txt + + # Copy file from instance + hypeman cp myinstance:/app/output.txt ./local-output.txt + + # Copy directory to instance + hypeman cp ./local-dir myinstance:/app/dir + + # Copy directory from instance + hypeman cp myinstance:/app/dir ./local-dir`, + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "archive", + Aliases: []string{"a"}, + Usage: "Archive mode (copy all uid/gid information)", + }, + &cli.BoolFlag{ + Name: "follow-links", + Aliases: []string{"L"}, + Usage: "Always follow symbolic links in source", + }, + &cli.BoolFlag{ + Name: "quiet", + Aliases: []string{"q"}, + Usage: "Suppress progress output during copy", + }, + }, + Action: handleCp, + HideHelpCommand: true, +} + +func handleCp(ctx context.Context, cmd *cli.Command) error { + args := cmd.Args().Slice() + if len(args) != 2 { + return fmt.Errorf("exactly 2 arguments required: source and destination\nUsage: hypeman cp ") + } + + srcArg := args[0] + dstArg := args[1] + + // Parse source and destination + srcInstance, srcPath, srcIsRemote := parseCpPath(srcArg) + dstInstance, dstPath, dstIsRemote := parseCpPath(dstArg) + + // Validate: one must be local, one must be remote + if srcIsRemote && dstIsRemote { + return fmt.Errorf("cannot copy between two instances; one path must be local") + } + if !srcIsRemote && !dstIsRemote { + return fmt.Errorf("at least one path must reference an instance (use instance:/path format)") + } + + // Get client and resolve instance + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) 
+ + var instanceID string + var err error + if srcIsRemote { + instanceID, err = ResolveInstance(ctx, &client, srcInstance) + } else { + instanceID, err = ResolveInstance(ctx, &client, dstInstance) + } + if err != nil { + return err + } + + // Get base URL and API key (flag > env > config file) + baseURL := resolveBaseURL(cmd) + + apiKey := resolveAPIKey() + if apiKey == "" { + return fmt.Errorf("API key required: set HYPEMAN_API_KEY or configure api_key in ~/.config/hypeman/cli.yaml") + } + + archive := cmd.Bool("archive") + followLinks := cmd.Bool("follow-links") + quiet := cmd.Bool("quiet") + + if srcIsRemote { + // Copy from instance to local (or stdout if dstPath is "-") + if dstPath == "-" { + return copyFromInstanceToStdout(ctx, baseURL, apiKey, instanceID, srcPath, followLinks, archive) + } + return copyFromInstance(ctx, &client, baseURL, apiKey, instanceID, srcPath, dstPath, followLinks, quiet, archive) + } else { + // Copy from local (or stdin if srcPath is "-") to instance + if srcPath == "-" { + return copyFromStdinToInstance(ctx, baseURL, apiKey, instanceID, dstPath, archive) + } + return copyToInstance(ctx, &client, baseURL, apiKey, instanceID, srcPath, dstPath, quiet, archive, followLinks) + } +} + +// parseCpPath parses a path like "instance:/path" or "/local/path" +// Following docker cp conventions: +// - Paths starting with / or ./ or ../ or ~ are always local paths +// - If a path contains a colon, it's treated as instance:path UNLESS it's an explicit local path +// - For ambiguous cases (file:name.txt), use explicit paths like ./file:name.txt +func parseCpPath(path string) (instance, containerPath string, isRemote bool) { + // Explicit local paths: absolute path, relative path with ./ or ../, or home directory + if strings.HasPrefix(path, "/") || + strings.HasPrefix(path, "./") || + strings.HasPrefix(path, "../") || + strings.HasPrefix(path, "~") || + path == "." || + path == ".." 
{ + containerPath = path + return + } + + // Check for Windows drive path (e.g., C:\...) + if isWindowsPath(path) { + containerPath = path + return + } + + // Check for colon separator (instance:path format) + colonIdx := strings.Index(path, ":") + if colonIdx > 0 { + potentialInstance := path[:colonIdx] + + // If the part before colon contains path separators, it's a local path with colon in name + // This helps with edge cases like "some/path:with:colons" + if strings.ContainsAny(potentialInstance, "/\\") { + containerPath = path + return + } + + // It's a remote path: instance:path + instance = potentialInstance + containerPath = path[colonIdx+1:] + isRemote = true + return + } + + // No colon found - local path + containerPath = path + return +} + +// isWindowsPath checks if path looks like a Windows drive path (e.g., C:\...) +func isWindowsPath(path string) bool { + if len(path) >= 2 && path[1] == ':' { + c := path[0] + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') + } + return false +} + +// sanitizeTarPath validates and sanitizes a tar entry path to prevent path traversal attacks. +// Returns the sanitized target path or an error if the path is malicious. +// Uses path package (not filepath) because tar paths and guest paths use forward slashes. +func sanitizeTarPath(basePath, entryName string) (string, error) { + // Clean the entry name using path.Clean (forward slashes for guest/tar paths) + clean := path.Clean(entryName) + + // Reject absolute paths (Linux paths start with /) + if strings.HasPrefix(clean, "/") { + return "", fmt.Errorf("invalid tar entry: absolute path not allowed: %s", entryName) + } + + // Reject paths that start with .. 
(escaping destination) + if strings.HasPrefix(clean, "..") { + return "", fmt.Errorf("invalid tar entry: path escapes destination: %s", entryName) + } + + // Join with base path using path.Join (forward slashes for guest paths) + targetPath := path.Join(basePath, clean) + + // Verify the result is under the base path + // path.Clean removes trailing slashes, so compare cleaned versions + cleanBase := path.Clean(basePath) + // Special case: if basePath is "/" (root), any absolute path under it is valid + if cleanBase == "/" { + // For root destination, just ensure the target is an absolute path (which path.Join guarantees) + if !strings.HasPrefix(targetPath, "/") { + return "", fmt.Errorf("invalid tar entry: path escapes destination: %s", entryName) + } + } else if !strings.HasPrefix(targetPath, cleanBase+"/") && targetPath != cleanBase { + return "", fmt.Errorf("invalid tar entry: path escapes destination: %s", entryName) + } + + return targetPath, nil +} + +// statGuestPath queries the guest for information about a path using the SDK's Stat endpoint +func statGuestPath(ctx context.Context, client *hypeman.Client, instanceID, guestPath string, followLinks bool) (*hypeman.PathInfo, error) { + params := hypeman.InstanceStatParams{ + Path: guestPath, + } + if followLinks { + params.FollowLinks = hypeman.Bool(true) + } + + pathInfo, err := client.Instances.Stat(ctx, instanceID, params) + if err != nil { + return nil, fmt.Errorf("stat path: %w", err) + } + + // Check for stat errors (e.g., permission denied) + if pathInfo.Error != "" { + return nil, fmt.Errorf("stat path %s: %s", guestPath, pathInfo.Error) + } + + return pathInfo, nil +} + +// resolveDestPath resolves the destination path following docker cp semantics +// srcPath is the local source path, dstPath is the guest destination path +// Returns the resolved guest path +func resolveDestPath(ctx context.Context, client *hypeman.Client, instanceID, srcPath, dstPath string) (string, error) { + srcInfo, err := 
os.Stat(srcPath) + if err != nil { + return "", fmt.Errorf("cannot stat source: %w", err) + } + + // Check if dstPath ends with /. (copy contents only) + // Handle both OS-specific separator and forward slash for cross-platform compatibility + copyContentsOnly := strings.HasSuffix(srcPath, string(filepath.Separator)+".") || + strings.HasSuffix(srcPath, "/.") + if copyContentsOnly { + srcPath = strings.TrimSuffix(srcPath, string(filepath.Separator)+".") + srcPath = strings.TrimSuffix(srcPath, "/.") + } + + // Check if destination ends with / + dstEndsWithSlash := strings.HasSuffix(dstPath, "/") + + // Stat the destination in guest + dstStat, err := statGuestPath(ctx, client, instanceID, dstPath, true) + if err != nil { + return "", fmt.Errorf("stat destination: %w", err) + } + + // Use bool fields directly from PathInfo + isDir := dstStat.IsDir + isFile := dstStat.IsFile + + // Docker cp path resolution rules: + // 1. If SRC is a file: + // - DEST doesn't exist: save as DEST + // - DEST doesn't exist and ends with /: error + // - DEST exists and is a file: overwrite + // - DEST exists and is a dir: copy into dir using basename + // 2. 
If SRC is a directory: + // - DEST doesn't exist: create DEST dir + // - DEST exists and is a file: error + // - DEST exists and is a dir: + // - SRC ends with /.: copy contents into DEST + // - SRC doesn't end with /.: copy SRC dir into DEST + + if !srcInfo.IsDir() { + // Source is a file + if !dstStat.Exists { + if dstEndsWithSlash { + return "", fmt.Errorf("destination directory %s does not exist", dstPath) + } + // Save as DEST + return dstPath, nil + } + if isDir { + // Copy into directory using basename + // Use path.Join for guest paths (always forward slashes) + return path.Join(dstPath, filepath.Base(srcPath)), nil + } + // Overwrite file + return dstPath, nil + } + + // Source is a directory + if dstStat.Exists && isFile { + return "", fmt.Errorf("cannot copy a directory to a file") + } + + if !dstStat.Exists { + // DEST will be created + return dstPath, nil + } + + // DEST exists and is a directory + if copyContentsOnly { + // Copy contents into DEST + return dstPath, nil + } + + // Copy SRC dir into DEST (create subdir) + // Use path.Join for guest paths (always forward slashes) + return path.Join(dstPath, filepath.Base(srcPath)), nil +} + +// buildCpWsURL builds the WebSocket URL for the cp endpoint +func buildCpWsURL(baseURL, instanceID string) (string, error) { + u, err := url.Parse(baseURL) + if err != nil { + return "", fmt.Errorf("invalid base URL: %w", err) + } + u.Path = fmt.Sprintf("/instances/%s/cp", instanceID) + + switch u.Scheme { + case "https": + u.Scheme = "wss" + case "http": + u.Scheme = "ws" + } + + return u.String(), nil +} + +// copyToInstance copies a local file/directory to the instance +func copyToInstance(ctx context.Context, client *hypeman.Client, baseURL, apiKey, instanceID, srcPath, dstPath string, quiet, archive, followLinks bool) error { + // Check for /. 
suffix (copy contents only) + copyContentsOnly := strings.HasSuffix(srcPath, string(filepath.Separator)+".") || strings.HasSuffix(srcPath, "/.") + originalSrcPath := srcPath + if copyContentsOnly { + srcPath = strings.TrimSuffix(srcPath, string(filepath.Separator)+".") + srcPath = strings.TrimSuffix(srcPath, "/.") + } + + // Stat the source + srcInfo, err := os.Stat(srcPath) + if err != nil { + return fmt.Errorf("cannot stat source: %w", err) + } + + // Resolve destination path using docker cp semantics + resolvedDst, err := resolveDestPath(ctx, client, instanceID, originalSrcPath, dstPath) + if err != nil { + return err + } + + if srcInfo.IsDir() { + if copyContentsOnly { + // Copy contents of srcPath into resolvedDst + return copyDirContentsToInstance(ctx, baseURL, apiKey, instanceID, srcPath, resolvedDst, quiet, archive, followLinks) + } + return copyDirToInstance(ctx, baseURL, apiKey, instanceID, srcPath, resolvedDst, quiet, archive, followLinks) + } + return copyFileToInstance(ctx, baseURL, apiKey, instanceID, srcPath, resolvedDst, srcInfo.Mode().Perm(), quiet, archive, followLinks) +} + +// copyFileToInstance copies a single file to the instance using the SDK +func copyFileToInstance(ctx context.Context, baseURL, apiKey, instanceID, srcPath, dstPath string, mode fs.FileMode, quiet, archive, followLinks bool) error { + srcInfo, err := os.Stat(srcPath) + if err != nil { + return fmt.Errorf("stat source: %w", err) + } + + cfg := lib.CpConfig{ + BaseURL: baseURL, + APIKey: apiKey, + } + + var callbacks *lib.CpCallbacks + if !quiet { + callbacks = &lib.CpCallbacks{ + OnFileEnd: func(path string) { + fmt.Printf("Copied %s -> %s (%d bytes)\n", srcPath, dstPath, srcInfo.Size()) + }, + } + } + + err = lib.CpToInstance(ctx, cfg, lib.CpToInstanceOptions{ + InstanceID: instanceID, + SrcPath: srcPath, + DstPath: dstPath, + Mode: mode, + Archive: archive, + FollowLinks: followLinks, + Callbacks: callbacks, + }) + if err != nil { + return err + } + + return nil +} + +// 
copyDirToInstance copies a directory recursively to the instance using the SDK +func copyDirToInstance(ctx context.Context, baseURL, apiKey, instanceID, srcPath, dstPath string, quiet, archive, followLinks bool) error { + cfg := lib.CpConfig{ + BaseURL: baseURL, + APIKey: apiKey, + } + + var callbacks *lib.CpCallbacks + if !quiet { + callbacks = &lib.CpCallbacks{ + OnFileEnd: func(path string) { + fmt.Printf("Copied %s\n", path) + }, + } + } + + // First create the destination directory + err := lib.CpToInstance(ctx, cfg, lib.CpToInstanceOptions{ + InstanceID: instanceID, + SrcPath: srcPath, + DstPath: dstPath, + Archive: archive, + FollowLinks: followLinks, + Callbacks: callbacks, + }) + if err != nil { + return err + } + + return nil +} + +// copyDirContentsToInstance copies only the contents of a directory (not the directory itself) +// This implements the /. suffix behavior from docker cp +func copyDirContentsToInstance(ctx context.Context, baseURL, apiKey, instanceID, srcPath, dstPath string, quiet, archive, followLinks bool) error { + entries, err := os.ReadDir(srcPath) + if err != nil { + return fmt.Errorf("read directory: %w", err) + } + + cfg := lib.CpConfig{ + BaseURL: baseURL, + APIKey: apiKey, + } + + var callbacks *lib.CpCallbacks + if !quiet { + callbacks = &lib.CpCallbacks{ + OnFileEnd: func(path string) { + fmt.Printf("Copied %s\n", path) + }, + } + } + + for _, entry := range entries { + srcEntryPath := filepath.Join(srcPath, entry.Name()) + // Use path.Join for guest paths (always forward slashes) + dstEntryPath := path.Join(dstPath, entry.Name()) + + info, err := entry.Info() + if err != nil { + return fmt.Errorf("info: %w", err) + } + + if err := lib.CpToInstance(ctx, cfg, lib.CpToInstanceOptions{ + InstanceID: instanceID, + SrcPath: srcEntryPath, + DstPath: dstEntryPath, + Mode: info.Mode().Perm(), + Archive: archive, + FollowLinks: followLinks, + Callbacks: callbacks, + }); err != nil { + return err + } + } + return nil +} + + +// 
createDirOnInstanceWithUidGid creates a directory on the instance with explicit uid/gid +func createDirOnInstanceWithUidGid(ctx context.Context, baseURL, apiKey, instanceID, dstPath string, mode fs.FileMode, uid, gid uint32) error { + wsURL, err := buildCpWsURL(baseURL, instanceID) + if err != nil { + return err + } + + headers := http.Header{} + headers.Set("Authorization", fmt.Sprintf("Bearer %s", apiKey)) + + dialer := &websocket.Dialer{} + ws, resp, err := dialer.DialContext(ctx, wsURL, headers) + if err != nil { + if resp != nil { + defer resp.Body.Close() + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("websocket connect failed (HTTP %d): %s", resp.StatusCode, string(body)) + } + return fmt.Errorf("websocket connect failed: %w", err) + } + defer ws.Close() + + req := cpRequest{ + Direction: "to", + GuestPath: dstPath, + IsDir: true, + Mode: uint32(mode), + Uid: uid, + Gid: gid, + } + reqJSON, _ := json.Marshal(req) + if err := ws.WriteMessage(websocket.TextMessage, reqJSON); err != nil { + return fmt.Errorf("send request: %w", err) + } + + // Send end marker + endMsg, _ := json.Marshal(map[string]string{"type": "end"}) + if err := ws.WriteMessage(websocket.TextMessage, endMsg); err != nil { + return fmt.Errorf("send end: %w", err) + } + + // Wait for result + _, message, err := ws.ReadMessage() + if err != nil { + return fmt.Errorf("read result: %w", err) + } + + var result cpResult + if err := json.Unmarshal(message, &result); err != nil { + return fmt.Errorf("parse result: %w", err) + } + + if !result.Success { + return fmt.Errorf("create directory failed: %s", result.Error) + } + + return nil +} + +// copyFromInstance copies a file/directory from the instance to local using the SDK +func copyFromInstance(ctx context.Context, client *hypeman.Client, baseURL, apiKey, instanceID, srcPath, dstPath string, followLinks, quiet, archive bool) error { + // Check for /. 
suffix (copy contents only) on guest source path + copyContentsOnly := strings.HasSuffix(srcPath, "/.") + if copyContentsOnly { + srcPath = strings.TrimSuffix(srcPath, "/.") + } + + // Check if destination ends with / + dstEndsWithSlash := strings.HasSuffix(dstPath, "/") || strings.HasSuffix(dstPath, string(filepath.Separator)) + + // Stat the guest source to check if it's file or directory + srcStat, err := statGuestPath(ctx, client, instanceID, srcPath, followLinks) + if err != nil { + return fmt.Errorf("stat source: %w", err) + } + if !srcStat.Exists { + return fmt.Errorf("source path %s does not exist in guest", srcPath) + } + + // Use bool field directly from PathInfo + srcIsDir := srcStat.IsDir + + // Stat the local destination + dstInfo, dstErr := os.Stat(dstPath) + dstExists := dstErr == nil + dstIsDir := dstExists && dstInfo.IsDir() + + // Apply docker cp path resolution for "from" direction + resolvedDst := dstPath + if !srcIsDir { + // Source is a file + if !dstExists { + if dstEndsWithSlash { + return fmt.Errorf("destination directory %s does not exist", dstPath) + } + // Will create file at dstPath + } else if dstIsDir { + // Copy into directory using basename + // Use path.Base for guest srcPath (always forward slashes) + resolvedDst = filepath.Join(dstPath, path.Base(srcPath)) + } + // else: overwrite existing file + } else { + // Source is a directory + if dstExists && !dstIsDir { + return fmt.Errorf("cannot copy a directory to a file") + } + if !dstExists { + // Create destination directory - will be created by SDK + } else if !copyContentsOnly { + // Copy SRC dir into DST - create source directory inside destination + // Use path.Base for guest srcPath (always forward slashes) + resolvedDst = filepath.Join(dstPath, path.Base(srcPath)) + } + // else: copyContentsOnly=true - contents go directly into dstPath + } + dstPath = resolvedDst + + cfg := lib.CpConfig{ + BaseURL: baseURL, + APIKey: apiKey, + } + + var fileCount int + var totalBytes int64 + 
var callbacks *lib.CpCallbacks + if !quiet { + callbacks = &lib.CpCallbacks{ + OnFileEnd: func(path string) { + fileCount++ + }, + OnProgress: func(bytesCopied int64) { + totalBytes = bytesCopied + }, + } + } + + err = lib.CpFromInstance(ctx, cfg, lib.CpFromInstanceOptions{ + InstanceID: instanceID, + SrcPath: srcPath, + DstPath: dstPath, + FollowLinks: followLinks, + Archive: archive, + Callbacks: callbacks, + }) + if err != nil { + return err + } + + if !quiet { + fmt.Printf("Copied %s -> %s (%d files, %d bytes)\n", srcPath, dstPath, fileCount, totalBytes) + } + + return nil +} + +// copyFromStdinToInstance reads a tar archive from stdin and extracts it to the instance +func copyFromStdinToInstance(ctx context.Context, baseURL, apiKey, instanceID, dstPath string, archive bool) error { + tr := tar.NewReader(os.Stdin) + + for { + header, err := tr.Next() + if err == io.EOF { + break // End of tar archive + } + if err != nil { + return fmt.Errorf("read tar header: %w", err) + } + + // Sanitize tar entry path to prevent path traversal attacks + targetPath, err := sanitizeTarPath(dstPath, header.Name) + if err != nil { + return err + } + + // Extract uid/gid from tar header if archive mode + var uid, gid uint32 + if archive { + uid = uint32(header.Uid) + gid = uint32(header.Gid) + } + + switch header.Typeflag { + case tar.TypeDir: + // Create directory + if err := createDirOnInstanceWithUidGid(ctx, baseURL, apiKey, instanceID, targetPath, fs.FileMode(header.Mode), uid, gid); err != nil { + return fmt.Errorf("create directory %s: %w", targetPath, err) + } + + case tar.TypeReg: + // Copy file by reading from tar and streaming to instance + if err := copyTarFileToInstance(ctx, baseURL, apiKey, instanceID, tr, targetPath, fs.FileMode(header.Mode), header.Size, uid, gid); err != nil { + return fmt.Errorf("copy file %s: %w", targetPath, err) + } + + case tar.TypeSymlink: + // TODO: Handle symlinks if needed + fmt.Fprintf(os.Stderr, "Warning: skipping symlink %s -> %s\n", 
header.Name, header.Linkname) + } + } + + return nil +} + +// copyTarFileToInstance copies a single file from a tar reader to the instance +func copyTarFileToInstance(ctx context.Context, baseURL, apiKey, instanceID string, reader io.Reader, dstPath string, mode fs.FileMode, size int64, uid, gid uint32) error { + wsURL, err := buildCpWsURL(baseURL, instanceID) + if err != nil { + return err + } + + headers := http.Header{} + headers.Set("Authorization", fmt.Sprintf("Bearer %s", apiKey)) + + dialer := &websocket.Dialer{} + ws, resp, err := dialer.DialContext(ctx, wsURL, headers) + if err != nil { + if resp != nil { + defer resp.Body.Close() + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("websocket connect failed (HTTP %d): %s", resp.StatusCode, string(body)) + } + return fmt.Errorf("websocket connect failed: %w", err) + } + defer ws.Close() + + // Send initial request + req := cpRequest{ + Direction: "to", + GuestPath: dstPath, + IsDir: false, + Mode: uint32(mode), + Uid: uid, + Gid: gid, + } + reqJSON, _ := json.Marshal(req) + if err := ws.WriteMessage(websocket.TextMessage, reqJSON); err != nil { + return fmt.Errorf("send request: %w", err) + } + + // Stream file content from tar reader + buf := make([]byte, 32*1024) + for { + n, err := reader.Read(buf) + if n > 0 { + if sendErr := ws.WriteMessage(websocket.BinaryMessage, buf[:n]); sendErr != nil { + return fmt.Errorf("send data: %w", sendErr) + } + } + if err == io.EOF { + break + } + if err != nil { + return fmt.Errorf("read: %w", err) + } + } + + // Send end marker + endMsg, _ := json.Marshal(map[string]string{"type": "end"}) + if err := ws.WriteMessage(websocket.TextMessage, endMsg); err != nil { + return fmt.Errorf("send end: %w", err) + } + + // Wait for result + _, message, err := ws.ReadMessage() + if err != nil { + return fmt.Errorf("read result: %w", err) + } + + var result cpResult + if err := json.Unmarshal(message, &result); err != nil { + return fmt.Errorf("parse result: %w", err) + } + + if 
!result.Success { + return fmt.Errorf("copy failed: %s", result.Error) + } + + return nil +} + +// copyFromInstanceToStdout copies files from the instance and writes a tar archive to stdout +func copyFromInstanceToStdout(ctx context.Context, baseURL, apiKey, instanceID, srcPath string, followLinks, archive bool) error { + wsURL, err := buildCpWsURL(baseURL, instanceID) + if err != nil { + return err + } + + headers := http.Header{} + headers.Set("Authorization", fmt.Sprintf("Bearer %s", apiKey)) + + dialer := &websocket.Dialer{} + ws, resp, err := dialer.DialContext(ctx, wsURL, headers) + if err != nil { + if resp != nil { + defer resp.Body.Close() + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("websocket connect failed (HTTP %d): %s", resp.StatusCode, string(body)) + } + return fmt.Errorf("websocket connect failed: %w", err) + } + defer ws.Close() + + // Send initial request + req := cpRequest{ + Direction: "from", + GuestPath: srcPath, + FollowLinks: followLinks, + } + reqJSON, _ := json.Marshal(req) + if err := ws.WriteMessage(websocket.TextMessage, reqJSON); err != nil { + return fmt.Errorf("send request: %w", err) + } + + // Create tar writer for stdout + tw := tar.NewWriter(os.Stdout) + defer tw.Close() + + var currentHeader *cpFileHeader + var bytesWritten int64 + var receivedFinal bool + + for { + msgType, message, err := ws.ReadMessage() + if err != nil { + if websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) { + break + } + return fmt.Errorf("read message: %w", err) + } + + if msgType == websocket.TextMessage { + var msgMap map[string]interface{} + if err := json.Unmarshal(message, &msgMap); err != nil { + return fmt.Errorf("parse message: %w", err) + } + + msgTypeStr, _ := msgMap["type"].(string) + + switch msgTypeStr { + case "header": + // Verify previous file was completely written + if currentHeader != nil && !currentHeader.IsDir && !currentHeader.IsSymlink { + if bytesWritten != currentHeader.Size { + return 
fmt.Errorf("file %s: expected %d bytes, got %d", currentHeader.Path, currentHeader.Size, bytesWritten) + } + } + + var header cpFileHeader + if err := json.Unmarshal(message, &header); err != nil { + return fmt.Errorf("parse header: %w", err) + } + currentHeader = &header + bytesWritten = 0 + + if header.IsDir { + // Write directory entry to tar + tarHeader := &tar.Header{ + Typeflag: tar.TypeDir, + Name: header.Path + "/", + Mode: int64(header.Mode), + ModTime: time.Unix(header.Mtime, 0), + } + // Only preserve UID/GID in archive mode + if archive { + tarHeader.Uid = int(header.Uid) + tarHeader.Gid = int(header.Gid) + } + if err := tw.WriteHeader(tarHeader); err != nil { + return fmt.Errorf("write tar dir header: %w", err) + } + } else if header.IsSymlink { + // Write symlink entry to tar + tarHeader := &tar.Header{ + Typeflag: tar.TypeSymlink, + Name: header.Path, + Linkname: header.LinkTarget, + Mode: int64(header.Mode), + ModTime: time.Unix(header.Mtime, 0), + } + // Only preserve UID/GID in archive mode + if archive { + tarHeader.Uid = int(header.Uid) + tarHeader.Gid = int(header.Gid) + } + if err := tw.WriteHeader(tarHeader); err != nil { + return fmt.Errorf("write tar symlink header: %w", err) + } + } else { + // Write regular file header with known size - enables streaming + tarHeader := &tar.Header{ + Typeflag: tar.TypeReg, + Name: header.Path, + Size: header.Size, + Mode: int64(header.Mode), + ModTime: time.Unix(header.Mtime, 0), + } + // Only preserve UID/GID in archive mode + if archive { + tarHeader.Uid = int(header.Uid) + tarHeader.Gid = int(header.Gid) + } + if err := tw.WriteHeader(tarHeader); err != nil { + return fmt.Errorf("write tar header: %w", err) + } + } + + case "end": + // Verify file was completely written + if currentHeader != nil && !currentHeader.IsDir && !currentHeader.IsSymlink { + if bytesWritten != currentHeader.Size { + return fmt.Errorf("file %s: expected %d bytes, got %d", currentHeader.Path, currentHeader.Size, bytesWritten) + 
} + } + currentHeader = nil + + var endMarker cpEndMarker + json.Unmarshal(message, &endMarker) + if endMarker.Final { + receivedFinal = true + return nil + } + + case "error": + var cpErr cpError + json.Unmarshal(message, &cpErr) + return fmt.Errorf("copy error at %s: %s", cpErr.Path, cpErr.Message) + } + } else if msgType == websocket.BinaryMessage { + // Stream file data directly to tar archive + n, err := tw.Write(message) + if err != nil { + return fmt.Errorf("write tar data: %w", err) + } + bytesWritten += int64(n) + } + } + + // If connection closed without receiving final marker, the transfer was incomplete + if !receivedFinal { + return fmt.Errorf("copy stream ended without completion marker") + } + return nil +} + + diff --git a/apps/cli/pkg/cmd/devicecmd.go b/apps/cli/pkg/cmd/devicecmd.go new file mode 100644 index 00000000..5b102de9 --- /dev/null +++ b/apps/cli/pkg/cmd/devicecmd.go @@ -0,0 +1,357 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + "github.com/kernel/hypeman-go" + "github.com/kernel/hypeman-go/option" + "github.com/tidwall/gjson" + "github.com/urfave/cli/v3" +) + +var deviceCmd = cli.Command{ + Name: "device", + Usage: "Manage PCI/GPU devices for passthrough", + Description: `Manage PCI devices for passthrough to virtual machines. + +This command allows you to discover available passthrough-capable devices, +register them for use with instances, and manage registered devices. 
+ +Examples: + # Discover available devices on the host + hypeman device available + + # Register a GPU for passthrough + hypeman device register --pci-address 0000:a2:00.0 --name my-gpu + + # List registered devices + hypeman device list + + # Delete a registered device + hypeman device delete my-gpu`, + Commands: []*cli.Command{ + &deviceAvailableCmd, + &deviceRegisterCmd, + &deviceListCmd, + &deviceGetCmd, + &deviceDeleteCmd, + }, + HideHelpCommand: true, +} + +var deviceAvailableCmd = cli.Command{ + Name: "available", + Usage: "Discover passthrough-capable devices on host", + Description: `List all PCI devices on the host that are capable of passthrough. + +Shows devices with their PCI address, vendor/device info, IOMMU group, +and current driver binding.`, + Action: handleDeviceAvailable, + HideHelpCommand: true, +} + +var deviceRegisterCmd = cli.Command{ + Name: "register", + Usage: "Register a device for passthrough", + ArgsUsage: "[pci-address]", + Description: `Register a PCI device for use with VM passthrough. + +The device must be in an IOMMU group that supports passthrough. +Once registered, the device can be attached to instances using +the --device flag with 'hypeman run'. 
+ +Examples: + # Register by PCI address + hypeman device register 0000:a2:00.0 + + # Register with a custom name + hypeman device register --pci-address 0000:a2:00.0 --name my-gpu`, + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "pci-address", + Usage: "PCI address of the device (e.g., 0000:a2:00.0)", + }, + &cli.StringFlag{ + Name: "name", + Usage: "Optional name for the device (auto-generated if not provided)", + }, + }, + Action: handleDeviceRegister, + HideHelpCommand: true, +} + +var deviceListCmd = cli.Command{ + Name: "list", + Usage: "List registered devices", + Action: handleDeviceList, + HideHelpCommand: true, +} + +var deviceGetCmd = cli.Command{ + Name: "get", + Usage: "Get device details", + ArgsUsage: "", + Action: handleDeviceGet, + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "id", + Usage: "Device ID or name", + }, + }, + HideHelpCommand: true, +} + +var deviceDeleteCmd = cli.Command{ + Name: "delete", + Aliases: []string{"rm", "unregister"}, + Usage: "Unregister a device", + ArgsUsage: "", + Action: handleDeviceDelete, + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "id", + Usage: "Device ID or name", + }, + }, + HideHelpCommand: true, +} + +func handleDeviceAvailable(ctx context.Context, cmd *cli.Command) error { + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) + + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + var res []byte + opts = append(opts, option.WithResponseBodyInto(&res)) + _, err := client.Devices.ListAvailable(ctx, opts...) 
+ if err != nil { + return err + } + + format := cmd.Root().String("format") + transform := cmd.Root().String("transform") + + // If format is "auto", use our custom table format + if format == "auto" || format == "" { + return showAvailableDevicesTable(res) + } + + obj := gjson.ParseBytes(res) + return ShowJSON(os.Stdout, "devices available", obj, format, transform) +} + +func showAvailableDevicesTable(data []byte) error { + devices := gjson.ParseBytes(data) + + if !devices.IsArray() || len(devices.Array()) == 0 { + fmt.Println("No passthrough-capable devices found.") + return nil + } + + table := NewTableWriter(os.Stdout, "PCI ADDRESS", "VENDOR", "DEVICE", "IOMMU", "DRIVER") + table.TruncOrder = []int{2, 1} // DEVICE first, then VENDOR + + devices.ForEach(func(key, value gjson.Result) bool { + pciAddr := value.Get("pci_address").String() + vendorID := value.Get("vendor_id").String() + deviceID := value.Get("device_id").String() + vendorName := value.Get("vendor_name").String() + deviceName := value.Get("device_name").String() + iommuGroup := fmt.Sprintf("%d", value.Get("iommu_group").Int()) + driver := value.Get("current_driver").String() + + vendor := vendorName + if vendor == "" { + vendor = vendorID + } + + device := deviceName + if device == "" { + device = deviceID + } + + if driver == "" { + driver = "-" + } + + table.AddRow(pciAddr, vendor, device, iommuGroup, driver) + return true + }) + + table.Render() + return nil +} + +func handleDeviceRegister(ctx context.Context, cmd *cli.Command) error { + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) 
+ + // Get PCI address from flag or first argument + pciAddress := cmd.String("pci-address") + args := cmd.Args().Slice() + if pciAddress == "" && len(args) > 0 { + pciAddress = args[0] + } + + if pciAddress == "" { + return fmt.Errorf("PCI address required\nUsage: hypeman device register [--pci-address] [--name ]") + } + + params := hypeman.DeviceNewParams{ + PciAddress: pciAddress, + } + + if name := cmd.String("name"); name != "" { + params.Name = hypeman.Opt(name) + } + + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + var res []byte + opts = append(opts, option.WithResponseBodyInto(&res)) + _, err := client.Devices.New(ctx, params, opts...) + if err != nil { + return err + } + + format := cmd.Root().String("format") + transform := cmd.Root().String("transform") + + if format == "auto" || format == "" { + device := gjson.ParseBytes(res) + fmt.Printf("Registered device %s (%s)\n", device.Get("name").String(), device.Get("id").String()) + return nil + } + + obj := gjson.ParseBytes(res) + return ShowJSON(os.Stdout, "device register", obj, format, transform) +} + +func handleDeviceList(ctx context.Context, cmd *cli.Command) error { + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) + + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + var res []byte + opts = append(opts, option.WithResponseBodyInto(&res)) + _, err := client.Devices.List(ctx, opts...) 
+ if err != nil { + return err + } + + format := cmd.Root().String("format") + transform := cmd.Root().String("transform") + + if format == "auto" || format == "" { + return showDeviceListTable(res) + } + + obj := gjson.ParseBytes(res) + return ShowJSON(os.Stdout, "devices list", obj, format, transform) +} + +func showDeviceListTable(data []byte) error { + devices := gjson.ParseBytes(data) + + if !devices.IsArray() || len(devices.Array()) == 0 { + fmt.Println("No registered devices.") + return nil + } + + table := NewTableWriter(os.Stdout, "ID", "NAME", "TYPE", "PCI ADDRESS", "VFIO", "ATTACHED TO") + table.TruncOrder = []int{0, 1, 5} // ID first, then NAME, ATTACHED TO + + devices.ForEach(func(key, value gjson.Result) bool { + id := value.Get("id").String() + name := value.Get("name").String() + deviceType := value.Get("type").String() + pciAddr := value.Get("pci_address").String() + + vfio := "no" + if value.Get("bound_to_vfio").Bool() { + vfio = "yes" + } + + attachedTo := value.Get("attached_to").String() + if attachedTo == "" { + attachedTo = "-" + } + + table.AddRow(id, name, deviceType, pciAddr, vfio, attachedTo) + return true + }) + + table.Render() + return nil +} + +func handleDeviceGet(ctx context.Context, cmd *cli.Command) error { + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) + + // Get device ID from flag or first argument + id := cmd.String("id") + args := cmd.Args().Slice() + if id == "" && len(args) > 0 { + id = args[0] + } + + if id == "" { + return fmt.Errorf("device ID or name required\nUsage: hypeman device get ") + } + + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + var res []byte + opts = append(opts, option.WithResponseBodyInto(&res)) + _, err := client.Devices.Get(ctx, id, opts...) 
+ if err != nil { + return err + } + + format := cmd.Root().String("format") + transform := cmd.Root().String("transform") + + obj := gjson.ParseBytes(res) + return ShowJSON(os.Stdout, "device get", obj, format, transform) +} + +func handleDeviceDelete(ctx context.Context, cmd *cli.Command) error { + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) + + // Get device ID from flag or first argument + id := cmd.String("id") + args := cmd.Args().Slice() + if id == "" && len(args) > 0 { + id = args[0] + } + + if id == "" { + return fmt.Errorf("device ID or name required\nUsage: hypeman device delete ") + } + + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + err := client.Devices.Delete(ctx, id, opts...) + if err != nil { + return err + } + + fmt.Printf("Deleted device %s\n", id) + return nil +} diff --git a/apps/cli/pkg/cmd/devicecmd_test.go b/apps/cli/pkg/cmd/devicecmd_test.go new file mode 100644 index 00000000..a6108e3c --- /dev/null +++ b/apps/cli/pkg/cmd/devicecmd_test.go @@ -0,0 +1,50 @@ +package cmd + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDeviceCommandStructure(t *testing.T) { + // Test that deviceCmd has the expected subcommands + assert.Equal(t, "device", deviceCmd.Name) + assert.Equal(t, "Manage PCI/GPU devices for passthrough", deviceCmd.Usage) + + // Verify subcommands exist + subcommandNames := make([]string, len(deviceCmd.Commands)) + for i, cmd := range deviceCmd.Commands { + subcommandNames[i] = cmd.Name + } + + assert.Contains(t, subcommandNames, "available") + assert.Contains(t, subcommandNames, "register") + assert.Contains(t, subcommandNames, "list") + assert.Contains(t, subcommandNames, "get") + assert.Contains(t, subcommandNames, "delete") +} + +func TestDeviceAvailableCmdStructure(t *testing.T) { + assert.Equal(t, "available", deviceAvailableCmd.Name) + assert.Equal(t, "Discover passthrough-capable devices on host", 
deviceAvailableCmd.Usage) +} + +func TestDeviceRegisterCmdStructure(t *testing.T) { + assert.Equal(t, "register", deviceRegisterCmd.Name) + assert.Equal(t, "Register a device for passthrough", deviceRegisterCmd.Usage) + + // Check flags exist + flagNames := make([]string, 0) + for _, flag := range deviceRegisterCmd.Flags { + flagNames = append(flagNames, flag.Names()...) + } + + assert.Contains(t, flagNames, "pci-address") + assert.Contains(t, flagNames, "name") +} + +func TestDeviceDeleteCmdAliases(t *testing.T) { + // Verify delete has aliases + assert.Contains(t, deviceDeleteCmd.Aliases, "rm") + assert.Contains(t, deviceDeleteCmd.Aliases, "unregister") +} diff --git a/apps/cli/pkg/cmd/exec.go b/apps/cli/pkg/cmd/exec.go new file mode 100644 index 00000000..d53efe5c --- /dev/null +++ b/apps/cli/pkg/cmd/exec.go @@ -0,0 +1,361 @@ +package cmd + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "os" + "os/signal" + "strings" + "sync" + + "github.com/gorilla/websocket" + "github.com/kernel/hypeman-go" + "github.com/urfave/cli/v3" + "golang.org/x/term" +) + +// ExecExitError is returned when exec completes with a non-zero exit code +type ExecExitError struct { + Code int +} + +func (e *ExecExitError) Error() string { + return fmt.Sprintf("exec exited with code %d", e.Code) +} + +// execRequest represents the JSON body for exec requests +type execRequest struct { + Command []string `json:"command"` + TTY bool `json:"tty"` + Env map[string]string `json:"env,omitempty"` + Cwd string `json:"cwd,omitempty"` + Timeout int32 `json:"timeout,omitempty"` + Rows uint32 `json:"rows,omitempty"` + Cols uint32 `json:"cols,omitempty"` +} + +var execCmd = cli.Command{ + Name: "exec", + Usage: "Execute a command in a running instance", + ArgsUsage: " [-- command...]", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "it", + Aliases: []string{"i", "t"}, + Usage: "Enable interactive TTY mode", + }, + &cli.BoolFlag{ + Name: "no-tty", + Aliases: 
[]string{"T"}, + Usage: "Disable TTY allocation", + }, + &cli.StringSliceFlag{ + Name: "env", + Aliases: []string{"e"}, + Usage: "Set environment variable (KEY=VALUE, can be repeated)", + }, + &cli.StringFlag{ + Name: "cwd", + Usage: "Working directory inside the instance", + }, + &cli.IntFlag{ + Name: "timeout", + Usage: "Execution timeout in seconds (0 = no timeout)", + }, + }, + Action: handleExec, + HideHelpCommand: true, +} + +func handleExec(ctx context.Context, cmd *cli.Command) error { + args := cmd.Args().Slice() + if len(args) < 1 { + return fmt.Errorf("instance ID required\nUsage: hypeman exec [flags] [-- command...]") + } + + // Resolve instance by ID, partial ID, or name + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) + instanceID, err := ResolveInstance(ctx, &client, args[0]) + if err != nil { + return err + } + + var command []string + + // Parse command after -- separator or remaining args + if len(args) > 1 { + command = args[1:] + } + + // Determine TTY mode + tty := true // default + if cmd.Bool("no-tty") { + tty = false + } else if cmd.Bool("it") { + tty = true + } else { + // Auto-detect: enable TTY if stdin and stdout are terminals + tty = term.IsTerminal(int(os.Stdin.Fd())) && term.IsTerminal(int(os.Stdout.Fd())) + } + + // Parse environment variables + env := make(map[string]string) + for _, e := range cmd.StringSlice("env") { + parts := strings.SplitN(e, "=", 2) + if len(parts) == 2 { + env[parts[0]] = parts[1] + } else { + fmt.Fprintf(os.Stderr, "Warning: ignoring malformed env var: %s\n", e) + } + } + + // Build exec request + execReq := execRequest{ + Command: command, + TTY: tty, + } + if len(env) > 0 { + execReq.Env = env + } + if cwd := cmd.String("cwd"); cwd != "" { + execReq.Cwd = cwd + } + if timeout := cmd.Int("timeout"); timeout > 0 { + execReq.Timeout = int32(timeout) + } + + // Get terminal size for TTY mode (only if stdout is actually a terminal) + if tty && term.IsTerminal(int(os.Stdout.Fd())) { + cols, rows, 
_ := term.GetSize(int(os.Stdout.Fd())) + if rows > 0 { + execReq.Rows = uint32(rows) + } + if cols > 0 { + execReq.Cols = uint32(cols) + } + } + + reqBody, err := json.Marshal(execReq) + if err != nil { + return fmt.Errorf("failed to marshal request: %w", err) + } + + // Get base URL and API key (flag > env > config file) + baseURL := resolveBaseURL(cmd) + + apiKey := resolveAPIKey() + if apiKey == "" { + return fmt.Errorf("API key required: set HYPEMAN_API_KEY or configure api_key in ~/.config/hypeman/cli.yaml") + } + + // Build WebSocket URL + u, err := url.Parse(baseURL) + if err != nil { + return fmt.Errorf("invalid base URL: %w", err) + } + u.Path = fmt.Sprintf("/instances/%s/exec", instanceID) + + // Convert scheme to WebSocket + switch u.Scheme { + case "https": + u.Scheme = "wss" + case "http": + u.Scheme = "ws" + } + + // Connect WebSocket with auth header + headers := http.Header{} + headers.Set("Authorization", fmt.Sprintf("Bearer %s", apiKey)) + + dialer := &websocket.Dialer{} + ws, resp, err := dialer.DialContext(ctx, u.String(), headers) + if err != nil { + if resp != nil { + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("websocket connect failed (HTTP %d): %s", resp.StatusCode, string(body)) + } + return fmt.Errorf("websocket connect failed: %w", err) + } + defer ws.Close() + + // Send JSON request as first message + if err := ws.WriteMessage(websocket.TextMessage, reqBody); err != nil { + return fmt.Errorf("failed to send exec request: %w", err) + } + + // Run interactive or non-interactive mode + var exitCode int + if tty { + exitCode, err = runExecInteractive(ws) + } else { + exitCode, err = runExecNonInteractive(ws) + } + + if err != nil { + return err + } + + if exitCode != 0 { + return &ExecExitError{Code: exitCode} + } + + return nil +} + +func runExecInteractive(ws *websocket.Conn) (int, error) { + // Put terminal in raw mode + oldState, err := term.MakeRaw(int(os.Stdin.Fd())) + if err != nil { + return 255, fmt.Errorf("failed to set 
raw mode: %w", err) + } + defer term.Restore(int(os.Stdin.Fd()), oldState) + + // Handle signals gracefully (os.Interrupt is cross-platform) + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, os.Interrupt) + defer signal.Stop(sigCh) + + // Mutex to protect WebSocket writes from concurrent access + var wsMu sync.Mutex + + // Handle terminal resize events (Unix only, no-op on Windows) + cleanupResize := setupResizeHandler(ws, &wsMu) + defer cleanupResize() + + errCh := make(chan error, 2) + exitCodeCh := make(chan int, 1) + + // Forward stdin to WebSocket + go func() { + buf := make([]byte, 32*1024) + for { + n, err := os.Stdin.Read(buf) + if err != nil { + if err != io.EOF { + errCh <- fmt.Errorf("stdin read error: %w", err) + } + return + } + if n > 0 { + wsMu.Lock() + err := ws.WriteMessage(websocket.BinaryMessage, buf[:n]) + wsMu.Unlock() + if err != nil { + errCh <- fmt.Errorf("websocket write error: %w", err) + return + } + } + } + }() + + // Forward WebSocket to stdout + go func() { + for { + msgType, message, err := ws.ReadMessage() + if err != nil { + if !websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) { + exitCodeCh <- 0 + } + return + } + + // Check for exit code message + if msgType == websocket.TextMessage && bytes.Contains(message, []byte("exitCode")) { + var exitMsg struct { + ExitCode int `json:"exitCode"` + } + if json.Unmarshal(message, &exitMsg) == nil { + exitCodeCh <- exitMsg.ExitCode + return + } + } + + // Write binary messages to stdout (actual output) + if msgType == websocket.BinaryMessage { + os.Stdout.Write(message) + } + } + }() + + select { + case err := <-errCh: + return 255, err + case exitCode := <-exitCodeCh: + return exitCode, nil + case <-sigCh: + return 130, nil // 128 + SIGINT + } +} + +func runExecNonInteractive(ws *websocket.Conn) (int, error) { + errCh := make(chan error, 2) + exitCodeCh := make(chan int, 1) + doneCh := make(chan struct{}) + + // Forward stdin to WebSocket + 
go func() { + buf := make([]byte, 32*1024) + for { + n, err := os.Stdin.Read(buf) + if err != nil { + if err != io.EOF { + errCh <- fmt.Errorf("stdin read error: %w", err) + } + return + } + if n > 0 { + if err := ws.WriteMessage(websocket.BinaryMessage, buf[:n]); err != nil { + errCh <- fmt.Errorf("websocket write error: %w", err) + return + } + } + } + }() + + // Forward WebSocket to stdout + go func() { + defer close(doneCh) + for { + msgType, message, err := ws.ReadMessage() + if err != nil { + if websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) || + err == io.EOF { + exitCodeCh <- 0 + return + } + errCh <- fmt.Errorf("websocket read error: %w", err) + return + } + + // Check for exit code message + if msgType == websocket.TextMessage && bytes.Contains(message, []byte("exitCode")) { + var exitMsg struct { + ExitCode int `json:"exitCode"` + } + if json.Unmarshal(message, &exitMsg) == nil { + exitCodeCh <- exitMsg.ExitCode + return + } + } + + // Write to stdout (binary messages contain actual output) + if msgType == websocket.BinaryMessage { + os.Stdout.Write(message) + } + } + }() + + select { + case err := <-errCh: + return 255, err + case exitCode := <-exitCodeCh: + return exitCode, nil + case <-doneCh: + return 0, nil + } +} diff --git a/apps/cli/pkg/cmd/exec_unix.go b/apps/cli/pkg/cmd/exec_unix.go new file mode 100644 index 00000000..43d0c6ba --- /dev/null +++ b/apps/cli/pkg/cmd/exec_unix.go @@ -0,0 +1,35 @@ +//go:build unix + +package cmd + +import ( + "fmt" + "os" + "os/signal" + "sync" + "syscall" + + "github.com/gorilla/websocket" + "golang.org/x/term" +) + +// setupResizeHandler listens for SIGWINCH signals and sends resize messages over the WebSocket. +// Returns a cleanup function that should be deferred. 
+func setupResizeHandler(ws *websocket.Conn, wsMu *sync.Mutex) (cleanup func()) { + sigwinch := make(chan os.Signal, 1) + signal.Notify(sigwinch, syscall.SIGWINCH) + + go func() { + for range sigwinch { + cols, rows, _ := term.GetSize(int(os.Stdout.Fd())) + if rows > 0 && cols > 0 { + msg := fmt.Sprintf(`{"resize":{"rows":%d,"cols":%d}}`, rows, cols) + wsMu.Lock() + ws.WriteMessage(websocket.TextMessage, []byte(msg)) + wsMu.Unlock() + } + } + }() + + return func() { signal.Stop(sigwinch) } +} diff --git a/apps/cli/pkg/cmd/exec_windows.go b/apps/cli/pkg/cmd/exec_windows.go new file mode 100644 index 00000000..678cfb28 --- /dev/null +++ b/apps/cli/pkg/cmd/exec_windows.go @@ -0,0 +1,16 @@ +//go:build windows + +package cmd + +import ( + "sync" + + "github.com/gorilla/websocket" +) + +// setupResizeHandler is a no-op on Windows since SIGWINCH doesn't exist. +// Terminal resize events are not supported on native Windows. +// The initial terminal size is still sent in the exec request. +func setupResizeHandler(ws *websocket.Conn, wsMu *sync.Mutex) (cleanup func()) { + return func() {} +} diff --git a/apps/cli/pkg/cmd/format.go b/apps/cli/pkg/cmd/format.go new file mode 100644 index 00000000..9d74e4dc --- /dev/null +++ b/apps/cli/pkg/cmd/format.go @@ -0,0 +1,299 @@ +package cmd + +import ( + "context" + "fmt" + "io" + "os" + "strconv" + "strings" + "time" + + "github.com/kernel/hypeman-go" + "golang.org/x/term" +) + +// TableWriter provides simple table formatting for CLI output with +// terminal-width-aware column sizing. +type TableWriter struct { + w io.Writer + headers []string + widths []int // natural widths (max of header and cell values) + rows [][]string + + // TruncOrder specifies column indices in truncation priority order. + // The first index in the slice is truncated first when the table is + // too wide for the terminal. Columns not listed are never truncated. 
+ TruncOrder []int +} + +const columnGap = 2 // spaces between columns + +// NewTableWriter creates a new table writer +func NewTableWriter(w io.Writer, headers ...string) *TableWriter { + widths := make([]int, len(headers)) + for i, h := range headers { + widths[i] = len(h) + } + return &TableWriter{ + w: w, + headers: headers, + widths: widths, + } +} + +// AddRow adds a row to the table +func (t *TableWriter) AddRow(cells ...string) { + // Pad or truncate to match header count + row := make([]string, len(t.headers)) + for i := range row { + if i < len(cells) { + row[i] = cells[i] + } + if len(row[i]) > t.widths[i] { + t.widths[i] = len(row[i]) + } + } + t.rows = append(t.rows, row) +} + +// getTerminalWidth returns the terminal width. It tries the stdout +// file descriptor first, then falls back to stderr (which remains +// connected to the terminal even when stdout is piped), then the +// COLUMNS env var, then defaults to 80. +func getTerminalWidth() int { + if w, _, err := term.GetSize(int(os.Stdout.Fd())); err == nil && w > 0 { + return w + } + if w, _, err := term.GetSize(int(os.Stderr.Fd())); err == nil && w > 0 { + return w + } + if cols := os.Getenv("COLUMNS"); cols != "" { + if w, err := strconv.Atoi(cols); err == nil && w > 0 { + return w + } + } + return 80 +} + +// renderWidths computes the final column widths, shrinking columns in +// TruncOrder as needed to fit within the terminal width. 
+func (t *TableWriter) renderWidths() []int { + n := len(t.headers) + widths := make([]int, n) + copy(widths, t.widths) + + termWidth := getTerminalWidth() + + // Total space: column widths + gaps (no trailing gap on last column) + total := func() int { + s := 0 + for _, w := range widths { + s += w + } + s += columnGap * (n - 1) + return s + } + + if total() <= termWidth { + return widths + } + + // Shrink columns in TruncOrder until the table fits + for _, col := range t.TruncOrder { + if col < 0 || col >= n { + continue + } + excess := total() - termWidth + if excess <= 0 { + break + } + // Minimum width: at least the header length, but no less than 5 + minW := len(t.headers[col]) + if minW < 5 { + minW = 5 + } + canShrink := widths[col] - minW + if canShrink <= 0 { + continue + } + shrink := excess + if shrink > canShrink { + shrink = canShrink + } + widths[col] -= shrink + } + + return widths +} + +// Render outputs the table, dynamically fitting columns to the terminal width. +func (t *TableWriter) Render() { + widths := t.renderWidths() + last := len(t.headers) - 1 + + // Print headers + for i, h := range t.headers { + cell := truncateCell(h, widths[i]) + if i < last { + fmt.Fprintf(t.w, "%-*s", widths[i]+columnGap, cell) + } else { + fmt.Fprint(t.w, cell) + } + } + fmt.Fprintln(t.w) + + // Print rows + for _, row := range t.rows { + for i, cell := range row { + cell = truncateCell(cell, widths[i]) + if i < last { + fmt.Fprintf(t.w, "%-*s", widths[i]+columnGap, cell) + } else { + fmt.Fprint(t.w, cell) + } + } + fmt.Fprintln(t.w) + } +} + +// truncateCell truncates s to fit within maxWidth, appending "..." if needed. +func truncateCell(s string, maxWidth int) string { + if len(s) <= maxWidth { + return s + } + if maxWidth <= 3 { + return s[:maxWidth] + } + return s[:maxWidth-3] + "..." 
+} + +// FormatTimeAgo formats a time as "X ago" string +func FormatTimeAgo(t time.Time) string { + if t.IsZero() { + return "N/A" + } + + d := time.Since(t) + + switch { + case d < time.Minute: + return fmt.Sprintf("%d seconds ago", int(d.Seconds())) + case d < time.Hour: + mins := int(d.Minutes()) + if mins == 1 { + return "1 minute ago" + } + return fmt.Sprintf("%d minutes ago", mins) + case d < 24*time.Hour: + hours := int(d.Hours()) + if hours == 1 { + return "1 hour ago" + } + return fmt.Sprintf("%d hours ago", hours) + default: + days := int(d.Hours() / 24) + if days == 1 { + return "1 day ago" + } + return fmt.Sprintf("%d days ago", days) + } +} + +// TruncateID truncates an ID to 12 characters (like Docker) +func TruncateID(id string) string { + if len(id) > 12 { + return id[:12] + } + return id +} + +// TruncateString truncates a string to max length with ellipsis +func TruncateString(s string, max int) string { + if len(s) <= max { + return s + } + if max <= 3 { + return s[:max] + } + return s[:max-3] + "..." +} + +// GenerateInstanceName generates a name from image reference +func GenerateInstanceName(image string) string { + // Extract image name without registry/tag + name := image + + // Remove registry prefix + if idx := strings.LastIndex(name, "/"); idx != -1 { + name = name[idx+1:] + } + + // Remove tag/digest + if idx := strings.Index(name, ":"); idx != -1 { + name = name[:idx] + } + if idx := strings.Index(name, "@"); idx != -1 { + name = name[:idx] + } + + // Add random suffix + suffix := randomSuffix(4) + return fmt.Sprintf("%s-%s", name, suffix) +} + +// randomSuffix generates a random alphanumeric suffix +func randomSuffix(n int) string { + const chars = "abcdefghijklmnopqrstuvwxyz0123456789" + b := make([]byte, n) + for i := range b { + // Simple pseudo-random using time + b[i] = chars[(time.Now().UnixNano()+int64(i))%int64(len(chars))] + } + return string(b) +} + +// ResolveInstance resolves an instance identifier to a full instance ID. 
+// It supports: +// - Full instance ID (exact match) +// - Partial instance ID (prefix match) +// - Instance name (exact match) +// Returns an error if the identifier is ambiguous or not found. +func ResolveInstance(ctx context.Context, client *hypeman.Client, identifier string) (string, error) { + // List all instances + instances, err := client.Instances.List(ctx, hypeman.InstanceListParams{}) + if err != nil { + return "", fmt.Errorf("failed to list instances: %w", err) + } + + var matches []hypeman.Instance + + for _, inst := range *instances { + // Exact ID match - return immediately + if inst.ID == identifier { + return inst.ID, nil + } + // Exact name match - return immediately + if inst.Name == identifier { + return inst.ID, nil + } + // Partial ID match (prefix) + if strings.HasPrefix(inst.ID, identifier) { + matches = append(matches, inst) + } + } + + switch len(matches) { + case 0: + return "", fmt.Errorf("no instance found matching %q", identifier) + case 1: + return matches[0].ID, nil + default: + // Ambiguous - show matching IDs + ids := make([]string, len(matches)) + for i, m := range matches { + ids[i] = TruncateID(m.ID) + } + return "", fmt.Errorf("ambiguous instance identifier %q matches: %s", identifier, strings.Join(ids, ", ")) + } +} diff --git a/apps/cli/pkg/cmd/imagecmd.go b/apps/cli/pkg/cmd/imagecmd.go new file mode 100644 index 00000000..c178ef72 --- /dev/null +++ b/apps/cli/pkg/cmd/imagecmd.go @@ -0,0 +1,175 @@ +package cmd + +import ( + "context" + "fmt" + "net/url" + "os" + + "github.com/kernel/hypeman-go" + "github.com/kernel/hypeman-go/option" + "github.com/tidwall/gjson" + "github.com/urfave/cli/v3" +) + +var imageCmd = cli.Command{ + Name: "image", + Usage: "Manage images", + Commands: []*cli.Command{ + &imageListCmd, + &imageGetCmd, + &imageDeleteCmd, + }, + HideHelpCommand: true, +} + +var imageListCmd = cli.Command{ + Name: "list", + Usage: "List images", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "quiet", + Aliases: 
[]string{"q"}, + Usage: "Only display image names", + }, + }, + Action: handleImageList, + HideHelpCommand: true, +} + +var imageGetCmd = cli.Command{ + Name: "get", + Usage: "Get image details", + ArgsUsage: "", + Action: handleImageGet, + HideHelpCommand: true, +} + +var imageDeleteCmd = cli.Command{ + Name: "delete", + Aliases: []string{"rm"}, + Usage: "Delete an image", + ArgsUsage: "", + Action: handleImageDelete, + HideHelpCommand: true, +} + +func handleImageList(ctx context.Context, cmd *cli.Command) error { + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) + + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + format := cmd.Root().String("format") + transform := cmd.Root().String("transform") + + if format != "auto" { + var res []byte + opts = append(opts, option.WithResponseBodyInto(&res)) + _, err := client.Images.List(ctx, opts...) + if err != nil { + return err + } + obj := gjson.ParseBytes(res) + return ShowJSON(os.Stdout, "image list", obj, format, transform) + } + + images, err := client.Images.List(ctx, opts...) 
+ if err != nil { + return err + } + + quietMode := cmd.Bool("quiet") + + if quietMode { + for _, img := range *images { + fmt.Println(img.Name) + } + return nil + } + + if len(*images) == 0 { + fmt.Fprintln(os.Stderr, "No images found.") + return nil + } + + table := NewTableWriter(os.Stdout, "NAME", "STATUS", "DIGEST", "SIZE", "CREATED") + table.TruncOrder = []int{0, 2, 4} // NAME first, then DIGEST, CREATED + for _, img := range *images { + digest := img.Digest + if len(digest) > 19 { + digest = digest[:19] + } + + size := "-" + if img.SizeBytes > 0 { + size = formatBytes(img.SizeBytes) + } + + table.AddRow( + img.Name, + string(img.Status), + digest, + size, + FormatTimeAgo(img.CreatedAt), + ) + } + table.Render() + + return nil +} + +func handleImageGet(ctx context.Context, cmd *cli.Command) error { + args := cmd.Args().Slice() + if len(args) < 1 { + return fmt.Errorf("image name required\nUsage: hypeman image get ") + } + + name := args[0] + + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) + + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + var res []byte + opts = append(opts, option.WithResponseBodyInto(&res)) + _, err := client.Images.Get(ctx, url.PathEscape(name), opts...) + if err != nil { + return err + } + + format := cmd.Root().String("format") + transform := cmd.Root().String("transform") + + obj := gjson.ParseBytes(res) + return ShowJSON(os.Stdout, "image get", obj, format, transform) +} + +func handleImageDelete(ctx context.Context, cmd *cli.Command) error { + args := cmd.Args().Slice() + if len(args) < 1 { + return fmt.Errorf("image name required\nUsage: hypeman image delete ") + } + + name := args[0] + + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) + + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + err := client.Images.Delete(ctx, url.PathEscape(name), opts...) 
+ if err != nil { + return err + } + + fmt.Fprintf(os.Stderr, "Deleted image %s\n", name) + return nil +} diff --git a/apps/cli/pkg/cmd/ingresscmd.go b/apps/cli/pkg/cmd/ingresscmd.go new file mode 100644 index 00000000..6da8a52e --- /dev/null +++ b/apps/cli/pkg/cmd/ingresscmd.go @@ -0,0 +1,299 @@ +package cmd + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/kernel/hypeman-go" + "github.com/kernel/hypeman-go/option" + "github.com/tidwall/gjson" + "github.com/urfave/cli/v3" +) + +var ingressCmd = cli.Command{ + Name: "ingress", + Usage: "Manage ingresses", + Commands: []*cli.Command{ + &ingressCreateCmd, + &ingressListCmd, + &ingressGetCmd, + &ingressDeleteCmd, + }, + HideHelpCommand: true, +} + +var ingressCreateCmd = cli.Command{ + Name: "create", + Usage: "Create an ingress for an instance", + ArgsUsage: "", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "hostname", + Aliases: []string{"H"}, + Usage: "Hostname to match (exact match on Host header)", + Required: true, + }, + &cli.IntFlag{ + Name: "port", + Aliases: []string{"p"}, + Usage: "Target port on the instance", + Required: true, + }, + &cli.IntFlag{ + Name: "host-port", + Usage: "Host port to listen on (default: 80)", + Value: 80, + }, + &cli.BoolFlag{ + Name: "tls", + Usage: "Enable TLS termination (certificate auto-issued via ACME)", + }, + &cli.BoolFlag{ + Name: "redirect-http", + Usage: "Auto-create HTTP to HTTPS redirect (only applies when --tls is enabled)", + }, + &cli.StringFlag{ + Name: "name", + Usage: "Ingress name (auto-generated from hostname if not provided)", + }, + }, + Action: handleIngressCreate, + HideHelpCommand: true, +} + +var ingressListCmd = cli.Command{ + Name: "list", + Usage: "List ingresses", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "quiet", + Aliases: []string{"q"}, + Usage: "Only display ingress IDs", + }, + }, + Action: handleIngressList, + HideHelpCommand: true, +} + +var ingressGetCmd = cli.Command{ + Name: "get", + Usage: "Get ingress details", + 
ArgsUsage: "", + Action: handleIngressGet, + HideHelpCommand: true, +} + +var ingressDeleteCmd = cli.Command{ + Name: "delete", + Usage: "Delete an ingress", + ArgsUsage: "", + Action: handleIngressDelete, + HideHelpCommand: true, +} + +func handleIngressCreate(ctx context.Context, cmd *cli.Command) error { + args := cmd.Args().Slice() + if len(args) < 1 { + return fmt.Errorf("instance name or ID required\nUsage: hypeman ingress create --hostname --port ") + } + + instance := args[0] + hostname := cmd.String("hostname") + port := cmd.Int("port") + hostPort := cmd.Int("host-port") + tls := cmd.Bool("tls") + redirectHTTP := cmd.Bool("redirect-http") + name := cmd.String("name") + + // Auto-generate name from hostname if not provided + if name == "" { + name = generateIngressName(hostname) + } + + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) + + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + params := hypeman.IngressNewParams{ + Name: name, + Rules: []hypeman.IngressRuleParam{ + { + Match: hypeman.IngressMatchParam{ + Hostname: hostname, + Port: hypeman.Int(int64(hostPort)), + }, + Target: hypeman.IngressTargetParam{ + Instance: instance, + Port: int64(port), + }, + Tls: hypeman.Bool(tls), + RedirectHTTP: hypeman.Bool(redirectHTTP), + }, + }, + } + + fmt.Fprintf(os.Stderr, "Creating ingress %s...\n", name) + + result, err := client.Ingresses.New(ctx, params, opts...) + if err != nil { + return err + } + + fmt.Println(result.ID) + return nil +} + +func handleIngressList(ctx context.Context, cmd *cli.Command) error { + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) 
+ + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + format := cmd.Root().String("format") + transform := cmd.Root().String("transform") + + // If a specific format is requested (not "auto"), output in that format + if format != "auto" { + var res []byte + opts = append(opts, option.WithResponseBodyInto(&res)) + _, err := client.Ingresses.List(ctx, opts...) + if err != nil { + return err + } + obj := gjson.ParseBytes(res) + return ShowJSON(os.Stdout, "ingress list", obj, format, transform) + } + + ingresses, err := client.Ingresses.List(ctx, opts...) + if err != nil { + return err + } + + quietMode := cmd.Bool("quiet") + + if quietMode { + for _, ing := range *ingresses { + fmt.Println(ing.ID) + } + return nil + } + + if len(*ingresses) == 0 { + fmt.Fprintln(os.Stderr, "No ingresses found.") + return nil + } + + table := NewTableWriter(os.Stdout, "ID", "NAME", "HOSTNAME", "TARGET", "TLS", "CREATED") + table.TruncOrder = []int{2, 3, 5, 1} // HOSTNAME first, then TARGET, CREATED, NAME + for _, ing := range *ingresses { + // Extract first rule's hostname and target for display + hostname := "" + target := "" + tlsEnabled := "-" + if len(ing.Rules) > 0 { + rule := ing.Rules[0] + hostname = rule.Match.Hostname + target = fmt.Sprintf("%s:%d", rule.Target.Instance, rule.Target.Port) + if rule.Tls { + tlsEnabled = "yes" + } else { + tlsEnabled = "no" + } + } + + table.AddRow( + TruncateID(ing.ID), + ing.Name, + hostname, + target, + tlsEnabled, + FormatTimeAgo(ing.CreatedAt), + ) + } + table.Render() + + return nil +} + +func handleIngressGet(ctx context.Context, cmd *cli.Command) error { + args := cmd.Args().Slice() + if len(args) < 1 { + return fmt.Errorf("ingress ID required\nUsage: hypeman ingress get ") + } + + id := args[0] + + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) 
+ + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + var res []byte + opts = append(opts, option.WithResponseBodyInto(&res)) + _, err := client.Ingresses.Get(ctx, id, opts...) + if err != nil { + return err + } + + format := cmd.Root().String("format") + transform := cmd.Root().String("transform") + + obj := gjson.ParseBytes(res) + return ShowJSON(os.Stdout, "ingress get", obj, format, transform) +} + +func handleIngressDelete(ctx context.Context, cmd *cli.Command) error { + args := cmd.Args().Slice() + if len(args) < 1 { + return fmt.Errorf("ingress ID or name required\nUsage: hypeman ingress delete ") + } + + id := args[0] + + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) + + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + err := client.Ingresses.Delete(ctx, id, opts...) + if err != nil { + return err + } + + fmt.Fprintf(os.Stderr, "Deleted ingress %s\n", id) + return nil +} + +// generateIngressName generates an ingress name from hostname +func generateIngressName(hostname string) string { + // Replace dots with dashes + name := strings.ReplaceAll(hostname, ".", "-") + name = strings.ToLower(name) + + // Remove invalid characters (only allow a-z, 0-9, and -) + var cleaned strings.Builder + for _, r := range name { + if (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '-' { + cleaned.WriteRune(r) + } + } + name = cleaned.String() + + // Trim leading/trailing dashes + name = strings.Trim(name, "-") + + // Add random suffix + suffix := randomSuffix(4) + return fmt.Sprintf("%s-%s", name, suffix) +} diff --git a/apps/cli/pkg/cmd/lifecycle.go b/apps/cli/pkg/cmd/lifecycle.go new file mode 100644 index 00000000..cc511596 --- /dev/null +++ b/apps/cli/pkg/cmd/lifecycle.go @@ -0,0 +1,178 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + "github.com/kernel/hypeman-go" + "github.com/kernel/hypeman-go/option" 
+ "github.com/urfave/cli/v3" +) + +var stopCmd = cli.Command{ + Name: "stop", + Usage: "Stop a running instance", + ArgsUsage: "", + Action: handleStop, + HideHelpCommand: true, +} + +var startCmd = cli.Command{ + Name: "start", + Usage: "Start a stopped instance", + ArgsUsage: "", + Flags: []cli.Flag{ + &cli.StringSliceFlag{ + Name: "entrypoint", + Usage: "Override image entrypoint for this run (can be repeated for multiple args)", + }, + &cli.StringSliceFlag{ + Name: "cmd", + Usage: "Override image CMD for this run (can be repeated for multiple args)", + }, + }, + Action: handleStart, + HideHelpCommand: true, +} + +var standbyCmd = cli.Command{ + Name: "standby", + Usage: "Put an instance into standby (pause and snapshot)", + ArgsUsage: "", + Action: handleStandby, + HideHelpCommand: true, +} + +var restoreCmd = cli.Command{ + Name: "restore", + Usage: "Restore an instance from standby", + ArgsUsage: "", + Action: handleRestore, + HideHelpCommand: true, +} + +func handleStop(ctx context.Context, cmd *cli.Command) error { + args := cmd.Args().Slice() + if len(args) < 1 { + return fmt.Errorf("instance name or ID required\nUsage: hypeman stop ") + } + + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) + + instanceID, err := ResolveInstance(ctx, &client, args[0]) + if err != nil { + return err + } + + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + fmt.Fprintf(os.Stderr, "Stopping %s...\n", args[0]) + + instance, err := client.Instances.Stop(ctx, instanceID, opts...) + if err != nil { + return err + } + + fmt.Fprintf(os.Stderr, "Stopped %s (state: %s)\n", instance.Name, instance.State) + return nil +} + +func handleStart(ctx context.Context, cmd *cli.Command) error { + args := cmd.Args().Slice() + if len(args) < 1 { + return fmt.Errorf("instance name or ID required\nUsage: hypeman start ") + } + + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) 
+ + instanceID, err := ResolveInstance(ctx, &client, args[0]) + if err != nil { + return err + } + + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + params := hypeman.InstanceStartParams{} + + if entrypoint := cmd.StringSlice("entrypoint"); len(entrypoint) > 0 { + params.Entrypoint = entrypoint + } + if cmdArgs := cmd.StringSlice("cmd"); len(cmdArgs) > 0 { + params.Cmd = cmdArgs + } + + fmt.Fprintf(os.Stderr, "Starting %s...\n", args[0]) + + instance, err := client.Instances.Start(ctx, instanceID, params, opts...) + if err != nil { + return err + } + + fmt.Fprintf(os.Stderr, "Started %s (state: %s)\n", instance.Name, instance.State) + return nil +} + +func handleStandby(ctx context.Context, cmd *cli.Command) error { + args := cmd.Args().Slice() + if len(args) < 1 { + return fmt.Errorf("instance name or ID required\nUsage: hypeman standby ") + } + + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) + + instanceID, err := ResolveInstance(ctx, &client, args[0]) + if err != nil { + return err + } + + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + fmt.Fprintf(os.Stderr, "Putting %s into standby...\n", args[0]) + + instance, err := client.Instances.Standby(ctx, instanceID, opts...) + if err != nil { + return err + } + + fmt.Fprintf(os.Stderr, "Standby %s (state: %s)\n", instance.Name, instance.State) + return nil +} + +func handleRestore(ctx context.Context, cmd *cli.Command) error { + args := cmd.Args().Slice() + if len(args) < 1 { + return fmt.Errorf("instance name or ID required\nUsage: hypeman restore ") + } + + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) 
+ + instanceID, err := ResolveInstance(ctx, &client, args[0]) + if err != nil { + return err + } + + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + fmt.Fprintf(os.Stderr, "Restoring %s from standby...\n", args[0]) + + instance, err := client.Instances.Restore(ctx, instanceID, opts...) + if err != nil { + return err + } + + fmt.Fprintf(os.Stderr, "Restored %s (state: %s)\n", instance.Name, instance.State) + return nil +} diff --git a/apps/cli/pkg/cmd/logs.go b/apps/cli/pkg/cmd/logs.go new file mode 100644 index 00000000..d461aae9 --- /dev/null +++ b/apps/cli/pkg/cmd/logs.go @@ -0,0 +1,80 @@ +package cmd + +import ( + "context" + "fmt" + + "github.com/kernel/hypeman-go" + "github.com/kernel/hypeman-go/option" + "github.com/urfave/cli/v3" +) + +var logsCmd = cli.Command{ + Name: "logs", + Usage: "Fetch the logs of an instance", + ArgsUsage: "", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "follow", + Aliases: []string{"f"}, + Usage: "Follow log output", + }, + &cli.IntFlag{ + Name: "tail", + Usage: "Number of lines to show from the end of the logs", + Value: 100, + }, + &cli.StringFlag{ + Name: "source", + Aliases: []string{"s"}, + Usage: "Log source: app (default), vmm (Cloud Hypervisor), or hypeman (operations log)", + }, + }, + Action: handleLogs, + HideHelpCommand: true, +} + +func handleLogs(ctx context.Context, cmd *cli.Command) error { + args := cmd.Args().Slice() + if len(args) < 1 { + return fmt.Errorf("instance ID required\nUsage: hypeman logs [flags] ") + } + + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) 
+ + // Resolve instance by ID, partial ID, or name + instanceID, err := ResolveInstance(ctx, &client, args[0]) + if err != nil { + return err + } + + params := hypeman.InstanceLogsParams{} + if cmd.IsSet("follow") { + params.Follow = hypeman.Opt(cmd.Bool("follow")) + } + if cmd.IsSet("tail") { + params.Tail = hypeman.Opt(int64(cmd.Int("tail"))) + } + if cmd.IsSet("source") { + params.Source = hypeman.InstanceLogsParamsSource(cmd.String("source")) + } + + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + stream := client.Instances.LogsStreaming( + ctx, + instanceID, + params, + opts..., + ) + defer stream.Close() + + for stream.Next() { + fmt.Println(stream.Current()) + } + + return stream.Err() +} diff --git a/apps/cli/pkg/cmd/ps.go b/apps/cli/pkg/cmd/ps.go new file mode 100644 index 00000000..9f4c7bed --- /dev/null +++ b/apps/cli/pkg/cmd/ps.go @@ -0,0 +1,122 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + "github.com/kernel/hypeman-go" + "github.com/kernel/hypeman-go/option" + "github.com/urfave/cli/v3" +) + +var psCmd = cli.Command{ + Name: "ps", + Usage: "List instances", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "all", + Aliases: []string{"a"}, + Usage: "Show all instances (default: running only)", + }, + &cli.BoolFlag{ + Name: "quiet", + Aliases: []string{"q"}, + Usage: "Only display instance IDs", + }, + }, + Action: handlePs, + HideHelpCommand: true, +} + +func handlePs(ctx context.Context, cmd *cli.Command) error { + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) 
+ + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + instances, err := client.Instances.List( + ctx, + hypeman.InstanceListParams{}, + opts..., + ) + if err != nil { + return err + } + + showAll := cmd.Bool("all") + quietMode := cmd.Bool("quiet") + + // Filter instances + var filtered []hypeman.Instance + for _, inst := range *instances { + if showAll || inst.State == "Running" { + filtered = append(filtered, inst) + } + } + + // Quiet mode - just IDs + if quietMode { + for _, inst := range filtered { + fmt.Println(inst.ID) + } + return nil + } + + // Table output + if len(filtered) == 0 { + if !showAll { + fmt.Fprintln(os.Stderr, "No running instances. Use -a to show all.") + } + return nil + } + + table := NewTableWriter(os.Stdout, "INSTANCE ID", "NAME", "IMAGE", "STATE", "GPU", "HV", "CREATED") + table.TruncOrder = []int{2, 4, 6, 1} // IMAGE first, then GPU, CREATED, NAME + for _, inst := range filtered { + table.AddRow( + TruncateID(inst.ID), + inst.Name, + inst.Image, + string(inst.State), + formatGPU(inst.GPU), + formatHypervisor(inst.Hypervisor), + FormatTimeAgo(inst.CreatedAt), + ) + } + table.Render() + + return nil +} + +// formatGPU returns a short representation of GPU configuration +func formatGPU(gpu hypeman.InstanceGPU) string { + // Check if GPU profile is set + if gpu.Profile != "" { + return gpu.Profile + } + // Check if mdev UUID is set (indicates vGPU without profile name shown) + if gpu.MdevUuid != "" { + return "vgpu" + } + return "-" +} + +// formatHypervisor returns a short abbreviation for the hypervisor +func formatHypervisor(hv hypeman.InstanceHypervisor) string { + switch hv { + case hypeman.InstanceHypervisorCloudHypervisor: + return "ch" + case hypeman.InstanceHypervisorQemu: + return "qemu" + case hypeman.InstanceHypervisorVz: + return "vz" + default: + if hv == "" { + return "ch" // default + } + return string(hv) + } +} diff --git a/apps/cli/pkg/cmd/ps_test.go 
b/apps/cli/pkg/cmd/ps_test.go new file mode 100644 index 00000000..3e48553e --- /dev/null +++ b/apps/cli/pkg/cmd/ps_test.go @@ -0,0 +1,80 @@ +package cmd + +import ( + "testing" + + "github.com/kernel/hypeman-go" + "github.com/stretchr/testify/assert" +) + +func TestFormatGPU(t *testing.T) { + tests := []struct { + name string + gpu hypeman.InstanceGPU + expected string + }{ + { + name: "no GPU", + gpu: hypeman.InstanceGPU{}, + expected: "-", + }, + { + name: "vGPU with profile", + gpu: hypeman.InstanceGPU{ + Profile: "L40S-1Q", + MdevUuid: "abc-123", + }, + expected: "L40S-1Q", + }, + { + name: "vGPU without profile but with mdev", + gpu: hypeman.InstanceGPU{ + MdevUuid: "abc-123", + }, + expected: "vgpu", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := formatGPU(tt.gpu) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestFormatHypervisor(t *testing.T) { + tests := []struct { + name string + hypervisor hypeman.InstanceHypervisor + expected string + }{ + { + name: "cloud-hypervisor", + hypervisor: hypeman.InstanceHypervisorCloudHypervisor, + expected: "ch", + }, + { + name: "qemu", + hypervisor: hypeman.InstanceHypervisorQemu, + expected: "qemu", + }, + { + name: "empty defaults to ch", + hypervisor: "", + expected: "ch", + }, + { + name: "unknown value", + hypervisor: hypeman.InstanceHypervisor("unknown"), + expected: "unknown", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := formatHypervisor(tt.hypervisor) + assert.Equal(t, tt.expected, result) + }) + } +} diff --git a/apps/cli/pkg/cmd/pull.go b/apps/cli/pkg/cmd/pull.go new file mode 100644 index 00000000..cf5c6864 --- /dev/null +++ b/apps/cli/pkg/cmd/pull.go @@ -0,0 +1,59 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + "github.com/kernel/hypeman-go" + "github.com/kernel/hypeman-go/option" + "github.com/urfave/cli/v3" +) + +var pullCmd = cli.Command{ + Name: "pull", + Usage: "Pull an image from a registry", + 
ArgsUsage: "", + Action: handlePull, + HideHelpCommand: true, +} + +func handlePull(ctx context.Context, cmd *cli.Command) error { + args := cmd.Args().Slice() + if len(args) < 1 { + return fmt.Errorf("image reference required\nUsage: hypeman pull ") + } + + image := args[0] + + fmt.Fprintf(os.Stderr, "Pulling %s...\n", image) + + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) + + params := hypeman.ImageNewParams{ + Name: image, + } + + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + result, err := client.Images.New( + ctx, + params, + opts..., + ) + if err != nil { + return err + } + + fmt.Fprintf(os.Stderr, "Status: %s\n", result.Status) + if result.Digest != "" { + fmt.Fprintf(os.Stderr, "Digest: %s\n", result.Digest) + } + fmt.Fprintf(os.Stderr, "Image: %s\n", result.Name) + + return nil +} + diff --git a/apps/cli/pkg/cmd/push.go b/apps/cli/pkg/cmd/push.go new file mode 100644 index 00000000..da8f30e1 --- /dev/null +++ b/apps/cli/pkg/cmd/push.go @@ -0,0 +1,102 @@ +package cmd + +import ( + "context" + "fmt" + "net/http" + "net/url" + "os" + "strings" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/daemon" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/urfave/cli/v3" +) + +var pushCmd = cli.Command{ + Name: "push", + Usage: "Push a local Docker image to hypeman", + ArgsUsage: " [target-name]", + Action: handlePush, + HideHelpCommand: true, +} + +func handlePush(ctx context.Context, cmd *cli.Command) error { + args := cmd.Args().Slice() + if len(args) < 1 { + return fmt.Errorf("image reference required\nUsage: hypeman push ") + } + + sourceImage := args[0] + targetName := sourceImage + if len(args) > 1 { + targetName = args[1] + } + + baseURL := resolveBaseURL(cmd) + + parsedURL, err := url.Parse(baseURL) + if err != nil { + return fmt.Errorf("invalid 
base URL: %w", err)
+	}
+
+	registryHost := parsedURL.Host
+
+	fmt.Fprintf(os.Stderr, "Loading image %s from Docker...\n", sourceImage)
+
+	srcRef, err := name.ParseReference(sourceImage)
+	if err != nil {
+		return fmt.Errorf("invalid source image: %w", err)
+	}
+
+	// Reads the image from the local Docker daemon (requires a running daemon).
+	img, err := daemon.Image(srcRef)
+	if err != nil {
+		return fmt.Errorf("load image: %w", err)
+	}
+
+	// Build target reference - server computes digest from manifest
+	targetRef := registryHost + "/" + strings.TrimPrefix(targetName, "/")
+	fmt.Fprintf(os.Stderr, "Pushing to %s...\n", targetRef)
+
+	dstRef, err := name.ParseReference(targetRef, name.Insecure)
+	if err != nil {
+		return fmt.Errorf("invalid target: %w", err)
+	}
+
+	token := resolveAPIKey()
+
+	// Use custom transport that always sends a Bearer token in the
+	// Authorization header (see authTransport.RoundTrip below); remote.WithAuth
+	// is set to Anonymous so the registry library does not add its own auth.
+	transport := &authTransport{
+		base:  http.DefaultTransport,
+		token: token,
+	}
+
+	err = remote.Write(dstRef, img,
+		remote.WithContext(ctx),
+		remote.WithAuth(authn.Anonymous),
+		remote.WithTransport(transport),
+	)
+	if err != nil {
+		return fmt.Errorf("push failed: %w", err)
+	}
+
+	fmt.Fprintf(os.Stderr, "Pushed %s\n", targetRef)
+	return nil
+}
+
+// authTransport adds a "Bearer <token>" Authorization header to all requests.
+// When token is empty, requests pass through unmodified.
+type authTransport struct {
+	base  http.RoundTripper
+	token string
+}
+
+func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	if t.token != "" {
+		// Clone request to avoid modifying the original (RoundTripper contract)
+		req = req.Clone(req.Context())
+		req.Header.Set("Authorization", "Bearer "+t.token)
+	}
+	return t.base.RoundTrip(req)
+}
diff --git a/apps/cli/pkg/cmd/resourcecmd.go b/apps/cli/pkg/cmd/resourcecmd.go
new file mode 100644
index 00000000..de2cce4b
--- /dev/null
+++ b/apps/cli/pkg/cmd/resourcecmd.go
@@ -0,0 +1,288 @@
+package cmd
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/kernel/hypeman-go"
+	"github.com/kernel/hypeman-go/option"
+	"github.com/tidwall/gjson"
+	"github.com/urfave/cli/v3"
+)
+
+var resourcesCmd = 
cli.Command{ + Name: "resources", + Usage: "Show server resource capacity and allocation status", + Description: `Display current host resource capacity, allocation status, and per-instance breakdown. + +Resources include CPU, memory, disk, network, and GPU (if available). +Oversubscription ratios are applied to calculate effective limits. + +Examples: + # Show all resources (default table format) + hypeman resources + + # Show resources as JSON + hypeman resources --format json + + # Show only GPU information + hypeman resources --transform gpu`, + Action: handleResources, + HideHelpCommand: true, +} + +func handleResources(ctx context.Context, cmd *cli.Command) error { + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) + + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + var res []byte + opts = append(opts, option.WithResponseBodyInto(&res)) + _, err := client.Resources.Get(ctx, opts...) + if err != nil { + return err + } + + format := cmd.Root().String("format") + transform := cmd.Root().String("transform") + + // If format is "auto", use our custom table format + if format == "auto" || format == "" { + return showResourcesTable(res) + } + + // Otherwise use standard JSON display + obj := gjson.ParseBytes(res) + return ShowJSON(os.Stdout, "resources", obj, format, transform) +} + +func showResourcesTable(data []byte) error { + obj := gjson.ParseBytes(data) + + // Print resource summary table + fmt.Println("RESOURCE CAPACITY EFFECTIVE ALLOCATED AVAILABLE OVERSUB") + fmt.Println(strings.Repeat("-", 75)) + + printResourceRow("cpu", obj.Get("cpu"), "cores") + printResourceRow("memory", obj.Get("memory"), "bytes") + printResourceRow("disk", obj.Get("disk"), "bytes") + printResourceRow("disk_io", obj.Get("disk_io"), "disk_bps") + printResourceRow("network", obj.Get("network"), "bps") + + // Print GPU information if available + gpu := obj.Get("gpu") + if gpu.Exists() && gpu.Type != 
gjson.Null { + fmt.Println() + printGPUInfo(gpu) + } + + // Print disk breakdown if available + diskBreakdown := obj.Get("disk_breakdown") + if diskBreakdown.Exists() { + fmt.Println() + fmt.Println("DISK BREAKDOWN:") + if v := diskBreakdown.Get("images_bytes").Int(); v > 0 { + fmt.Printf(" Images: %s\n", formatBytes(v)) + } + if v := diskBreakdown.Get("volumes_bytes").Int(); v > 0 { + fmt.Printf(" Volumes: %s\n", formatBytes(v)) + } + if v := diskBreakdown.Get("overlays_bytes").Int(); v > 0 { + fmt.Printf(" Overlays: %s\n", formatBytes(v)) + } + if v := diskBreakdown.Get("oci_cache_bytes").Int(); v > 0 { + fmt.Printf(" OCI Cache: %s\n", formatBytes(v)) + } + } + + // Print allocations if any + allocations := obj.Get("allocations") + if allocations.Exists() && allocations.IsArray() && len(allocations.Array()) > 0 { + fmt.Println() + fmt.Println("ALLOCATIONS:") + table := NewTableWriter(os.Stdout, "INSTANCE", "CPU", "MEMORY", "DISK", "DISK I/O", "NET DOWN", "NET UP") + table.TruncOrder = []int{0} // Only truncate INSTANCE name if needed + allocations.ForEach(func(key, value gjson.Result) bool { + name := value.Get("instance_name").String() + cpu := fmt.Sprintf("%d", value.Get("cpu").Int()) + mem := formatBytes(value.Get("memory_bytes").Int()) + disk := formatBytes(value.Get("disk_bytes").Int()) + diskIO := formatDiskBps(value.Get("disk_io_bps").Int()) + netDown := formatBps(value.Get("network_download_bps").Int()) + netUp := formatBps(value.Get("network_upload_bps").Int()) + table.AddRow(name, cpu, mem, disk, diskIO, netDown, netUp) + return true + }) + table.Render() + } + + return nil +} + +func printResourceRow(name string, res gjson.Result, unit string) { + if !res.Exists() { + return + } + + capacity := res.Get("capacity").Int() + effective := res.Get("effective_limit").Int() + allocated := res.Get("allocated").Int() + available := res.Get("available").Int() + ratio := res.Get("oversub_ratio").Float() + + var capStr, effStr, allocStr, availStr string + + switch 
unit { + case "bytes": + capStr = formatBytes(capacity) + effStr = formatBytes(effective) + allocStr = formatBytes(allocated) + availStr = formatBytes(available) + case "bps": + capStr = formatBps(capacity) + effStr = formatBps(effective) + allocStr = formatBps(allocated) + availStr = formatBps(available) + case "disk_bps": + capStr = formatDiskBps(capacity) + effStr = formatDiskBps(effective) + allocStr = formatDiskBps(allocated) + availStr = formatDiskBps(available) + default: + capStr = fmt.Sprintf("%d", capacity) + effStr = fmt.Sprintf("%d", effective) + allocStr = fmt.Sprintf("%d", allocated) + availStr = fmt.Sprintf("%d", available) + } + + ratioStr := fmt.Sprintf("%.1fx", ratio) + if ratio == 1.0 { + ratioStr = "1.0x" + } + + fmt.Printf("%-10s %-14s %-14s %-14s %-14s %s\n", name, capStr, effStr, allocStr, availStr, ratioStr) +} + +func printGPUInfo(gpu gjson.Result) { + mode := gpu.Get("mode").String() + totalSlots := gpu.Get("total_slots").Int() + usedSlots := gpu.Get("used_slots").Int() + + fmt.Printf("GPU: %s mode (%d/%d slots used)\n", mode, usedSlots, totalSlots) + + if mode == "vgpu" { + profiles := gpu.Get("profiles") + if profiles.Exists() && profiles.IsArray() && len(profiles.Array()) > 0 { + fmt.Println("PROFILE VRAM AVAILABLE") + fmt.Println(strings.Repeat("-", 40)) + profiles.ForEach(func(key, value gjson.Result) bool { + name := value.Get("name").String() + framebufferMB := value.Get("framebuffer_mb").Int() + available := value.Get("available").Int() + vram := formatMB(framebufferMB) + fmt.Printf("%-14s %-10s %d\n", name, vram, available) + return true + }) + } + } else if mode == "passthrough" { + devices := gpu.Get("devices") + if devices.Exists() && devices.IsArray() && len(devices.Array()) > 0 { + fmt.Println("DEVICE AVAILABLE") + fmt.Println(strings.Repeat("-", 45)) + devices.ForEach(func(key, value gjson.Result) bool { + name := value.Get("name").String() + available := value.Get("available").Bool() + availStr := "no" + if available { + 
availStr = "yes" + } + fmt.Printf("%-30s %s\n", name, availStr) + return true + }) + } + } +} + +func formatBytes(b int64) string { + const ( + KB = 1024 + MB = KB * 1024 + GB = MB * 1024 + TB = GB * 1024 + ) + + switch { + case b >= TB: + return fmt.Sprintf("%.1f TB", float64(b)/TB) + case b >= GB: + return fmt.Sprintf("%.1f GB", float64(b)/GB) + case b >= MB: + return fmt.Sprintf("%.1f MB", float64(b)/MB) + case b >= KB: + return fmt.Sprintf("%.1f KB", float64(b)/KB) + default: + return fmt.Sprintf("%d B", b) + } +} + +func formatMB(mb int64) string { + if mb >= 1024 { + return fmt.Sprintf("%.1f GB", float64(mb)/1024) + } + return fmt.Sprintf("%d MB", mb) +} + +// formatBps converts bytes per second (as returned by the API) to a human-readable +// bits per second string (Kbps, Mbps, Gbps). The API stores bandwidth in bytes/sec, +// but users specify and expect to see bandwidth in bits/sec (the standard unit for +// network bandwidth). +func formatBps(bytesPerSec int64) string { + const ( + Kbps = 1000 + Mbps = Kbps * 1000 + Gbps = Mbps * 1000 + ) + + // Convert bytes/sec to bits/sec (multiply by 8) + bps := bytesPerSec * 8 + + switch { + case bps >= Gbps: + return fmt.Sprintf("%.1f Gbps", float64(bps)/Gbps) + case bps >= Mbps: + return fmt.Sprintf("%.0f Mbps", float64(bps)/Mbps) + case bps >= Kbps: + return fmt.Sprintf("%.0f Kbps", float64(bps)/Kbps) + default: + return fmt.Sprintf("%d bps", bps) + } +} + +// formatDiskBps formats disk I/O bandwidth in bytes per second to a human-readable +// string (KB/s, MB/s, GB/s). Unlike network bandwidth which uses bits, disk I/O +// is conventionally displayed in bytes per second. 
+func formatDiskBps(bytesPerSec int64) string { + const ( + KBps = 1000 + MBps = KBps * 1000 + GBps = MBps * 1000 + ) + + switch { + case bytesPerSec >= GBps: + return fmt.Sprintf("%.1f GB/s", float64(bytesPerSec)/GBps) + case bytesPerSec >= MBps: + return fmt.Sprintf("%.0f MB/s", float64(bytesPerSec)/MBps) + case bytesPerSec >= KBps: + return fmt.Sprintf("%.0f KB/s", float64(bytesPerSec)/KBps) + case bytesPerSec == 0: + return "-" + default: + return fmt.Sprintf("%d B/s", bytesPerSec) + } +} diff --git a/apps/cli/pkg/cmd/resourcecmd_test.go b/apps/cli/pkg/cmd/resourcecmd_test.go new file mode 100644 index 00000000..4882adbd --- /dev/null +++ b/apps/cli/pkg/cmd/resourcecmd_test.go @@ -0,0 +1,128 @@ +package cmd + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestFormatBytes(t *testing.T) { + tests := []struct { + bytes int64 + expected string + }{ + {0, "0 B"}, + {100, "100 B"}, + {1024, "1.0 KB"}, + {1536, "1.5 KB"}, + {1048576, "1.0 MB"}, + {1073741824, "1.0 GB"}, + {1099511627776, "1.0 TB"}, + {1649267441664, "1.5 TB"}, + } + + for _, tt := range tests { + t.Run(tt.expected, func(t *testing.T) { + result := formatBytes(tt.bytes) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestFormatMB(t *testing.T) { + tests := []struct { + mb int64 + expected string + }{ + {512, "512 MB"}, + {1024, "1.0 GB"}, + {2048, "2.0 GB"}, + {6144, "6.0 GB"}, + } + + for _, tt := range tests { + t.Run(tt.expected, func(t *testing.T) { + result := formatMB(tt.mb) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestFormatBps(t *testing.T) { + // The API returns network_download_bps in BYTES per second + // The CLI should convert to bits and display as Mbps/Gbps + // Formula: bytes/sec * 8 = bits/sec + tests := []struct { + name string + bytesPerSec int64 + expected string + }{ + // 30 Mbps = 30,000,000 bits/sec = 3,750,000 bytes/sec + // This is the user's reported bug: they set 30Mbps, API stores 3750000 bytes/sec, + // CLI was 
incorrectly showing "4 Mbps" instead of "30 Mbps" + {"30Mbps bandwidth limit", 3750000, "30 Mbps"}, + + // 1 Gbps = 1,000,000,000 bits/sec = 125,000,000 bytes/sec + {"1Gbps bandwidth limit", 125000000, "1.0 Gbps"}, + + // 100 Mbps = 100,000,000 bits/sec = 12,500,000 bytes/sec + {"100Mbps bandwidth limit", 12500000, "100 Mbps"}, + + // 500 Mbps = 500,000,000 bits/sec = 62,500,000 bytes/sec + {"500Mbps bandwidth limit", 62500000, "500 Mbps"}, + + // 10 Gbps = 10,000,000,000 bits/sec = 1,250,000,000 bytes/sec + {"10Gbps bandwidth limit", 1250000000, "10.0 Gbps"}, + + // Small values: 1 Mbps = 1,000,000 bits/sec = 125,000 bytes/sec + {"1Mbps bandwidth limit", 125000, "1 Mbps"}, + + // Very small: 100 Kbps = 100,000 bits/sec = 12,500 bytes/sec + {"100Kbps bandwidth limit", 12500, "100 Kbps"}, + + // Tiny: 8000 bits/sec = 1000 bytes/sec + {"8Kbps bandwidth limit", 1000, "8 Kbps"}, + + // Edge case: 0 bytes/sec + {"zero bandwidth", 0, "0 bps"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := formatBps(tt.bytesPerSec) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestFormatDiskBps(t *testing.T) { + // Disk I/O is displayed in bytes/sec (KB/s, MB/s, GB/s), not bits/sec + tests := []struct { + name string + bytesPerSec int64 + expected string + }{ + // Common disk I/O limits + {"100 MB/s SSD limit", 100000000, "100 MB/s"}, + {"500 MB/s NVMe limit", 500000000, "500 MB/s"}, + {"1 GB/s high-perf limit", 1000000000, "1.0 GB/s"}, + {"3.5 GB/s NVMe Gen4", 3500000000, "3.5 GB/s"}, + + // Smaller values + {"50 MB/s HDD limit", 50000000, "50 MB/s"}, + {"10 MB/s throttled", 10000000, "10 MB/s"}, + {"1 MB/s minimal", 1000000, "1 MB/s"}, + {"500 KB/s very slow", 500000, "500 KB/s"}, + + // Edge cases + {"zero (no limit or disabled)", 0, "-"}, + {"tiny value", 500, "500 B/s"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := formatDiskBps(tt.bytesPerSec) + assert.Equal(t, tt.expected, result) + }) + } 
+} diff --git a/apps/cli/pkg/cmd/rm.go b/apps/cli/pkg/cmd/rm.go new file mode 100644 index 00000000..68b1ee91 --- /dev/null +++ b/apps/cli/pkg/cmd/rm.go @@ -0,0 +1,122 @@ +package cmd + +import ( + "context" + "fmt" + + "github.com/kernel/hypeman-go" + "github.com/kernel/hypeman-go/option" + "github.com/urfave/cli/v3" +) + +var rmCmd = cli.Command{ + Name: "rm", + Usage: "Remove one or more instances", + ArgsUsage: "[instance...]", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "force", + Aliases: []string{"f"}, + Usage: "Force removal of running instances", + }, + &cli.BoolFlag{ + Name: "all", + Usage: "Remove all instances (stopped only, unless --force)", + }, + }, + Action: handleRm, + HideHelpCommand: true, +} + +func handleRm(ctx context.Context, cmd *cli.Command) error { + args := cmd.Args().Slice() + force := cmd.Bool("force") + all := cmd.Bool("all") + + if !all && len(args) < 1 { + return fmt.Errorf("instance ID required\nUsage: hypeman rm [flags] [instance...]\n hypeman rm --all [--force]") + } + + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) 
+ + // If --all, get all instance IDs + var identifiers []string + if all { + instances, err := client.Instances.List(ctx, hypeman.InstanceListParams{}) + if err != nil { + return fmt.Errorf("failed to list instances: %w", err) + } + for _, inst := range *instances { + identifiers = append(identifiers, inst.ID) + } + if len(identifiers) == 0 { + fmt.Println("No instances to remove") + return nil + } + } else { + identifiers = args + } + + var lastErr error + for _, identifier := range identifiers { + // Resolve instance by ID, partial ID, or name (skip if --all since we have full IDs) + var instanceID string + var err error + if all { + instanceID = identifier + } else { + instanceID, err = ResolveInstance(ctx, &client, identifier) + if err != nil { + fmt.Printf("Error: %v\n", err) + lastErr = err + continue + } + } + + // Build debug options once + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + // Check instance state if not forcing + if !force { + inst, err := client.Instances.Get( + ctx, + instanceID, + opts..., + ) + if err != nil { + fmt.Printf("Error: failed to get instance %s: %v\n", instanceID, err) + lastErr = err + continue + } + + if inst.State == "Running" { + if all { + // Silently skip running instances when using --all without --force + continue + } + fmt.Printf("Error: cannot remove running instance %s. 
Stop it first or use --force\n", instanceID) + lastErr = fmt.Errorf("instance is running") + continue + } + } + + // Delete the instance + err = client.Instances.Delete( + ctx, + instanceID, + opts..., + ) + if err != nil { + fmt.Printf("Error: failed to remove instance %s: %v\n", instanceID, err) + lastErr = err + continue + } + + fmt.Println(instanceID) + } + + return lastErr +} diff --git a/apps/cli/pkg/cmd/run.go b/apps/cli/pkg/cmd/run.go new file mode 100644 index 00000000..771d5571 --- /dev/null +++ b/apps/cli/pkg/cmd/run.go @@ -0,0 +1,441 @@ +package cmd + +import ( + "context" + "fmt" + "net/url" + "os" + "strings" + "time" + + "github.com/kernel/hypeman-go" + "github.com/kernel/hypeman-go/option" + "github.com/urfave/cli/v3" +) + +var runCmd = cli.Command{ + Name: "run", + Usage: "Create and start a new instance from an image", + ArgsUsage: "", + Description: `Create and start a new virtual machine instance from an OCI image. + +Examples: + # Basic run + hypeman run myimage:latest + + # Run with custom resources + hypeman run --cpus 4 --memory 8GB myimage:latest + + # Run with vGPU + hypeman run --gpu-profile L40S-1Q myimage:latest + + # Run with GPU passthrough + hypeman run --device my-gpu myimage:latest + + # Run with QEMU hypervisor + hypeman run --hypervisor qemu myimage:latest + + # Run with bandwidth limits + hypeman run --bandwidth-down 1Gbps --bandwidth-up 500Mbps myimage:latest`, + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "name", + Usage: "Instance name (auto-generated if not provided)", + }, + &cli.StringSliceFlag{ + Name: "env", + Aliases: []string{"e"}, + Usage: "Set environment variable (KEY=VALUE, can be repeated)", + }, + &cli.StringFlag{ + Name: "memory", + Usage: `Base memory size (e.g., "1GB", "512MB")`, + Value: "1GB", + }, + &cli.IntFlag{ + Name: "cpus", + Usage: "Number of virtual CPUs", + Value: 2, + }, + &cli.StringFlag{ + Name: "overlay-size", + Usage: `Writable overlay disk size (e.g., "10GB")`, + Value: "10GB", + }, + 
&cli.StringFlag{
			Name:  "hotplug-size",
			Usage: `Additional memory for hotplug (e.g., "3GB")`,
			Value: "3GB",
		},
		&cli.BoolFlag{
			Name:  "network",
			Usage: "Enable network (default: true)",
			Value: true,
		},
		// GPU/vGPU flags
		&cli.StringFlag{
			Name:  "gpu-profile",
			Usage: `vGPU profile name (e.g., "L40S-1Q", "L40S-2Q")`,
		},
		&cli.StringSliceFlag{
			Name:  "device",
			Usage: "Device ID or name for PCI/GPU passthrough (can be repeated)",
		},
		// Hypervisor flag
		&cli.StringFlag{
			Name:  "hypervisor",
			Usage: `Hypervisor to use: "cloud-hypervisor", "qemu", or "vz"`,
		},
		// Resource limit flags
		&cli.StringFlag{
			Name:  "disk-io",
			Usage: `Disk I/O rate limit (e.g., "100MB/s", "500MB/s")`,
		},
		&cli.StringFlag{
			Name:  "bandwidth-down",
			Usage: `Download bandwidth limit (e.g., "1Gbps", "125MB/s")`,
		},
		&cli.StringFlag{
			Name:  "bandwidth-up",
			Usage: `Upload bandwidth limit (e.g., "1Gbps", "125MB/s")`,
		},
		// Boot option flags
		&cli.BoolFlag{
			Name:  "skip-guest-agent",
			Usage: "Skip guest-agent installation during boot (exec and stat APIs will not work)",
		},
		&cli.BoolFlag{
			Name:  "skip-kernel-headers",
			Usage: "Skip kernel headers installation during boot for faster startup (DKMS will not work)",
		},
		// Entrypoint and CMD overrides
		&cli.StringSliceFlag{
			Name:  "entrypoint",
			Usage: "Override image entrypoint (can be repeated for multiple args)",
		},
		&cli.StringSliceFlag{
			Name:  "cmd",
			Usage: "Override image CMD (can be repeated for multiple args)",
		},
		// Metadata flags
		&cli.StringSliceFlag{
			Name:    "metadata",
			Aliases: []string{"l"},
			Usage:   "Set metadata key-value pair (KEY=VALUE, can be repeated)",
		},
		// Volume mount flags
		&cli.StringSliceFlag{
			Name:    "volume",
			Aliases: []string{"v"},
			Usage:   `Attach volume at creation (format: volume-id:/mount/path[:ro[:overlay=SIZE]]). Can be repeated.`,
		},
	},
	Action:          handleRun,
	HideHelpCommand: true,
}

// handleRun implements `hypeman run`: it ensures the image exists (pulling
// and waiting for the async build if needed), assembles InstanceNewParams
// from the flags, creates the instance, and prints its ID for scripting.
func handleRun(ctx context.Context, cmd *cli.Command) error {
	args := cmd.Args().Slice()
	if len(args) < 1 {
		return fmt.Errorf("image reference required\nUsage: hypeman run [flags] ")
	}

	image := args[0]

	client := hypeman.NewClient(getDefaultRequestOptions(cmd)...)

	// Check if image exists and is ready.
	// URL-encode the image name to handle slashes (e.g., docker.io/library/nginx:latest).
	imgInfo, err := client.Images.Get(ctx, url.PathEscape(image))
	if err != nil {
		// Image not found locally, so try to pull it instead.
		var apiErr *hypeman.Error
		if ok := isNotFoundError(err, &apiErr); ok {
			fmt.Fprintf(os.Stderr, "Image not found locally. Pulling %s...\n", image)
			imgInfo, err = client.Images.New(ctx, hypeman.ImageNewParams{
				Name: image,
			})
			if err != nil {
				return fmt.Errorf("failed to pull image: %w", err)
			}
		} else {
			return fmt.Errorf("failed to check image: %w", err)
		}
	}

	// Wait for image to be ready (build is asynchronous).
	if err := waitForImageReady(ctx, &client, imgInfo); err != nil {
		return err
	}

	// Generate a name if the user did not provide one.
	name := cmd.String("name")
	if name == "" {
		name = GenerateInstanceName(image)
	}

	// Parse KEY=VALUE environment variables; malformed entries are warned and skipped.
	env := make(map[string]string)
	for _, e := range cmd.StringSlice("env") {
		parts := strings.SplitN(e, "=", 2)
		if len(parts) == 2 {
			env[parts[0]] = parts[1]
		} else {
			fmt.Fprintf(os.Stderr, "Warning: ignoring malformed env var: %s\n", e)
		}
	}

	// Base instance parameters.
	params := hypeman.InstanceNewParams{
		Image:       image,
		Name:        name,
		Vcpus:       hypeman.Opt(int64(cmd.Int("cpus"))),
		Size:        hypeman.Opt(cmd.String("memory")),
		OverlaySize: hypeman.Opt(cmd.String("overlay-size")),
		HotplugSize: hypeman.Opt(cmd.String("hotplug-size")),
	}

	if len(env) > 0 {
		params.Env = env
	}

	// Network configuration
	networkEnabled := cmd.Bool("network")
	bandwidthDown :=
cmd.String("bandwidth-down") + bandwidthUp := cmd.String("bandwidth-up") + + if !networkEnabled || bandwidthDown != "" || bandwidthUp != "" { + params.Network = hypeman.InstanceNewParamsNetwork{ + Enabled: hypeman.Opt(networkEnabled), + } + if bandwidthDown != "" { + params.Network.BandwidthDownload = hypeman.Opt(bandwidthDown) + } + if bandwidthUp != "" { + params.Network.BandwidthUpload = hypeman.Opt(bandwidthUp) + } + } + + // GPU configuration + gpuProfile := cmd.String("gpu-profile") + if gpuProfile != "" { + params.GPU = hypeman.InstanceNewParamsGPU{ + Profile: hypeman.Opt(gpuProfile), + } + } + + // Device passthrough + devices := cmd.StringSlice("device") + if len(devices) > 0 { + params.Devices = devices + } + + // Hypervisor selection + hypervisor := cmd.String("hypervisor") + if hypervisor != "" { + switch hypervisor { + case "cloud-hypervisor", "ch": + params.Hypervisor = hypeman.InstanceNewParamsHypervisorCloudHypervisor + case "qemu": + params.Hypervisor = hypeman.InstanceNewParamsHypervisorQemu + case "vz": + params.Hypervisor = hypeman.InstanceNewParamsHypervisorVz + default: + return fmt.Errorf("invalid hypervisor: %s (must be 'cloud-hypervisor', 'qemu', or 'vz')", hypervisor) + } + } + + // Disk I/O limit + diskIO := cmd.String("disk-io") + if diskIO != "" { + params.DiskIoBps = hypeman.Opt(diskIO) + } + + // Boot options + if cmd.IsSet("skip-guest-agent") { + params.SkipGuestAgent = hypeman.Opt(cmd.Bool("skip-guest-agent")) + } + if cmd.IsSet("skip-kernel-headers") { + params.SkipKernelHeaders = hypeman.Opt(cmd.Bool("skip-kernel-headers")) + } + + // Entrypoint and CMD overrides + if entrypoint := cmd.StringSlice("entrypoint"); len(entrypoint) > 0 { + params.Entrypoint = entrypoint + } + if cmdArgs := cmd.StringSlice("cmd"); len(cmdArgs) > 0 { + params.Cmd = cmdArgs + } + + // Metadata + metadataSpecs := cmd.StringSlice("metadata") + if len(metadataSpecs) > 0 { + metadata := make(map[string]string) + for _, m := range metadataSpecs { + parts := 
strings.SplitN(m, "=", 2) + if len(parts) == 2 { + metadata[parts[0]] = parts[1] + } else { + fmt.Fprintf(os.Stderr, "Warning: ignoring malformed metadata: %s\n", m) + } + } + params.Metadata = metadata + } + + // Volume mounts + volumeSpecs := cmd.StringSlice("volume") + if len(volumeSpecs) > 0 { + var mounts []hypeman.VolumeMountParam + for _, spec := range volumeSpecs { + mount, err := parseVolumeSpec(spec) + if err != nil { + return fmt.Errorf("invalid volume spec %q: %w", spec, err) + } + mounts = append(mounts, mount) + } + params.Volumes = mounts + } + + fmt.Fprintf(os.Stderr, "Creating instance %s...\n", name) + + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + result, err := client.Instances.New( + ctx, + params, + opts..., + ) + if err != nil { + return err + } + + // Output instance ID (useful for scripting) + fmt.Println(result.ID) + + return nil +} + +// isNotFoundError checks if err is a 404 not found error +func isNotFoundError(err error, target **hypeman.Error) bool { + if apiErr, ok := err.(*hypeman.Error); ok { + *target = apiErr + return apiErr.Response != nil && apiErr.Response.StatusCode == 404 + } + return false +} + +// waitForImageReady polls image status until it becomes ready or failed +func waitForImageReady(ctx context.Context, client *hypeman.Client, img *hypeman.Image) error { + if img.Status == hypeman.ImageStatusReady { + return nil + } + if img.Status == hypeman.ImageStatusFailed { + if img.Error != "" { + return fmt.Errorf("image build failed: %s", img.Error) + } + return fmt.Errorf("image build failed") + } + + // Poll until ready using the normalized image name from the API response + ticker := time.NewTicker(300 * time.Millisecond) + defer ticker.Stop() + + // Show initial status + showImageStatus(img) + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + updated, err := client.Images.Get(ctx, url.PathEscape(img.Name)) + if err != 
nil {
				return fmt.Errorf("failed to check image status: %w", err)
			}

			// Only print a status line when the status actually changed.
			if updated.Status != img.Status {
				showImageStatus(updated)
				img = updated
			}

			switch updated.Status {
			case hypeman.ImageStatusReady:
				return nil
			case hypeman.ImageStatusFailed:
				if updated.Error != "" {
					return fmt.Errorf("image build failed: %s", updated.Error)
				}
				return fmt.Errorf("image build failed")
			}
		}
	}
}

// parseVolumeSpec parses a volume mount specification string.
// Format: volume-id:/mount/path[:ro[:overlay=SIZE]]
// Examples:
//
//	my-vol:/data
//	my-vol:/data:ro
//	my-vol:/data:ro:overlay=10GB
func parseVolumeSpec(spec string) (hypeman.VolumeMountParam, error) {
	// Split off the volume ID first so option parsing only sees the remainder.
	parts := strings.SplitN(spec, ":", 2)
	if len(parts) < 2 {
		return hypeman.VolumeMountParam{}, fmt.Errorf("expected format volume-id:/mount/path[:ro[:overlay=SIZE]]")
	}

	volumeID := parts[0]
	if volumeID == "" {
		return hypeman.VolumeMountParam{}, fmt.Errorf("volume ID cannot be empty")
	}

	remaining := parts[1]
	// Split remaining by colon to get mount path and options
	segments := strings.Split(remaining, ":")
	mountPath := segments[0]
	if mountPath == "" {
		return hypeman.VolumeMountParam{}, fmt.Errorf("mount path cannot be empty")
	}

	mount := hypeman.VolumeMountParam{
		VolumeID:  volumeID,
		MountPath: mountPath,
	}

	// Parse optional flags; anything unrecognized is an error.
	for _, seg := range segments[1:] {
		switch {
		case seg == "ro":
			mount.Readonly = hypeman.Opt(true)
		case strings.HasPrefix(seg, "overlay="):
			mount.Overlay = hypeman.Opt(true)
			mount.OverlaySize = hypeman.Opt(strings.TrimPrefix(seg, "overlay="))
		default:
			return hypeman.VolumeMountParam{}, fmt.Errorf("unknown option %q", seg)
		}
	}

	return mount, nil
}

// showImageStatus prints image build status to stderr
func showImageStatus(img *hypeman.Image) {
	switch img.Status {
	case hypeman.ImageStatusPending:
		if img.QueuePosition > 0 {
			fmt.Fprintf(os.Stderr,
"Queued (position %d)...\n", img.QueuePosition) + } else { + fmt.Fprintf(os.Stderr, "Queued...\n") + } + case hypeman.ImageStatusPulling: + fmt.Fprintf(os.Stderr, "Pulling image...\n") + case hypeman.ImageStatusConverting: + fmt.Fprintf(os.Stderr, "Converting to disk image...\n") + case hypeman.ImageStatusReady: + fmt.Fprintf(os.Stderr, "Image ready.\n") + } +} diff --git a/apps/cli/pkg/cmd/version.go b/apps/cli/pkg/cmd/version.go new file mode 100644 index 00000000..f55a0991 --- /dev/null +++ b/apps/cli/pkg/cmd/version.go @@ -0,0 +1,61 @@ +package cmd + +import ( + "regexp" + "runtime/debug" +) + +// version can be overridden at build time via ldflags: +// +// -X github.com/kernel/hypeman-cli/pkg/cmd.version=1.2.3 +var version string + +// semverTag matches clean semver tags like v1.2.3 or v0.9.5 (no prerelease/pseudo-version suffix). +var semverTag = regexp.MustCompile(`^v\d+\.\d+\.\d+$`) + +// Version is the CLI version string, resolved at init time. +var Version = resolveVersion() + +func resolveVersion() string { + // 1. ldflags override (GoReleaser sets this) + if version != "" { + return version + } + + // 2. Build info from Go toolchain + info, ok := debug.ReadBuildInfo() + if !ok { + return "dev" + } + + // 3. VCS revision from git (embedded automatically by `go build`) + var revision string + var dirty bool + for _, s := range info.Settings { + switch s.Key { + case "vcs.revision": + revision = s.Value + case "vcs.modified": + dirty = s.Value == "true" + } + } + + // Only use module version if it's a clean semver tag (e.g. v1.2.3), + // not a pseudo-version like v0.9.5-0.20260211212111-7ef5ed6df05d. 
+ if v := info.Main.Version; semverTag.MatchString(v) { + return v + } + + if revision != "" { + short := revision + if len(short) > 7 { + short = short[:7] + } + if dirty { + return short + "-dirty" + } + return short + } + + return "dev" +} diff --git a/apps/cli/pkg/cmd/volumecmd.go b/apps/cli/pkg/cmd/volumecmd.go new file mode 100644 index 00000000..74767789 --- /dev/null +++ b/apps/cli/pkg/cmd/volumecmd.go @@ -0,0 +1,349 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + "github.com/kernel/hypeman-go" + "github.com/kernel/hypeman-go/option" + "github.com/tidwall/gjson" + "github.com/urfave/cli/v3" +) + +var volumeCmd = cli.Command{ + Name: "volume", + Usage: "Manage volumes", + Commands: []*cli.Command{ + &volumeCreateCmd, + &volumeListCmd, + &volumeGetCmd, + &volumeDeleteCmd, + &volumeAttachCmd, + &volumeDetachCmd, + }, + HideHelpCommand: true, +} + +var volumeCreateCmd = cli.Command{ + Name: "create", + Usage: "Create a new volume", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "name", + Usage: "Volume name", + Required: true, + }, + &cli.IntFlag{ + Name: "size", + Usage: "Size in gigabytes", + Required: true, + }, + &cli.StringFlag{ + Name: "id", + Usage: "Optional custom identifier (auto-generated if not provided)", + }, + }, + Action: handleVolumeCreate, + HideHelpCommand: true, +} + +var volumeListCmd = cli.Command{ + Name: "list", + Usage: "List volumes", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "quiet", + Aliases: []string{"q"}, + Usage: "Only display volume IDs", + }, + }, + Action: handleVolumeList, + HideHelpCommand: true, +} + +var volumeGetCmd = cli.Command{ + Name: "get", + Usage: "Get volume details", + ArgsUsage: "", + Action: handleVolumeGet, + HideHelpCommand: true, +} + +var volumeDeleteCmd = cli.Command{ + Name: "delete", + Aliases: []string{"rm"}, + Usage: "Delete a volume", + ArgsUsage: "", + Action: handleVolumeDelete, + HideHelpCommand: true, +} + +var volumeAttachCmd = cli.Command{ + Name: "attach", + Usage: "Attach a 
volume to an instance", + ArgsUsage: "", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "instance", + Aliases: []string{"i"}, + Usage: "Instance ID or name", + Required: true, + }, + &cli.StringFlag{ + Name: "mount-path", + Usage: "Path where volume should be mounted in the guest", + Required: true, + }, + &cli.BoolFlag{ + Name: "readonly", + Usage: "Mount as read-only", + }, + }, + Action: handleVolumeAttach, + HideHelpCommand: true, +} + +var volumeDetachCmd = cli.Command{ + Name: "detach", + Usage: "Detach a volume from an instance", + ArgsUsage: "", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "instance", + Aliases: []string{"i"}, + Usage: "Instance ID or name", + Required: true, + }, + }, + Action: handleVolumeDetach, + HideHelpCommand: true, +} + +func handleVolumeCreate(ctx context.Context, cmd *cli.Command) error { + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) + + params := hypeman.VolumeNewParams{ + Name: cmd.String("name"), + SizeGB: int64(cmd.Int("size")), + } + + if id := cmd.String("id"); id != "" { + params.ID = hypeman.Opt(id) + } + + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + var res []byte + opts = append(opts, option.WithResponseBodyInto(&res)) + _, err := client.Volumes.New(ctx, params, opts...) + if err != nil { + return err + } + + format := cmd.Root().String("format") + transform := cmd.Root().String("transform") + + if format == "auto" || format == "" { + vol := gjson.ParseBytes(res) + fmt.Printf("%s\n", vol.Get("id").String()) + return nil + } + + obj := gjson.ParseBytes(res) + return ShowJSON(os.Stdout, "volume create", obj, format, transform) +} + +func handleVolumeList(ctx context.Context, cmd *cli.Command) error { + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) 
+ + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + format := cmd.Root().String("format") + transform := cmd.Root().String("transform") + + if format != "auto" { + var res []byte + opts = append(opts, option.WithResponseBodyInto(&res)) + _, err := client.Volumes.List(ctx, opts...) + if err != nil { + return err + } + obj := gjson.ParseBytes(res) + return ShowJSON(os.Stdout, "volume list", obj, format, transform) + } + + volumes, err := client.Volumes.List(ctx, opts...) + if err != nil { + return err + } + + quietMode := cmd.Bool("quiet") + + if quietMode { + for _, vol := range *volumes { + fmt.Println(vol.ID) + } + return nil + } + + if len(*volumes) == 0 { + fmt.Fprintln(os.Stderr, "No volumes found.") + return nil + } + + table := NewTableWriter(os.Stdout, "ID", "NAME", "SIZE", "ATTACHMENTS", "CREATED") + table.TruncOrder = []int{0, 1, 4} // ID first, then NAME, CREATED + for _, vol := range *volumes { + attachments := fmt.Sprintf("%d", len(vol.Attachments)) + if len(vol.Attachments) == 0 { + attachments = "-" + } + + table.AddRow( + TruncateID(vol.ID), + vol.Name, + fmt.Sprintf("%d GB", vol.SizeGB), + attachments, + FormatTimeAgo(vol.CreatedAt), + ) + } + table.Render() + + return nil +} + +func handleVolumeGet(ctx context.Context, cmd *cli.Command) error { + args := cmd.Args().Slice() + if len(args) < 1 { + return fmt.Errorf("volume ID required\nUsage: hypeman volume get ") + } + + id := args[0] + + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) + + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + var res []byte + opts = append(opts, option.WithResponseBodyInto(&res)) + _, err := client.Volumes.Get(ctx, id, opts...) 
+ if err != nil { + return err + } + + format := cmd.Root().String("format") + transform := cmd.Root().String("transform") + + obj := gjson.ParseBytes(res) + return ShowJSON(os.Stdout, "volume get", obj, format, transform) +} + +func handleVolumeDelete(ctx context.Context, cmd *cli.Command) error { + args := cmd.Args().Slice() + if len(args) < 1 { + return fmt.Errorf("volume ID required\nUsage: hypeman volume delete ") + } + + id := args[0] + + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) + + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + err := client.Volumes.Delete(ctx, id, opts...) + if err != nil { + return err + } + + fmt.Fprintf(os.Stderr, "Deleted volume %s\n", id) + return nil +} + +func handleVolumeAttach(ctx context.Context, cmd *cli.Command) error { + args := cmd.Args().Slice() + if len(args) < 1 { + return fmt.Errorf("volume ID required\nUsage: hypeman volume attach --instance --mount-path ") + } + + volumeID := args[0] + instanceIdentifier := cmd.String("instance") + mountPath := cmd.String("mount-path") + + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) + + // Resolve instance + instanceID, err := ResolveInstance(ctx, &client, instanceIdentifier) + if err != nil { + return err + } + + params := hypeman.InstanceVolumeAttachParams{ + ID: instanceID, + MountPath: mountPath, + } + + if cmd.IsSet("readonly") { + params.Readonly = hypeman.Opt(cmd.Bool("readonly")) + } + + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + _, err = client.Instances.Volumes.Attach(ctx, volumeID, params, opts...) 
+ if err != nil { + return err + } + + fmt.Fprintf(os.Stderr, "Attached volume %s to instance %s at %s\n", volumeID, instanceIdentifier, mountPath) + return nil +} + +func handleVolumeDetach(ctx context.Context, cmd *cli.Command) error { + args := cmd.Args().Slice() + if len(args) < 1 { + return fmt.Errorf("volume ID required\nUsage: hypeman volume detach --instance ") + } + + volumeID := args[0] + instanceIdentifier := cmd.String("instance") + + client := hypeman.NewClient(getDefaultRequestOptions(cmd)...) + + // Resolve instance + instanceID, err := ResolveInstance(ctx, &client, instanceIdentifier) + if err != nil { + return err + } + + params := hypeman.InstanceVolumeDetachParams{ + ID: instanceID, + } + + var opts []option.RequestOption + if cmd.Root().Bool("debug") { + opts = append(opts, debugMiddlewareOption) + } + + _, err = client.Instances.Volumes.Detach(ctx, volumeID, params, opts...) + if err != nil { + return err + } + + fmt.Fprintf(os.Stderr, "Detached volume %s from instance %s\n", volumeID, instanceIdentifier) + return nil +} diff --git a/apps/cli/pkg/jsonflag/json_flag.go b/apps/cli/pkg/jsonflag/json_flag.go new file mode 100644 index 00000000..605f883b --- /dev/null +++ b/apps/cli/pkg/jsonflag/json_flag.go @@ -0,0 +1,248 @@ +package jsonflag + +import ( + "fmt" + "strconv" + "time" + + "github.com/urfave/cli/v3" +) + +type JSONConfig struct { + Kind MutationKind + Path string + // For boolean flags that set a specific value when present + SetValue any +} + +type JSONValueCreator[T any] struct{} + +func (c JSONValueCreator[T]) Create(val T, dest *T, config JSONConfig) cli.Value { + *dest = val + return &jsonValue[T]{ + destination: dest, + config: config, + } +} + +func (c JSONValueCreator[T]) ToString(val T) string { + switch v := any(val).(type) { + case string: + if v == "" { + return v + } + return fmt.Sprintf("%q", v) + case bool: + return strconv.FormatBool(v) + case int: + return strconv.Itoa(v) + case float64: + return 
strconv.FormatFloat(v, 'g', -1, 64) + case time.Time: + return v.Format(time.RFC3339) + default: + return fmt.Sprintf("%v", v) + } +} + +type jsonValue[T any] struct { + destination *T + config JSONConfig +} + +func (v *jsonValue[T]) Set(val string) error { + var parsed T + var err error + + // If SetValue is configured, use that value instead of parsing the input + if v.config.SetValue != nil { + // For boolean flags with SetValue, register the configured value + if _, isBool := any(parsed).(bool); isBool { + globalRegistry.Mutate(v.config.Kind, v.config.Path, v.config.SetValue) + *v.destination = any(true).(T) // Set the flag itself to true + return nil + } + // For any flags with SetValue, register the configured value + globalRegistry.Mutate(v.config.Kind, v.config.Path, v.config.SetValue) + *v.destination = any(v.config.SetValue).(T) + return nil + } + + switch any(parsed).(type) { + case string: + parsed = any(val).(T) + case bool: + boolVal, parseErr := strconv.ParseBool(val) + if parseErr != nil { + return fmt.Errorf("invalid boolean value %q: %w", val, parseErr) + } + parsed = any(boolVal).(T) + case int: + intVal, parseErr := strconv.Atoi(val) + if parseErr != nil { + return fmt.Errorf("invalid integer value %q: %w", val, parseErr) + } + parsed = any(intVal).(T) + case float64: + floatVal, parseErr := strconv.ParseFloat(val, 64) + if parseErr != nil { + return fmt.Errorf("invalid float value %q: %w", val, parseErr) + } + parsed = any(floatVal).(T) + case time.Time: + // Try common datetime formats + formats := []string{ + time.RFC3339, + "2006-01-02T15:04:05Z07:00", + "2006-01-02T15:04:05", + "2006-01-02 15:04:05", + "2006-01-02", + "15:04:05", + "15:04", + } + var timeVal time.Time + var parseErr error + for _, format := range formats { + timeVal, parseErr = time.Parse(format, val) + if parseErr == nil { + break + } + } + if parseErr != nil { + return fmt.Errorf("invalid datetime value %q: %w", val, parseErr) + } + parsed = any(timeVal).(T) + case any: + 
// For `any`, store the string value directly + parsed = any(val).(T) + default: + return fmt.Errorf("unsupported type for JSON flag") + } + + *v.destination = parsed + globalRegistry.Mutate(v.config.Kind, v.config.Path, parsed) + return err +} + +func (v *jsonValue[T]) Get() any { + if v.destination != nil { + return *v.destination + } + var zero T + return zero +} + +func (v *jsonValue[T]) String() string { + if v.destination != nil { + switch val := any(*v.destination).(type) { + case string: + return val + case bool: + return strconv.FormatBool(val) + case int: + return strconv.Itoa(val) + case float64: + return strconv.FormatFloat(val, 'g', -1, 64) + case time.Time: + return val.Format(time.RFC3339) + default: + return fmt.Sprintf("%v", val) + } + } + var zero T + switch any(zero).(type) { + case string: + return "" + case bool: + return "false" + case int: + return "0" + case float64: + return "0" + case time.Time: + return "" + default: + return fmt.Sprintf("%v", zero) + } +} + +func (v *jsonValue[T]) IsBoolFlag() bool { + return v.config.SetValue != nil +} + +// JSONDateValueCreator is a specialized creator for date-only values +type JSONDateValueCreator struct{} + +func (c JSONDateValueCreator) Create(val time.Time, dest *time.Time, config JSONConfig) cli.Value { + *dest = val + return &jsonDateValue{ + destination: dest, + config: config, + } +} + +func (c JSONDateValueCreator) ToString(val time.Time) string { + return val.Format("2006-01-02") +} + +type jsonDateValue struct { + destination *time.Time + config JSONConfig +} + +func (v *jsonDateValue) Set(val string) error { + // Try date-only formats first, then fall back to datetime formats + formats := []string{ + "2006-01-02", + "01/02/2006", + "Jan 2, 2006", + "January 2, 2006", + "2-Jan-2006", + time.RFC3339, + "2006-01-02T15:04:05Z07:00", + "2006-01-02T15:04:05", + "2006-01-02 15:04:05", + } + + var timeVal time.Time + var parseErr error + for _, format := range formats { + timeVal, parseErr = 
time.Parse(format, val) + if parseErr == nil { + break + } + } + if parseErr != nil { + return fmt.Errorf("invalid date value %q: %w", val, parseErr) + } + + *v.destination = timeVal + globalRegistry.Mutate(v.config.Kind, v.config.Path, timeVal.Format("2006-01-02")) + return nil +} + +func (v *jsonDateValue) Get() any { + if v.destination != nil { + return *v.destination + } + return time.Time{} +} + +func (v *jsonDateValue) String() string { + if v.destination != nil { + return v.destination.Format("2006-01-02") + } + return "" +} + +func (v *jsonDateValue) IsBoolFlag() bool { + return false +} + +type JSONStringFlag = cli.FlagBase[string, JSONConfig, JSONValueCreator[string]] +type JSONBoolFlag = cli.FlagBase[bool, JSONConfig, JSONValueCreator[bool]] +type JSONIntFlag = cli.FlagBase[int, JSONConfig, JSONValueCreator[int]] +type JSONFloatFlag = cli.FlagBase[float64, JSONConfig, JSONValueCreator[float64]] +type JSONDatetimeFlag = cli.FlagBase[time.Time, JSONConfig, JSONValueCreator[time.Time]] +type JSONDateFlag = cli.FlagBase[time.Time, JSONConfig, JSONDateValueCreator] +type JSONAnyFlag = cli.FlagBase[any, JSONConfig, JSONValueCreator[any]] diff --git a/apps/cli/pkg/jsonflag/mutation.go b/apps/cli/pkg/jsonflag/mutation.go new file mode 100644 index 00000000..46c115b9 --- /dev/null +++ b/apps/cli/pkg/jsonflag/mutation.go @@ -0,0 +1,104 @@ +package jsonflag + +import ( + "fmt" + "strconv" + "strings" + + "github.com/tidwall/gjson" + "github.com/tidwall/sjson" +) + +type MutationKind string + +const ( + Body MutationKind = "body" + Query MutationKind = "query" + Header MutationKind = "header" +) + +type Mutation struct { + Kind MutationKind + Path string + Value any +} + +type registry struct { + mutations []Mutation +} + +var globalRegistry = ®istry{} + +func (r *registry) Mutate(kind MutationKind, path string, value any) { + r.mutations = append(r.mutations, Mutation{ + Kind: kind, + Path: path, + Value: value, + }) +} + +func (r *registry) Apply(body, query, 
header []byte) ([]byte, []byte, []byte, error) { + var err error + + for _, mutation := range r.mutations { + switch mutation.Kind { + case Body: + body, err = jsonSet(body, mutation.Path, mutation.Value) + case Query: + query, err = jsonSet(query, mutation.Path, mutation.Value) + case Header: + header, err = jsonSet(header, mutation.Path, mutation.Value) + } + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to apply mutation %s.%s: %w", mutation.Kind, mutation.Path, err) + } + } + + return body, query, header, nil +} + +func (r *registry) Clear() { + r.mutations = nil +} + +func (r *registry) List() []Mutation { + result := make([]Mutation, len(r.mutations)) + copy(result, r.mutations) + return result +} + +// Mutate adds a mutation that will be applied to the specified kind of data +func Mutate(kind MutationKind, path string, value any) { + globalRegistry.Mutate(kind, path, value) +} + +// ApplyMutations applies all registered mutations to the provided JSON data +func ApplyMutations(body, query, header []byte) ([]byte, []byte, []byte, error) { + return globalRegistry.Apply(body, query, header) +} + +// ClearMutations removes all registered mutations from the global registry +func ClearMutations() { + globalRegistry.Clear() +} + +// ListMutations returns a copy of all currently registered mutations +func ListMutations() []Mutation { + return globalRegistry.List() +} + +func jsonSet(json []byte, path string, value any) ([]byte, error) { + keys := strings.Split(path, ".") + path = "" + for _, key := range keys { + if key == "#" { + key = strconv.Itoa(len(gjson.GetBytes(json, path).Array()) - 1) + } + + if len(path) > 0 { + path += "." 
+ } + path += key + } + return sjson.SetBytes(json, path, value) +} diff --git a/apps/cli/pkg/jsonflag/mutation_test.go b/apps/cli/pkg/jsonflag/mutation_test.go new file mode 100644 index 00000000..e87e518b --- /dev/null +++ b/apps/cli/pkg/jsonflag/mutation_test.go @@ -0,0 +1,37 @@ +package jsonflag + +import ( + "testing" +) + +func TestApply(t *testing.T) { + ClearMutations() + + Mutate(Body, "name", "test") + Mutate(Query, "page", 1) + Mutate(Header, "authorization", "Bearer token") + + body, query, header, err := ApplyMutations( + []byte(`{}`), + []byte(`{}`), + []byte(`{}`), + ) + + if err != nil { + t.Fatalf("Failed to apply mutations: %v", err) + } + + expectedBody := `{"name":"test"}` + expectedQuery := `{"page":1}` + expectedHeader := `{"authorization":"Bearer token"}` + + if string(body) != expectedBody { + t.Errorf("Body mismatch. Expected: %s, Got: %s", expectedBody, string(body)) + } + if string(query) != expectedQuery { + t.Errorf("Query mismatch. Expected: %s, Got: %s", expectedQuery, string(query)) + } + if string(header) != expectedHeader { + t.Errorf("Header mismatch. 
Expected: %s, Got: %s", expectedHeader, string(header)) + } +} diff --git a/apps/cli/pkg/jsonview/explorer.go b/apps/cli/pkg/jsonview/explorer.go new file mode 100644 index 00000000..8d725eb9 --- /dev/null +++ b/apps/cli/pkg/jsonview/explorer.go @@ -0,0 +1,590 @@ +package jsonview + +import ( + "errors" + "fmt" + "math" + "strings" + + "github.com/charmbracelet/bubbles/help" + "github.com/charmbracelet/bubbles/key" + "github.com/charmbracelet/bubbles/table" + "github.com/charmbracelet/bubbles/viewport" + tea "github.com/charmbracelet/bubbletea" + "github.com/charmbracelet/lipgloss" + "github.com/muesli/reflow/truncate" + "github.com/muesli/reflow/wordwrap" + "github.com/tidwall/gjson" +) + +const ( + // UI layout constants + borderPadding = 2 + heightOffset = 5 + tableMinHeight = 2 + titlePaddingLeft = 2 + titlePaddingTop = 0 + footerPaddingLeft = 1 + + // Column width constants + defaultColumnWidth = 10 + keyColumnWidth = 3 + valueColumnWidth = 5 + + // String formatting constants + maxStringLength = 100 + maxPreviewLength = 24 + + arrayColor = lipgloss.Color("1") + stringColor = lipgloss.Color("5") + objectColor = lipgloss.Color("4") +) + +type keyMap struct { + Up key.Binding + Down key.Binding + Enter key.Binding + Back key.Binding + PrintValue key.Binding + Raw key.Binding + Quit key.Binding +} + +func (k keyMap) ShortHelp() []key.Binding { + return []key.Binding{k.Quit, k.Up, k.Down, k.Back, k.Enter, k.PrintValue, k.Raw} +} + +func (k keyMap) FullHelp() [][]key.Binding { + return [][]key.Binding{k.ShortHelp()} +} + +var keys = keyMap{ + Up: key.NewBinding( + key.WithKeys("up", "k"), + key.WithHelp("↑/k", "up"), + ), + Down: key.NewBinding( + key.WithKeys("down", "j"), + key.WithHelp("↓/j", "down"), + ), + Back: key.NewBinding( + key.WithKeys("left", "h", "backspace"), + key.WithHelp("←/h", "go back"), + ), + Enter: key.NewBinding( + key.WithKeys("right", "l"), + key.WithHelp("→/l", "expand"), + ), + PrintValue: key.NewBinding( + key.WithKeys("p"), + 
key.WithHelp("p", "print and exit"), + ), + Raw: key.NewBinding( + key.WithKeys("r"), + key.WithHelp("r", "toggle raw JSON"), + ), + Quit: key.NewBinding( + key.WithKeys("q", "esc", "ctrl+c", "enter"), + key.WithHelp("q/enter", "quit"), + ), +} + +var ( + titleStyle = lipgloss.NewStyle().Bold(true).PaddingLeft(titlePaddingLeft).PaddingTop(titlePaddingTop) + arrayStyle = lipgloss.NewStyle().BorderStyle(lipgloss.RoundedBorder()).BorderForeground(arrayColor) + stringStyle = lipgloss.NewStyle().BorderStyle(lipgloss.RoundedBorder()).BorderForeground(stringColor) + objectStyle = lipgloss.NewStyle().BorderStyle(lipgloss.RoundedBorder()).BorderForeground(objectColor) + stringLiteralStyle = lipgloss.NewStyle().Bold(true).Foreground(lipgloss.Color("2")) +) + +type JSONView interface { + GetPath() string + GetData() gjson.Result + Update(tea.Msg) tea.Cmd + View() string + Resize(width, height int) +} + +type TableView struct { + path string + data gjson.Result + table table.Model + rowData []gjson.Result +} + +func (tv *TableView) GetPath() string { return tv.path } +func (tv *TableView) GetData() gjson.Result { return tv.data } +func (tv *TableView) View() string { return tv.table.View() } + +func (tv *TableView) Update(msg tea.Msg) tea.Cmd { + var cmd tea.Cmd + tv.table, cmd = tv.table.Update(msg) + return cmd +} + +func (tv *TableView) Resize(width, height int) { + tv.updateColumnWidths(width) + tv.table.SetHeight(min(height-heightOffset, tableMinHeight+len(tv.table.Rows()))) +} + +func (tv *TableView) updateColumnWidths(width int) { + columns := tv.table.Columns() + widths := make([]int, len(columns)) + + // Calculate required widths from headers and content + for i, col := range columns { + widths[i] = lipgloss.Width(col.Title) + } + + for _, row := range tv.table.Rows() { + for i, cell := range row { + if i < len(widths) { + widths[i] = max(widths[i], lipgloss.Width(cell)) + } + } + } + + totalWidth := sum(widths) + available := width - borderPadding*len(columns) + + if 
totalWidth <= available { + for i, w := range widths { + columns[i].Width = w + } + return + } + + fairShare := float64(available) / float64(len(columns)) + shrinkable := 0.0 + + for _, w := range widths { + if float64(w) > fairShare { + shrinkable += float64(w) - fairShare + } + } + + if shrinkable > 0 { + excess := float64(totalWidth - available) + for i, w := range widths { + if float64(w) > fairShare { + reduction := (float64(w) - fairShare) * (excess / shrinkable) + widths[i] = int(math.Round(float64(w) - reduction)) + } + } + } + + for i, w := range widths { + columns[i].Width = w + } + + tv.table.SetColumns(columns) +} + +type TextView struct { + path string + data gjson.Result + viewport viewport.Model + ready bool +} + +func (tv *TextView) GetPath() string { return tv.path } +func (tv *TextView) GetData() gjson.Result { return tv.data } +func (tv *TextView) View() string { return tv.viewport.View() } +func (tv *TextView) Update(msg tea.Msg) tea.Cmd { + var cmd tea.Cmd + tv.viewport, cmd = tv.viewport.Update(msg) + return cmd +} + +func (tv *TextView) Resize(width, height int) { + h := height - heightOffset + if !tv.ready { + tv.viewport = viewport.New(width, h) + tv.viewport.SetContent(wordwrap.String(tv.data.String(), width)) + tv.ready = true + return + } + tv.viewport.Width = width + tv.viewport.Height = h +} + +type JSONViewer struct { + stack []JSONView + root string + width int + height int + rawMode bool + message string + help help.Model +} + +func ExploreJSON(title string, json gjson.Result) error { + view, err := newView("", json, false) + if err != nil { + return err + } + + viewer := &JSONViewer{stack: []JSONView{view}, root: title, rawMode: false, help: help.New()} + _, err = tea.NewProgram(viewer).Run() + if viewer.message != "" { + _, msgErr := fmt.Println("\n" + viewer.message) + err = errors.Join(err, msgErr) + } + return err +} + +func (v *JSONViewer) current() JSONView { return v.stack[len(v.stack)-1] } +func (v *JSONViewer) Init() 
tea.Cmd { return nil } + +func (v *JSONViewer) resize(width, height int) { + v.width, v.height = width, height + v.help.Width = width + for i := range v.stack { + v.stack[i].Resize(width, height) + } +} + +func (v *JSONViewer) Update(msg tea.Msg) (tea.Model, tea.Cmd) { + switch msg := msg.(type) { + case tea.WindowSizeMsg: + v.resize(msg.Width-borderPadding, msg.Height) + return v, nil + case tea.KeyMsg: + switch { + case key.Matches(msg, keys.Quit): + return v, tea.Quit + case key.Matches(msg, keys.Enter): + return v.navigateForward() + case key.Matches(msg, keys.Back): + return v.navigateBack() + case key.Matches(msg, keys.Raw): + return v.toggleRaw() + case key.Matches(msg, keys.PrintValue): + v.message = v.getSelectedContent() + return v, tea.Quit + } + } + + return v, v.current().Update(msg) +} + +func (v *JSONViewer) getSelectedContent() string { + tableView, ok := v.current().(*TableView) + if !ok { + return v.current().GetData().Raw + } + + selected := tableView.rowData[tableView.table.Cursor()] + if selected.Type == gjson.String { + return selected.String() + } + return selected.Raw +} + +func (v *JSONViewer) navigateForward() (tea.Model, tea.Cmd) { + tableView, ok := v.current().(*TableView) + if !ok { + return v, nil + } + + cursor := tableView.table.Cursor() + selected := tableView.rowData[cursor] + if !v.canNavigateInto(selected) { + return v, nil + } + + path := v.buildNavigationPath(tableView, cursor) + forwardView, err := newView(path, selected, v.rawMode) + if err != nil { + return v, nil + } + + v.stack = append(v.stack, forwardView) + v.resize(v.width, v.height) + return v, nil +} + +func (v *JSONViewer) buildNavigationPath(tableView *TableView, cursor int) string { + if tableView.data.IsArray() { + return fmt.Sprintf("%s[%d]", tableView.path, cursor) + } + key := tableView.data.Get("@keys").Array()[cursor].Str + return fmt.Sprintf("%s[%s]", tableView.path, quoteString(key)) +} + +func quoteString(s string) string { + // Replace backslashes and 
quotes with escaped versions + s = strings.ReplaceAll(s, "\\", "\\\\") + s = strings.ReplaceAll(s, "\"", "\\\"") + return stringLiteralStyle.Render("\"" + s + "\"") +} + +func (v *JSONViewer) canNavigateInto(data gjson.Result) bool { + switch { + case data.IsArray(): + return len(data.Array()) > 0 + case data.IsObject(): + return len(data.Map()) > 0 + case data.Type == gjson.String: + str := data.String() + return strings.Contains(str, "\n") || lipgloss.Width(str) >= maxStringLength + } + return false +} + +func (v *JSONViewer) navigateBack() (tea.Model, tea.Cmd) { + if len(v.stack) > 1 { + v.stack = v.stack[:len(v.stack)-1] + } + return v, nil +} + +func (v *JSONViewer) toggleRaw() (tea.Model, tea.Cmd) { + v.rawMode = !v.rawMode + + for i, view := range v.stack { + rawView, err := newView(view.GetPath(), view.GetData(), v.rawMode) + if err != nil { + return v, tea.Printf("Error: %s", err) + } + v.stack[i] = rawView + } + + v.resize(v.width, v.height) + return v, nil +} + +func (v *JSONViewer) View() string { + view := v.current() + title := v.buildTitle(view) + content := titleStyle.Render(title) + style := v.getStyleForData(view.GetData()) + content += "\n" + style.Render(view.View()) + content += "\n" + v.help.View(keys) + return content +} + +func (v *JSONViewer) buildTitle(view JSONView) string { + title := v.root + if len(view.GetPath()) > 0 { + title += " → " + view.GetPath() + } + if v.rawMode { + title += " (JSON)" + } + return title +} + +func (v *JSONViewer) getStyleForData(data gjson.Result) lipgloss.Style { + switch { + case data.Type == gjson.String: + return stringStyle + case data.IsArray(): + return arrayStyle + default: + return objectStyle + } +} + +func newView(path string, data gjson.Result, raw bool) (JSONView, error) { + if data.Type == gjson.String { + return newTextView(path, data) + } + return newTableView(path, data, raw) +} + +func newTextView(path string, data gjson.Result) (*TextView, error) { + if !data.Exists() || data.Type != 
gjson.String { + return nil, fmt.Errorf("invalid text JSON") + } + return &TextView{path: path, data: data}, nil +} + +func newTableView(path string, data gjson.Result, raw bool) (*TableView, error) { + if !data.Exists() || data.Type != gjson.JSON { + return nil, fmt.Errorf("invalid table JSON") + } + + switch { + case data.IsArray(): + array := data.Array() + if isArrayOfObjects(array) { + return newArrayOfObjectsTableView(path, data, array, raw), nil + } else { + return newArrayTableView(path, data, array, raw), nil + } + case data.IsObject(): + return newObjectTableView(path, data, raw), nil + default: + return nil, fmt.Errorf("unsupported JSON type") + } +} + +func newArrayTableView(path string, data gjson.Result, array []gjson.Result, raw bool) *TableView { + columns := []table.Column{{Title: "Items", Width: defaultColumnWidth}} + rows := make([]table.Row, 0, len(array)) + rowData := make([]gjson.Result, 0, len(array)) + + for _, item := range array { + rows = append(rows, table.Row{formatValue(item, raw)}) + rowData = append(rowData, item) + } + + t := createTable(columns, rows, arrayColor) + return &TableView{path: path, data: data, table: t, rowData: rowData} +} + +func newArrayOfObjectsTableView(path string, data gjson.Result, array []gjson.Result, raw bool) *TableView { + // Collect unique keys + keySet := make(map[string]struct{}) + var columns []table.Column + + for _, item := range array { + for _, key := range item.Get("@keys").Array() { + if _, exists := keySet[key.Str]; !exists { + keySet[key.Str] = struct{}{} + title := key.Str + columns = append(columns, table.Column{Title: title, Width: defaultColumnWidth}) + } + } + } + + rows := make([]table.Row, 0, len(array)) + rowData := make([]gjson.Result, 0, len(array)) + + for _, item := range array { + row := make(table.Row, len(columns)) + for i, col := range columns { + row[i] = formatValue(item.Get(col.Title), raw) + } + rows = append(rows, row) + rowData = append(rowData, item) + } + + t := 
createTable(columns, rows, arrayColor) + return &TableView{path: path, data: data, table: t, rowData: rowData} +} + +func newObjectTableView(path string, data gjson.Result, raw bool) *TableView { + columns := []table.Column{{Title: "Object"}, {}} + + keys := data.Get("@keys").Array() + rows := make([]table.Row, 0, len(keys)) + rowData := make([]gjson.Result, 0, len(keys)) + + for _, key := range keys { + value := data.Get(key.Str) + title := key.Str + rows = append(rows, table.Row{title, formatValue(value, raw)}) + rowData = append(rowData, value) + } + + // Adjust column widths based on content + for _, row := range rows { + for i, cell := range row { + if i < len(columns) { + columns[i].Width = max(columns[i].Width, lipgloss.Width(cell)) + } + } + } + + t := createTable(columns, rows, objectColor) + return &TableView{path: path, data: data, table: t, rowData: rowData} +} + +func createTable(columns []table.Column, rows []table.Row, bgColor lipgloss.Color) table.Model { + t := table.New( + table.WithColumns(columns), + table.WithRows(rows), + table.WithFocused(true), + ) + + // Set common table styles + s := table.DefaultStyles() + s.Header = s.Header. + BorderStyle(lipgloss.NormalBorder()). + BorderForeground(lipgloss.Color("240")). + BorderBottom(true). + Bold(true) + s.Selected = s.Selected. + Foreground(lipgloss.Color("229")). + Background(bgColor). 
+ Bold(false) + t.SetStyles(s) + + return t +} + +func formatValue(value gjson.Result, raw bool) string { + if raw { + return value.Get("@ugly").Raw + } + + switch { + case value.IsObject(): + return formatObject(value) + case value.IsArray(): + return formatArray(value) + case value.Type == gjson.String: + return value.Str + default: + return value.Raw + } +} + +func formatObject(value gjson.Result) string { + keys := value.Get("@keys").Array() + keyStrs := make([]string, len(keys)) + + for i, key := range keys { + val := value.Get(key.Str) + keyStrs[i] = formatObjectKey(key.Str, val) + } + + return "{" + strings.Join(keyStrs, ", ") + "}" +} + +func formatObjectKey(key string, val gjson.Result) string { + switch { + case val.IsObject(): + return key + ":{…}" + case val.IsArray(): + return key + ":[…]" + case val.Type == gjson.String: + str := val.Str + if lipgloss.Width(str) <= maxPreviewLength { + return fmt.Sprintf(`%s:"%s"`, key, str) + } + return fmt.Sprintf(`%s:"%s…"`, key, truncate.String(str, uint(maxPreviewLength))) + default: + return key + ":" + val.Raw + } +} + +func formatArray(value gjson.Result) string { + switch count := len(value.Array()); count { + case 0: + return "[]" + case 1: + return "[...1 item...]" + default: + return fmt.Sprintf("[...%d items...]", count) + } +} + +func isArrayOfObjects(array []gjson.Result) bool { + for _, item := range array { + if !item.IsObject() { + return false + } + } + return len(array) > 0 +} + +func sum(ints []int) int { + total := 0 + for _, n := range ints { + total += n + } + return total +} diff --git a/apps/cli/pkg/jsonview/staticdisplay.go b/apps/cli/pkg/jsonview/staticdisplay.go new file mode 100644 index 00000000..768ea34b --- /dev/null +++ b/apps/cli/pkg/jsonview/staticdisplay.go @@ -0,0 +1,135 @@ +package jsonview + +import ( + "fmt" + "os" + "strings" + + "github.com/charmbracelet/lipgloss" + "github.com/charmbracelet/x/term" + "github.com/muesli/reflow/truncate" + "github.com/tidwall/gjson" +) + 
+const ( + tabWidth = 2 +) + +var ( + keyStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("75")).Bold(false) + stringValueStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("113")) + numberValueStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("215")) + boolValueStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("207")) + nullValueStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("245")).Italic(true) + bulletStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("242")) + containerStyle = lipgloss.NewStyle(). + Border(lipgloss.RoundedBorder()). + BorderForeground(lipgloss.Color("63")). + Padding(0, 1) +) + +func formatJSON(json gjson.Result, width int) string { + if !json.Exists() { + return nullValueStyle.Render("Invalid JSON") + } + return formatResult(json, 0, width) +} + +func formatResult(result gjson.Result, indent, width int) string { + switch result.Type { + case gjson.String: + str := result.Str + if str == "" { + return nullValueStyle.Render("(empty)") + } + if lipgloss.Width(str) > width { + str = truncate.String(str, uint(width-1)) + "…" + } + return stringValueStyle.Render(str) + case gjson.Number: + return numberValueStyle.Render(result.Raw) + case gjson.True: + return boolValueStyle.Render("yes") + case gjson.False: + return boolValueStyle.Render("no") + case gjson.Null: + return nullValueStyle.Render("null") + case gjson.JSON: + if result.IsArray() { + return formatJSONArray(result, indent, width) + } + return formatJSONObject(result, indent, width) + default: + return stringValueStyle.Render(result.String()) + } +} + +func isSingleLine(result gjson.Result, indent int) bool { + return !(result.IsObject() || result.IsArray()) +} + +func formatJSONArray(result gjson.Result, indent, width int) string { + items := result.Array() + if len(items) == 0 { + return nullValueStyle.Render(" (none)") + } + + numberWidth := lipgloss.Width(fmt.Sprintf("%d. 
", len(items))) + + var formattedItems []string + for i, item := range items { + number := fmt.Sprintf("%d.", i+1) + numbering := getIndent(indent) + bulletStyle.Render(number) + + // If the item will be a one-liner, put it inline after the numbering, + // otherwise it starts with a newline and goes below the numbering. + itemWidth := width + if isSingleLine(item, indent+1) { + // Add right-padding: + numbering += strings.Repeat(" ", numberWidth-lipgloss.Width(number)) + itemWidth = width - lipgloss.Width(numbering) + } + value := formatResult(item, indent+1, itemWidth) + formattedItems = append(formattedItems, numbering+value) + } + return "\n" + strings.Join(formattedItems, "\n") +} + +func formatJSONObject(result gjson.Result, indent, width int) string { + keys := result.Get("@keys").Array() + if len(keys) == 0 { + return nullValueStyle.Render("(empty)") + } + + var items []string + for _, key := range keys { + value := result.Get(key.String()) + keyStr := getIndent(indent) + keyStyle.Render(key.String()+":") + // If item will be a one-liner, put it inline after the key, otherwise + // it starts with a newline and goes below the key. 
+ itemWidth := width + if isSingleLine(value, indent+1) { + keyStr += " " + itemWidth = width - lipgloss.Width(keyStr) + } + formattedValue := formatResult(value, indent+1, itemWidth) + items = append(items, keyStr+formattedValue) + } + + return "\n" + strings.Join(items, "\n") +} + +func getIndent(indent int) string { + return strings.Repeat(" ", indent*tabWidth) +} + +func RenderJSON(title string, json gjson.Result) string { + width, _, err := term.GetSize(os.Stdout.Fd()) + if err != nil { + width = 80 + } + width -= containerStyle.GetBorderLeftSize() + containerStyle.GetBorderRightSize() + + containerStyle.GetPaddingLeft() + containerStyle.GetPaddingRight() + content := strings.TrimLeft(formatJSON(json, width), "\n") + return titleStyle.Render(title) + "\n" + containerStyle.Render(content) +} diff --git a/apps/cli/release-please-config.json b/apps/cli/release-please-config.json new file mode 100644 index 00000000..53619de6 --- /dev/null +++ b/apps/cli/release-please-config.json @@ -0,0 +1,67 @@ +{ + "packages": { + ".": {} + }, + "$schema": "https://raw.githubusercontent.com/stainless-api/release-please/main/schemas/config.json", + "include-v-in-tag": true, + "include-component-in-tag": false, + "versioning": "prerelease", + "prerelease": true, + "bump-minor-pre-major": true, + "bump-patch-for-minor-pre-major": false, + "pull-request-header": "Automated Release PR", + "pull-request-title-pattern": "release: ${version}", + "changelog-sections": [ + { + "type": "feat", + "section": "Features" + }, + { + "type": "fix", + "section": "Bug Fixes" + }, + { + "type": "perf", + "section": "Performance Improvements" + }, + { + "type": "revert", + "section": "Reverts" + }, + { + "type": "chore", + "section": "Chores" + }, + { + "type": "docs", + "section": "Documentation" + }, + { + "type": "style", + "section": "Styles" + }, + { + "type": "refactor", + "section": "Refactors" + }, + { + "type": "test", + "section": "Tests", + "hidden": true + }, + { + "type": "build", + 
 "section": "Build System" + }, + { + "type": "ci", + "section": "Continuous Integration", + "hidden": true + } + ], + "release-type": "simple", + "extra-files": [ + "pkg/cmd/version.go", + "README.md" + ] +} \ No newline at end of file diff --git a/apps/cli/scripts/bootstrap b/apps/cli/scripts/bootstrap new file mode 100755 index 00000000..a73aff96 --- /dev/null +++ b/apps/cli/scripts/bootstrap @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ] && [ "$SKIP_BREW" != "1" ] && [ -t 0 ]; then + brew bundle check >/dev/null 2>&1 || { + echo -n "==> Install Homebrew dependencies? (y/N): " + read -r response + case "$response" in + [yY][eE][sS]|[yY]) + brew bundle + ;; + *) + ;; + esac + echo + } +fi + +echo "==> Installing Go dependencies…" + +go mod tidy diff --git a/apps/cli/scripts/build b/apps/cli/scripts/build new file mode 100755 index 00000000..0ca9e5b4 --- /dev/null +++ b/apps/cli/scripts/build @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +echo "==> Building hypeman" +go build ./cmd/hypeman diff --git a/apps/cli/scripts/checkout-preview b/apps/cli/scripts/checkout-preview new file mode 100755 index 00000000..3c147200 --- /dev/null +++ b/apps/cli/scripts/checkout-preview @@ -0,0 +1,72 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +# Checkout a preview branch from the stainless-sdks mirror. +# +# Usage: +# ./scripts/checkout-preview <branch>        # Checkout preview/<branch> +# ./scripts/checkout-preview -b <branch>     # Checkout exact branch name + +STAINLESS_REMOTE="git@github.com:stainless-sdks/hypeman-cli.git" + +usage() { + echo "Usage: $0 [-b] <branch>" + echo "" + echo "Checkout a branch from the stainless-sdks/hypeman-cli mirror." 
+ echo "" + echo "Options:" + echo " -b Use exact branch name instead of preview/" + echo "" + echo "Examples:" + echo " $0 devices # Checkout preview/devices" + echo " $0 -b main # Checkout main" + echo " $0 -b preview/foo # Checkout preview/foo" + exit 1 +} + +ensure_stainless_remote() { + if ! git remote get-url stainless >/dev/null 2>&1; then + echo "==> Adding stainless remote..." + git remote add stainless "${STAINLESS_REMOTE}" + fi +} + +EXACT_BRANCH=false + +while getopts "bh" opt; do + case $opt in + b) EXACT_BRANCH=true ;; + h) usage ;; + *) usage ;; + esac +done +shift $((OPTIND - 1)) + +if [ $# -lt 1 ]; then + usage +fi + +BRANCH="$1" + +if [ "$EXACT_BRANCH" = false ]; then + BRANCH="preview/${BRANCH}" +fi + +ensure_stainless_remote + +echo "==> Fetching from stainless remote..." +git fetch stainless + +echo "==> Checking out ${BRANCH}..." +git checkout -B "${BRANCH}" "stainless/${BRANCH}" + +echo "" +echo "Switched to branch '${BRANCH}' tracking stainless/${BRANCH}" + +# Point the hypeman-go SDK dependency to the corresponding preview branch +echo "" +SCRIPT_DIR="$(dirname "$0")" +"${SCRIPT_DIR}/use-sdk-preview" "${BRANCH}" diff --git a/apps/cli/scripts/format b/apps/cli/scripts/format new file mode 100755 index 00000000..db2a3fa2 --- /dev/null +++ b/apps/cli/scripts/format @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +echo "==> Running gofmt -s -w" +gofmt -s -w . diff --git a/apps/cli/scripts/link b/apps/cli/scripts/link new file mode 100755 index 00000000..5c7f88e4 --- /dev/null +++ b/apps/cli/scripts/link @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." 
+ +if [[ -n "$1" ]]; then + LOCAL_GO="$1" + shift +else + LOCAL_GO=../hypeman-go +fi + +echo "==> Linking with local directory" +go mod tidy -e +go mod edit -replace github.com/kernel/hypeman-go="$LOCAL_GO" diff --git a/apps/cli/scripts/lint b/apps/cli/scripts/lint new file mode 100755 index 00000000..fa7ba1f6 --- /dev/null +++ b/apps/cli/scripts/lint @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +echo "==> Running Go build" +go build ./... diff --git a/apps/cli/scripts/mock b/apps/cli/scripts/mock new file mode 100755 index 00000000..0b28f6ea --- /dev/null +++ b/apps/cli/scripts/mock @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +if [[ -n "$1" && "$1" != '--'* ]]; then + URL="$1" + shift +else + URL="$(grep 'openapi_spec_url' .stats.yml | cut -d' ' -f2)" +fi + +# Check if the URL is empty +if [ -z "$URL" ]; then + echo "Error: No OpenAPI spec path/url provided or found in .stats.yml" + exit 1 +fi + +echo "==> Starting mock server with URL ${URL}" + +# Run prism mock on the given spec +if [ "$1" == "--daemon" ]; then + npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL" &> .prism.log & + + # Wait for server to come online + echo -n "Waiting for server" + while ! grep -q "✖ fatal\|Prism is listening" ".prism.log" ; do + echo -n "." + sleep 0.1 + done + + if grep -q "✖ fatal" ".prism.log"; then + cat .prism.log + exit 1 + fi + + echo +else + npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL" +fi diff --git a/apps/cli/scripts/run b/apps/cli/scripts/run new file mode 100755 index 00000000..7818f0ee --- /dev/null +++ b/apps/cli/scripts/run @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +go run ./cmd/hypeman "$@" diff --git a/apps/cli/scripts/test b/apps/cli/scripts/test new file mode 100755 index 00000000..c26b1222 --- /dev/null +++ b/apps/cli/scripts/test @@ -0,0 +1,56 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." 
+ +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[0;33m' +NC='\033[0m' # No Color + +function prism_is_running() { + curl --silent "http://localhost:4010" >/dev/null 2>&1 +} + +kill_server_on_port() { + pids=$(lsof -t -i tcp:"$1" || echo "") + if [ "$pids" != "" ]; then + kill "$pids" + echo "Stopped $pids." + fi +} + +function is_overriding_api_base_url() { + [ -n "$TEST_API_BASE_URL" ] +} + +if ! is_overriding_api_base_url && ! prism_is_running ; then + # When we exit this script, make sure to kill the background mock server process + trap 'kill_server_on_port 4010' EXIT + + # Start the dev server + ./scripts/mock --daemon +fi + +if is_overriding_api_base_url ; then + echo -e "${GREEN}✔ Running tests against ${TEST_API_BASE_URL}${NC}" + echo +elif ! prism_is_running ; then + echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server" + echo -e "running against your OpenAPI spec." + echo + echo -e "To run the server, pass in the path or url of your OpenAPI" + echo -e "spec to the prism command:" + echo + echo -e " \$ ${YELLOW}npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock path/to/your.openapi.yml${NC}" + echo + + exit 1 +else + echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}" + echo +fi + +echo "==> Running tests" +go test ./... "$@" diff --git a/apps/cli/scripts/unlink b/apps/cli/scripts/unlink new file mode 100755 index 00000000..e96c0dc4 --- /dev/null +++ b/apps/cli/scripts/unlink @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +echo "==> Unlinking with local directory" +go mod edit -dropreplace github.com/kernel/hypeman-go diff --git a/apps/cli/scripts/use-sdk-preview b/apps/cli/scripts/use-sdk-preview new file mode 100755 index 00000000..8518d610 --- /dev/null +++ b/apps/cli/scripts/use-sdk-preview @@ -0,0 +1,98 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# Point the kernel/hypeman-go dependency to stainless-sdks/hypeman-go at a specific branch. 
+#
+# Usage:
+#   ./scripts/use-sdk-preview <branch>
+#   ./scripts/use-sdk-preview <commit-sha>
+#
+# This modifies go.mod with a replace directive. Do not commit this change.
+
+cd "$(dirname "$0")/.."
+
+STAINLESS_SDK_REPO="github.com/stainless-sdks/hypeman-go"
+PUBLIC_SDK_REPO="github.com/kernel/hypeman-go"
+
+# Ensure the Go toolchain can access the private SDK repository.
+if [[ -z "${GOPRIVATE:-}" ]]; then
+  export GOPRIVATE="$STAINLESS_SDK_REPO"
+elif [[ "$GOPRIVATE" != *"$STAINLESS_SDK_REPO"* ]]; then
+  export GOPRIVATE="${GOPRIVATE},${STAINLESS_SDK_REPO}"
+fi
+
+# Ensure git rewrites GitHub HTTPS URLs to SSH for private repo access.
+if ! git config --global --get-all url."git@github.com:".insteadOf | grep -q "https://github.com/"; then
+  echo "Your git configuration is missing the rewrite from HTTPS to SSH for GitHub repositories." >&2
+  echo "Run the following command and try again:" >&2
+  echo "  git config --global url.\"git@github.com:\".insteadOf \"https://github.com/\"" >&2
+  exit 1
+fi
+
+usage() {
+  echo "Usage: $(basename "$0") <ref>" >&2
+  echo "" >&2
+  echo "Point the hypeman-go SDK dependency to stainless-sdks/hypeman-go at a specific ref." >&2
+  echo "" >&2
+  echo "Examples:" >&2
+  echo "  $(basename "$0") preview/devices   # Use preview/devices branch" >&2
+  echo "  $(basename "$0") main              # Use main branch" >&2
+  echo "  $(basename "$0") abc1234           # Use specific commit" >&2
+  exit 1
+}
+
+if [ "$#" -ne 1 ]; then
+  usage
+fi
+
+ref="$1"
+commit=""
+tmp_dir="/tmp/hypeman-go"
+
+# Clone the stainless-sdks/hypeman-go repo (shallow clone for speed)
+rm -rf "$tmp_dir"
+echo "==> Cloning stainless-sdks/hypeman-go..."
+git clone --filter=blob:none --quiet git@github.com:stainless-sdks/hypeman-go "$tmp_dir" + +# Determine the commit hash corresponding to the provided ref +pushd "$tmp_dir" >/dev/null + +# If the ref looks like a commit SHA (7-40 hex chars), use it directly; otherwise treat it as a branch +if [[ "$ref" =~ ^[0-9a-f]{7,40}$ ]]; then + commit="$ref" +else + # Fetch the branch and resolve its HEAD commit hash + git fetch --depth=1 origin "$ref" >/dev/null 2>&1 || { + echo "Error: failed to fetch branch '$ref' from stainless-sdks/hypeman-go." >&2 + popd >/dev/null + exit 1 + } + commit=$(git rev-parse FETCH_HEAD) +fi + +# Compute the Go pseudo-version for the resolved commit +gomod_version=$(git show -s --abbrev=12 \ + --date=format:%Y%m%d%H%M%S \ + --format='v0.0.0-%cd-%h' "$commit") + +popd >/dev/null + +# Verify we're in the CLI module directory +if [ ! -f go.mod ]; then + echo "go.mod not found. Please run this script from the hypeman-cli repository root." >&2 + exit 1 +fi + +echo "==> Updating go.mod to use stainless-sdks/hypeman-go @ $gomod_version..." + +# Remove any existing replace directive for the SDK, then add the new one +go mod edit -dropreplace="$PUBLIC_SDK_REPO" 2>/dev/null || true +go mod edit -replace="$PUBLIC_SDK_REPO"="$STAINLESS_SDK_REPO"@"$gomod_version" +go mod tidy + +echo "" +echo "go.mod updated to use $STAINLESS_SDK_REPO @ $gomod_version" +echo "" +echo "WARNING: Do not commit this change to go.mod/go.sum!" + diff --git a/go.work b/go.work new file mode 100644 index 00000000..5dbeed1c --- /dev/null +++ b/go.work @@ -0,0 +1,7 @@ +go 1.25.4 + +use ( + . 
+ ./apps/cli + ./sdks/go +) diff --git a/go.work.sum b/go.work.sum new file mode 100644 index 00000000..24c9e0e4 --- /dev/null +++ b/go.work.sum @@ -0,0 +1,36 @@ +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f/go.mod h1:HlzOvOjVBOfTGSRXRyY0OiCS/3J1akRGQQpRO/7zyF4= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329/go.mod h1:Alz8LEClvR7xKsrq3qzoc4N0guvVNSS8KmSChGYr9hs= +github.com/envoyproxy/go-control-plane/envoy v1.35.0/go.mod h1:09qwbGVuSWWAyN5t/b3iyVfz5+z8QWGrzkoqm/8SbEs= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= +github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= +github.com/spf13/cobra v1.10.1/go.mod 
h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= diff --git a/openapi.yaml b/openapi.yaml index 8e60ee13..5c94bcc1 100644 --- a/openapi.yaml +++ b/openapi.yaml @@ -593,6 +593,18 @@ components: type: string enum: [ok] example: ok + automation_probe: + type: string + description: Temporary field for CI SDK sync validation + example: probe + automation_probe_phase: + type: string + description: Temporary second field to force SDK regeneration + example: phase-2 + automation_probe_nonce: + type: string + description: Temporary third 
field to force another SDK regeneration + example: n-3 IngressMatch: type: object diff --git a/scripts/e2e-install-test.sh b/scripts/e2e-install-test.sh index 764d0353..84d5046f 100755 --- a/scripts/e2e-install-test.sh +++ b/scripts/e2e-install-test.sh @@ -37,6 +37,15 @@ KEEP_DATA=false bash scripts/uninstall.sh 2>/dev/null || true # ============================================================================= info "Phase 2: Installing from source..." BRANCH=$(git rev-parse --abbrev-ref HEAD) +if [ "$BRANCH" = "HEAD" ]; then + if [ -n "${GITHUB_HEAD_REF:-}" ]; then + BRANCH="$GITHUB_HEAD_REF" + elif [ -n "${GITHUB_REF_NAME:-}" ]; then + BRANCH="$GITHUB_REF_NAME" + else + fail "Unable to determine branch for source install (detached HEAD without GitHub branch context)" + fi +fi # Build CLI from source too when CLI_BRANCH is set (e.g., for testing unreleased CLI features) BRANCH="$BRANCH" CLI_BRANCH="${CLI_BRANCH:-}" bash scripts/install.sh diff --git a/scripts/install.sh b/scripts/install.sh index 896fa12a..73e4bce3 100755 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -9,7 +9,7 @@ # VERSION - Install specific API version (default: latest) # CLI_VERSION - Install specific CLI version (default: latest) # BRANCH - Build from source using this branch (for development/testing) -# CLI_BRANCH - Build CLI from source using this branch of kernel/hypeman-cli +# CLI_BRANCH - Build CLI from source using this branch of kernel/hypeman # BINARY_DIR - Use binaries from this directory instead of building/downloading # INSTALL_DIR - Binary installation directory (default: /opt/hypeman/bin on Linux, /usr/local/bin on macOS) # DATA_DIR - Data directory (default: /var/lib/hypeman on Linux, ~/Library/Application Support/hypeman on macOS) @@ -163,6 +163,11 @@ if [ "$count" -gt 1 ]; then error "BRANCH, VERSION, and BINARY_DIR are mutually exclusive" fi +# When building API from source, default CLI source branch to the same branch. 
+if [ -n "$BRANCH" ] && [ -z "$CLI_BRANCH" ]; then + CLI_BRANCH="$BRANCH" +fi + # Additional checks for build-from-source mode if [ -n "$BRANCH" ]; then command -v git >/dev/null 2>&1 || error "git is required for BRANCH mode but not installed" @@ -629,22 +634,28 @@ fi # Install Hypeman CLI # ============================================================================= -CLI_REPO="kernel/hypeman-cli" +CLI_RELEASE_REPO="kernel/hypeman-cli" CLI_INSTALLED=false if [ -n "$CLI_BRANCH" ]; then # Build CLI from source - info "Building CLI from source (branch: $CLI_BRANCH)..." + info "Building CLI from monorepo source (branch: $CLI_BRANCH)..." command -v go >/dev/null 2>&1 || error "go is required for CLI_BRANCH mode but not installed" + # Reuse API source checkout when possible to avoid re-cloning into the same temp path. CLI_BUILD_DIR="${TMP_DIR}/hypeman-cli" - if ! git clone --branch "$CLI_BRANCH" --depth 1 -q "https://github.com/${CLI_REPO}.git" "$CLI_BUILD_DIR" 2>&1; then - error "Failed to clone CLI repository (branch: $CLI_BRANCH)" + if [ -n "$BRANCH" ] && [ "$CLI_BRANCH" = "$BRANCH" ] && [ -d "${TMP_DIR}/hypeman" ]; then + CLI_BUILD_DIR="${TMP_DIR}/hypeman" + else + rm -rf "$CLI_BUILD_DIR" + if ! git clone --branch "$CLI_BRANCH" --depth 1 -q "https://github.com/${REPO}.git" "$CLI_BUILD_DIR" 2>&1; then + error "Failed to clone monorepo for CLI build (branch: $CLI_BRANCH)" + fi fi info "Compiling CLI..." mkdir -p "${TMP_DIR}/cli-bin" - if ! (cd "$CLI_BUILD_DIR" && go build -o "${TMP_DIR}/cli-bin/hypeman" ./cmd/hypeman 2>&1); then + if ! (cd "$CLI_BUILD_DIR" && go build -o "${TMP_DIR}/cli-bin/hypeman" ./apps/cli/cmd/hypeman 2>&1); then error "Failed to build CLI from source" fi @@ -668,7 +679,7 @@ else if [ -z "$CLI_VERSION" ] || [ "$CLI_VERSION" == "latest" ]; then info "Fetching latest CLI version with available artifacts..." 
- CLI_VERSION=$(find_release_with_artifact "$CLI_REPO" "hypeman" "$CLI_OS" "$ARCH" "$CLI_EXT" || true) + CLI_VERSION=$(find_release_with_artifact "$CLI_RELEASE_REPO" "hypeman" "$CLI_OS" "$ARCH" "$CLI_EXT" || true) if [ -z "$CLI_VERSION" ]; then warn "Failed to find a CLI release with artifacts for ${CLI_OS}/${ARCH}, skipping CLI installation" fi @@ -679,7 +690,7 @@ else CLI_VERSION_NUM="${CLI_VERSION#v}" CLI_ARCHIVE_NAME="hypeman_${CLI_VERSION_NUM}_${CLI_OS}_${ARCH}.${CLI_EXT}" - CLI_DOWNLOAD_URL="https://github.com/${CLI_REPO}/releases/download/${CLI_VERSION}/${CLI_ARCHIVE_NAME}" + CLI_DOWNLOAD_URL="https://github.com/${CLI_RELEASE_REPO}/releases/download/${CLI_VERSION}/${CLI_ARCHIVE_NAME}" info "Downloading CLI ${CLI_ARCHIVE_NAME}..." if curl -fsSL "$CLI_DOWNLOAD_URL" -o "${TMP_DIR}/${CLI_ARCHIVE_NAME}"; then diff --git a/scripts/utm/bootstrap-dev-environment.sh b/scripts/utm/bootstrap-dev-environment.sh index fcd191b5..7406ab77 100755 --- a/scripts/utm/bootstrap-dev-environment.sh +++ b/scripts/utm/bootstrap-dev-environment.sh @@ -100,9 +100,6 @@ mkdir -p ~/code REPOS=( "kernel/hypeman" - "kernel/hypeman-ts" - "kernel/hypeman-go" - "kernel/hypeman-cli" "kernel/linux" ) diff --git a/sdks/go/.devcontainer/devcontainer.json b/sdks/go/.devcontainer/devcontainer.json new file mode 100644 index 00000000..889ae347 --- /dev/null +++ b/sdks/go/.devcontainer/devcontainer.json @@ -0,0 +1,7 @@ +// For format details, see https://aka.ms/devcontainer.json. 
For config options, see the +// README at: https://github.com/devcontainers/templates/tree/main/src/debian +{ + "name": "Development", + "image": "mcr.microsoft.com/devcontainers/go:1.23-bookworm", + "postCreateCommand": "go mod tidy" +} diff --git a/sdks/go/.github/workflows/ci.yml b/sdks/go/.github/workflows/ci.yml new file mode 100644 index 00000000..aa2972c5 --- /dev/null +++ b/sdks/go/.github/workflows/ci.yml @@ -0,0 +1,49 @@ +name: CI +on: + push: + branches-ignore: + - 'generated' + - 'codegen/**' + - 'integrated/**' + - 'stl-preview-head/**' + - 'stl-preview-base/**' + pull_request: + branches-ignore: + - 'stl-preview-head/**' + - 'stl-preview-base/**' + +jobs: + lint: + timeout-minutes: 10 + name: lint + runs-on: ${{ github.repository == 'stainless-sdks/hypeman-go' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + if: github.event_name == 'push' || github.event.pull_request.head.repo.fork + + steps: + - uses: actions/checkout@v6 + + - name: Setup go + uses: actions/setup-go@v5 + with: + go-version-file: ./go.mod + + - name: Run lints + run: ./scripts/lint + test: + timeout-minutes: 10 + name: test + runs-on: ${{ github.repository == 'stainless-sdks/hypeman-go' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + if: github.event_name == 'push' || github.event.pull_request.head.repo.fork + steps: + - uses: actions/checkout@v6 + + - name: Setup go + uses: actions/setup-go@v5 + with: + go-version-file: ./go.mod + + - name: Bootstrap + run: ./scripts/bootstrap + + - name: Run tests + run: ./scripts/test diff --git a/sdks/go/.github/workflows/update-cli-coverage.yml b/sdks/go/.github/workflows/update-cli-coverage.yml new file mode 100644 index 00000000..1ced9fef --- /dev/null +++ b/sdks/go/.github/workflows/update-cli-coverage.yml @@ -0,0 +1,368 @@ +name: Update CLI Coverage + +on: + push: + branches: + - main + workflow_dispatch: + inputs: + pr_number: + description: 'PR number to use for context (leave empty to use most recent merged PR)' + required: false + type: 
string + +# Only run one instance at a time; cancel older runs when a new push arrives +concurrency: + group: update-cli-coverage + cancel-in-progress: true + +permissions: + contents: read + +jobs: + update-cli-coverage: + runs-on: ubuntu-latest + steps: + - name: Generate app token + id: app-token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ secrets.ADMIN_APP_ID }} + private-key: ${{ secrets.ADMIN_APP_PRIVATE_KEY }} + owner: kernel + + - name: Get PR info for manual dispatch + id: pr-info + if: github.event_name == 'workflow_dispatch' + env: + GH_TOKEN: ${{ steps.app-token.outputs.token }} + run: | + if [ -n "${{ inputs.pr_number }}" ]; then + # Use provided PR number + PR_NUMBER="${{ inputs.pr_number }}" + echo "Using provided PR number: $PR_NUMBER" + else + # Get most recent merged PR + PR_NUMBER=$(gh pr list --repo ${{ github.repository }} --state merged --limit 1 --json number --jq '.[0].number') + echo "Using most recent merged PR: $PR_NUMBER" + fi + + if [ -z "$PR_NUMBER" ]; then + echo "No PR found, will use HEAD commit" + echo "has_pr=false" >> $GITHUB_OUTPUT + else + # Get PR details + PR_DATA=$(gh pr view "$PR_NUMBER" --repo ${{ github.repository }} --json mergeCommit,author,title) + MERGE_SHA=$(echo "$PR_DATA" | jq -r '.mergeCommit.oid // empty') + PR_AUTHOR=$(echo "$PR_DATA" | jq -r '.author.login // empty') + PR_TITLE=$(echo "$PR_DATA" | jq -r '.title // empty') + + echo "PR #$PR_NUMBER: $PR_TITLE" + echo "Merge commit: $MERGE_SHA" + echo "Author: $PR_AUTHOR" + + echo "has_pr=true" >> $GITHUB_OUTPUT + echo "pr_number=$PR_NUMBER" >> $GITHUB_OUTPUT + echo "merge_sha=$MERGE_SHA" >> $GITHUB_OUTPUT + echo "pr_author=$PR_AUTHOR" >> $GITHUB_OUTPUT + fi + + - name: Checkout SDK repo + uses: actions/checkout@v4 + with: + fetch-depth: 2 + fetch-tags: true + # For manual dispatch with a specific PR, checkout the merge commit + ref: ${{ steps.pr-info.outputs.merge_sha || github.sha }} + + - name: Install Cursor CLI + run: | + curl 
https://cursor.com/install -fsS | bash + echo "$HOME/.cursor/bin" >> $GITHUB_PATH + + - name: Configure git identity + run: | + git config --global user.name "kernel-internal[bot]" + git config --global user.email "260533166+kernel-internal[bot]@users.noreply.github.com" + + - name: Setup Go + uses: actions/setup-go@v6 + with: + go-version: 'stable' + + - name: Clone API repo + env: + GH_TOKEN: ${{ steps.app-token.outputs.token }} + run: | + gh repo clone kernel/hypeman /tmp/hypeman -- --depth=1 + + - name: Clone CLI repo and checkout existing branch + env: + GH_TOKEN: ${{ steps.app-token.outputs.token }} + run: | + gh repo clone kernel/hypeman-cli /tmp/hypeman-cli + cd /tmp/hypeman-cli + + # Try to fetch the cli-coverage-update branch from remote + if git fetch origin cli-coverage-update 2>/dev/null; then + echo "Branch cli-coverage-update exists, checking it out..." + git checkout cli-coverage-update + # Merge latest main to keep it up to date + git merge origin/main -m "Merge main into cli-coverage-update" --no-edit || true + else + echo "Branch cli-coverage-update does not exist, creating from main..." 
+ git checkout -b cli-coverage-update + fi + + - name: Get SDK version info + id: sdk-version + run: | + # Get the latest tag if available, otherwise use commit SHA + LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "") + if [ -n "$LATEST_TAG" ]; then + echo "version=$LATEST_TAG" >> $GITHUB_OUTPUT + echo "SDK version: $LATEST_TAG" + else + CURRENT_SHA="${{ steps.pr-info.outputs.merge_sha || github.sha }}" + echo "version=$CURRENT_SHA" >> $GITHUB_OUTPUT + echo "SDK version: $CURRENT_SHA (no tag)" + fi + + # Get the module path from go.mod + MODULE_PATH=$(head -1 go.mod | awk '{print $2}') + echo "module=$MODULE_PATH" >> $GITHUB_OUTPUT + echo "SDK module: $MODULE_PATH" + + # Determine the commit author (from PR info for manual dispatch, or from push event) + if [ -n "${{ steps.pr-info.outputs.pr_author }}" ]; then + echo "author=${{ steps.pr-info.outputs.pr_author }}" >> $GITHUB_OUTPUT + else + echo "author=${{ github.event.head_commit.author.username || github.actor }}" >> $GITHUB_OUTPUT + fi + + - name: Update CLI coverage + env: + CURSOR_API_KEY: ${{ secrets.CURSOR_API_KEY }} + GH_TOKEN: ${{ steps.app-token.outputs.token }} + BRANCH_PREFIX: cli-coverage-update + run: | + cursor-agent -p "You are a CLI updater that implements missing CLI commands based on SDK updates. + + The GitHub CLI is available as \`gh\` and authenticated via GH_TOKEN. Git is available. You have write access to the CLI repository (kernel/hypeman-cli). 
+ + # Context + - SDK Repo: ${{ github.repository }} (current directory) + - SDK Module: ${{ steps.sdk-version.outputs.module }} + - SDK Version: ${{ steps.sdk-version.outputs.version }} + - Commit SHA: ${{ steps.pr-info.outputs.merge_sha || github.sha }} + - Commit Author: ${{ steps.sdk-version.outputs.author }} + - Trigger: ${{ github.event_name }} ${{ inputs.pr_number && format('(PR #{0})', inputs.pr_number) || '' }} + - API Repo Location: /tmp/hypeman + - CLI Repo Location: /tmp/hypeman-cli + - Update Branch Prefix: cli-coverage-update + + # Background + The Go SDK (this repo) was just updated by Stainless, and may contain new API methods. The CLI (kernel/hypeman-cli) needs to be updated to expose these new methods as CLI commands. + + # Source Files + - SDK api.md: Current directory - READ THIS FILE FIRST. This is the authoritative list of all SDK methods and their signatures. + - SDK *.go files: Current directory - Contains param structs (e.g., InstanceNewParams, ImageNewParams) with all available options/fields. + - API Spec: /tmp/hypeman/stainless.yaml - SDK configuration with resources and methods + - API Spec: /tmp/hypeman/openapi.yaml - Full OpenAPI specification. CHECK for x-cli-skip: true on endpoints - skip those from CLI coverage. + - CLI: /tmp/hypeman-cli - Existing CLI commands (in pkg/cmd/ directory) + + # CLI Architecture + The CLI uses urfave/cli/v3 (NOT cobra). 
Commands are defined in /tmp/hypeman-cli/pkg/cmd/: + - Root command: pkg/cmd/cmd.go + - Resource commands: pkg/cmd/{resource}cmd.go (e.g., imagecmd.go, volumecmd.go, devicecmd.go, ingresscmd.go) + - Top-level commands: pkg/cmd/{command}.go (e.g., run.go, ps.go, rm.go, logs.go, exec.go, cp.go) + - Lifecycle commands: pkg/cmd/lifecycle.go (stop, start, standby, restore) + - Build commands: pkg/cmd/build.go + - Utilities: pkg/cmd/cmdutil.go, pkg/cmd/format.go + - Entry point: cmd/hypeman/main.go + + # Task + + ## Step 1: Update SDK Version (ALWAYS DO THIS FIRST) + - Go to /tmp/hypeman-cli + - Update go.mod to require the latest SDK: ${{ steps.sdk-version.outputs.module }}@${{ steps.sdk-version.outputs.version }} + - Run: go get ${{ steps.sdk-version.outputs.module }}@${{ steps.sdk-version.outputs.version }} + - Run: go mod tidy + - This ensures the CLI always uses the latest SDK, even if no new commands are added + + ## Step 2: Full SDK Method Enumeration (CRITICAL - DO NOT SKIP) + You MUST perform a complete enumeration of ALL SDK methods and their parameters. Do NOT rely only on recent commits. + + 2a. Read the api.md file in the SDK repo root. This file lists EVERY SDK method in the format: + - \`client.Resource.Method(ctx, params)\` with links to param types + Extract a complete list of all methods. + + 2b. For EACH SDK method, read the corresponding param type from the Go source files. + For example: + - InstanceNewParams in instance.go -> lists all fields like \`Image\`, \`Region\`, \`VolumeMounts\`, etc. + - ImageNewParams in image.go -> lists all fields like \`Name\`, \`Tag\`, etc. + - VolumeNewParams in volume.go -> lists all fields like \`Name\`, \`Size\`, etc. + Each field in a Params struct represents an option that could be a CLI flag. + + 2c. 
Build a complete SDK coverage matrix: + | SDK Method | SDK Param Type | SDK Param Fields | + |------------|----------------|------------------| + | client.Instances.New | InstanceNewParams | Image, Region, VolumeMounts, ... | + | client.Instances.List | (none) | | + | client.Images.New | ImageNewParams | Name, Tag, ... | + | client.Volumes.New | VolumeNewParams | Name, Size, ... | + | ... | ... | ... | + + ## Step 3: Full CLI Command Enumeration (CRITICAL - DO NOT SKIP) + Enumerate ALL existing CLI commands and their flags. + + 3a. Look at pkg/cmd/ directory for existing command files + 3b. For each command file, extract: + - The command name/path (e.g., \`hypeman run\`, \`hypeman image list\`) + - All flags defined for that command + 3c. Build a CLI coverage matrix: + | CLI Command | CLI Flags | + |-------------|-----------| + | hypeman run | --name, --image, --quiet, ... | + | hypeman ps | --quiet, --format, ... | + | hypeman image list | --format, ... | + | hypeman volume create | --name, ... | + | ... | ... | + + ## Step 4: Gap Analysis (CRITICAL - DO NOT SKIP) + Compare the SDK matrix (Step 2) with the CLI matrix (Step 3) to identify: + + 4a. Missing commands: SDK methods with NO corresponding CLI command + 4b. Missing flags: SDK param fields with NO corresponding CLI flag + 4c. 
Create a gap report: + ## Missing Commands + - client.SomeResource.SomeMethod -> needs new CLI command + + ## Missing Flags + - InstanceNewParams.SomeNewField -> \`hypeman run\` needs --some-new-field + - VolumeNewParams.Region -> \`hypeman volume create\` needs --region + + ## Step 5: Implement Missing Coverage + For each gap identified in Step 4: + - Implement missing commands following existing patterns in pkg/cmd/ + - Add missing flags to existing commands + - Use urfave/cli/v3 for command and flag definitions (NOT cobra) + - Run \`go build ./...\` to verify the code compiles + + ## Step 6: Commit and Push + - You should already be on the cli-coverage-update branch (it was checked out during setup if it existed) + - If you're on main, create/switch to the cli-coverage-update branch + - Commit with message describing SDK version bump and any new commands/flags + - IMPORTANT: Do NOT force push! Use regular \`git push origin cli-coverage-update\` to preserve existing work on the branch + - If push fails due to divergence, pull and rebase first: \`git pull --rebase origin cli-coverage-update\` + - Create or update the PR in kernel/hypeman-cli + + # SDK Method -> CLI Command Mapping Guide + Instance operations use Docker-style top-level commands: + - client.Instances.New() -> hypeman run + - client.Instances.List() -> hypeman ps + - client.Instances.Delete() -> hypeman rm + - client.Instances.Get() -> (used internally by other commands) + - client.Instances.Logs() -> hypeman logs + - client.Instances.Stop() -> hypeman stop + - client.Instances.Start() -> hypeman start + - client.Instances.Standby() -> hypeman standby + - client.Instances.Restore() -> hypeman restore + - client.Instances.Stat() -> (used internally by cp) + + Resource group commands use subcommands: + - client.Images.New() -> hypeman image create + - client.Images.List() -> hypeman image list + - client.Images.Get() -> hypeman image get + - client.Images.Delete() -> hypeman image delete + - 
client.Volumes.New() -> hypeman volume create + - client.Volumes.List() -> hypeman volume list + - client.Volumes.Get() -> hypeman volume get + - client.Volumes.Delete() -> hypeman volume delete + - client.Volumes.NewFromArchive() -> (used internally by push) + - client.Instances.Volumes.Attach() -> hypeman volume attach + - client.Instances.Volumes.Detach() -> hypeman volume detach + - client.Devices.New() -> hypeman device register + - client.Devices.List() -> hypeman device list + - client.Devices.Get() -> hypeman device get + - client.Devices.Delete() -> hypeman device delete + - client.Devices.ListAvailable() -> hypeman device available + - client.Ingresses.New() -> hypeman ingress create + - client.Ingresses.List() -> hypeman ingress list + - client.Ingresses.Get() -> hypeman ingress get + - client.Ingresses.Delete() -> hypeman ingress delete + - client.Builds.New() -> hypeman build create (or used internally) + - client.Builds.List() -> hypeman build list + - client.Builds.Get() -> hypeman build get + - client.Builds.Cancel() -> hypeman build cancel + - client.Builds.Events() -> (used internally for streaming build output) + - client.Resources.Get() -> hypeman resources + - client.Health.Check() -> (internal, no CLI needed) + + # SDK Param Field -> CLI Flag Mapping Guide + - CamelCaseField -> --camel-case-field + - TimeoutSeconds -> --timeout-seconds + - IncludeDeleted -> --include-deleted + - Optional fields use hypeman.Opt() wrapper in SDK calls + + # Implementation Guidelines + - Follow the existing CLI code patterns in /tmp/hypeman-cli/pkg/cmd/ + - Use urfave/cli/v3 for command definitions (cli.Command struct with Flags, Action, etc.) + - Use the Hypeman Go SDK (this repo) for API calls: hypeman.NewClient(getDefaultRequestOptions(cmd)...) 
+ - Use existing helpers: ShowJSON() for output, ResolveInstance() for name resolution, FormatTimeAgo() for timestamps + - Include proper flag definitions with descriptions matching SDK field comments + - Add help text for commands matching SDK method comments + - Handle errors appropriately + - Match the style of existing commands + + # Output Format + After pushing changes, create or update an evergreen PR using gh: + + 1. Check if a PR already exists for the cli-coverage-update branch: + gh pr list --repo kernel/hypeman-cli --head cli-coverage-update --json number + + 2. If PR exists, update it. If not, create a new one. + + If new commands or flags were added: + Title: 'CLI: Update hypeman SDK to and add new commands/flags' + Body: + 'This PR updates the Hypeman Go SDK to ${{ steps.sdk-version.outputs.version }} and adds CLI commands/flags for new SDK methods. + + ## SDK Update + - Updated hypeman-go to ${{ steps.sdk-version.outputs.version }} + + ## Coverage Analysis + This PR was generated by performing a full enumeration of SDK methods and CLI commands. + + ## New Commands + - \`hypeman \` for \`client.Resource.Action()\` + + ## New Flags + - \`--flag-name\` on \`hypeman \` for \`ResourceParams.FieldName\` + + Triggered by: kernel/hypeman-go@${{ steps.pr-info.outputs.merge_sha || github.sha }} + Reviewer: @' + + If only SDK version update (no coverage gaps found): + Title: 'CLI: Update Hypeman Go SDK to ${{ steps.sdk-version.outputs.version }}' + Body: + 'This PR updates the Hypeman Go SDK dependency to the latest version. + + ## SDK Update + - Updated hypeman-go to ${{ steps.sdk-version.outputs.version }} + + ## Coverage Analysis + A full enumeration of SDK methods and CLI commands was performed. No coverage gaps were found. 
+ + Triggered by: kernel/hypeman-go@${{ steps.pr-info.outputs.merge_sha || github.sha }} + Reviewer: @' + + # Constraints + - ALWAYS update the SDK version in go.mod - this is the primary purpose + - ALWAYS perform the full enumeration (Steps 2-4) - this is critical for finding gaps + - ALL SDK methods in api.md MUST have corresponding CLI commands, EXCEPT those marked with x-cli-skip in openapi.yaml or noted as internal-only in the mapping guide above + - SKIP endpoints marked with x-cli-skip: true in openapi.yaml - these are internal endpoints not suitable for CLI + - Streaming methods may have different CLI implementations (e.g., build events are streamed internally) + - Even if no coverage gaps are found, still create a PR for the SDK version bump + - Ensure code compiles before pushing + " --model opus-4.6 --force --output-format=text diff --git a/sdks/go/.gitignore b/sdks/go/.gitignore new file mode 100644 index 00000000..c6d05015 --- /dev/null +++ b/sdks/go/.gitignore @@ -0,0 +1,4 @@ +.prism.log +codegen.log +Brewfile.lock.json +.idea/ diff --git a/sdks/go/.release-please-manifest.json b/sdks/go/.release-please-manifest.json new file mode 100644 index 00000000..d52d2b97 --- /dev/null +++ b/sdks/go/.release-please-manifest.json @@ -0,0 +1,3 @@ +{ + ".": "0.13.0" +} \ No newline at end of file diff --git a/sdks/go/.stats.yml b/sdks/go/.stats.yml new file mode 100644 index 00000000..aac8b522 --- /dev/null +++ b/sdks/go/.stats.yml @@ -0,0 +1,4 @@ +configured_endpoints: 37 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/kernel%2Fhypeman-57c5cb3d3dc57a03333475dc1817875a36eead6be9439c0203d1832c32b068d5.yml +openapi_spec_hash: 14f7b09e6e010020f42981dddeb0b415 +config_hash: d452c139da1e46a44a68b91e8a40de72 diff --git a/sdks/go/Brewfile b/sdks/go/Brewfile new file mode 100644 index 00000000..577e34a4 --- /dev/null +++ b/sdks/go/Brewfile @@ -0,0 +1 @@ +brew "go" diff --git a/sdks/go/CHANGELOG.md b/sdks/go/CHANGELOG.md new file mode 100644 
index 00000000..da39ffc0 --- /dev/null +++ b/sdks/go/CHANGELOG.md @@ -0,0 +1,202 @@ +# Changelog + +## 0.13.0 (2026-02-26) + +Full Changelog: [v0.12.0...v0.13.0](https://github.com/kernel/hypeman-go/compare/v0.12.0...v0.13.0) + +### Features + +* wire up memory_mb and cpus in builds API ([ed09f5a](https://github.com/kernel/hypeman-go/commit/ed09f5a0bc3c60e7ccf6da1101d23311107c7118)) + +## 0.12.0 (2026-02-26) + +Full Changelog: [v0.11.0...v0.12.0](https://github.com/kernel/hypeman-go/compare/v0.11.0...v0.12.0) + +### Features + +* add metadata and state filtering to GET /instances ([8149c9f](https://github.com/kernel/hypeman-go/commit/8149c9fe5e9b36aa5709767e7f3986d6778bd432)) +* Disable default hotplug memory allocation ([4c65d5c](https://github.com/kernel/hypeman-go/commit/4c65d5c271ac3a620da549b47ece553e1860aaf6)) + + +### Bug Fixes + +* allow canceling a request while it is waiting to retry ([daa2281](https://github.com/kernel/hypeman-go/commit/daa2281e6e9833ae4dba2b9b6870014ceb0b2fff)) +* send query params for NewFromArchive ([a8c45a6](https://github.com/kernel/hypeman-go/commit/a8c45a69e83c96137c772d999a115972f0e6a003)) + + +### Chores + +* **internal:** move custom custom `json` tags to `api` ([d04f6ed](https://github.com/kernel/hypeman-go/commit/d04f6ed70c95357f323aa4e76b3a6ad8ebd12ec3)) +* **internal:** remove mock server code ([b511676](https://github.com/kernel/hypeman-go/commit/b51167627fc0cd0f947633cb8694f4ee0756c268)) +* update mock server docs ([d2ae478](https://github.com/kernel/hypeman-go/commit/d2ae478d46fb67550a3b35d4261218f9368709f0)) + +## 0.11.0 (2026-02-15) + +Full Changelog: [v0.10.0...v0.11.0](https://github.com/kernel/hypeman-go/compare/v0.10.0...v0.11.0) + +### Features + +* Add image_name parameter to builds ([36ea383](https://github.com/kernel/hypeman-go/commit/36ea383988ce81b907a893220bbc983e8143a1d2)) + +## 0.10.0 (2026-02-13) + +Full Changelog: [v0.9.8...v0.10.0](https://github.com/kernel/hypeman-go/compare/v0.9.8...v0.10.0) + +### 
Features + +* Add metadata field to instances ([8ce4014](https://github.com/kernel/hypeman-go/commit/8ce40145c0f6db8e928efa9c2909521a6d452579)) +* Better stop behavior ([12cfb4e](https://github.com/kernel/hypeman-go/commit/12cfb4e4ddfc198c65b09efeb73c8db0fd609f5f)) + +## 0.9.8 (2026-02-11) + +Full Changelog: [v0.9.7...v0.9.8](https://github.com/kernel/hypeman-go/compare/v0.9.7...v0.9.8) + +## 0.9.7 (2026-02-11) + +Full Changelog: [v0.9.6...v0.9.7](https://github.com/kernel/hypeman-go/compare/v0.9.6...v0.9.7) + +### Bug Fixes + +* **encoder:** correctly serialize NullStruct ([e693834](https://github.com/kernel/hypeman-go/commit/e693834704b3541d4a5f260b547026bae8a19b1b)) + + +### Refactors + +* cross-platform foundation for macOS support ([8adc4f3](https://github.com/kernel/hypeman-go/commit/8adc4f38026abee34ad85c15509e90f47644a0d0)) + +## 0.9.6 (2026-01-30) + +Full Changelog: [v0.9.0...v0.9.6](https://github.com/kernel/hypeman-go/compare/v0.9.0...v0.9.6) + +### Features + +* add boot time optimizations for faster VM startup ([3992761](https://github.com/kernel/hypeman-go/commit/3992761e3ad8ebb0cc22fb7408199b068e9d8013)) +* Add to stainless config new API endpoints ([de008e8](https://github.com/kernel/hypeman-go/commit/de008e89fadbaedde6554181618fb03c71b49465)) +* **api:** manual updates ([f60e600](https://github.com/kernel/hypeman-go/commit/f60e60015bb9ce18c7083963d9ecd11c980de495)) +* **builds:** implement two-tier build cache with per-repo token scopes ([0e29d03](https://github.com/kernel/hypeman-go/commit/0e29d03d94cf50a0d0e83c323f7ed9f2e15f3e61)) +* **client:** add a convenient param.SetJSON helper ([7fea166](https://github.com/kernel/hypeman-go/commit/7fea1660f3d17d8a35f5d2f6aa352b553785624b)) +* Use resources module for input validation ([af678e8](https://github.com/kernel/hypeman-go/commit/af678e8c794307a6bd47476acff3ca42a7a52546)) + +## 0.9.0 (2026-01-05) + +Full Changelog: [v0.8.0...v0.9.0](https://github.com/kernel/hypeman-go/compare/v0.8.0...v0.9.0) + 
+### Features + +* QEMU support ([d708091](https://github.com/kernel/hypeman-go/commit/d70809169d136df3f1efbf961f2a90084e1f9fa5)) +* Resource accounting ([4141287](https://github.com/kernel/hypeman-go/commit/414128770e8137ed2a40d404f0f4ac06ea1a0731)) + +## 0.8.0 (2025-12-23) + +Full Changelog: [v0.7.0...v0.8.0](https://github.com/kernel/hypeman-go/compare/v0.7.0...v0.8.0) + +### Features + +* add hypeman cp for file copy to/from running VMs ([49ea898](https://github.com/kernel/hypeman-go/commit/49ea89852eed5e0893febc4c68d295a0d1a8bfe5)) +* **encoder:** support bracket encoding form-data object members ([8ab31e8](https://github.com/kernel/hypeman-go/commit/8ab31e89c70baa967842c1c160d0b49db44b089a)) +* gpu passthrough ([067a01b](https://github.com/kernel/hypeman-go/commit/067a01b4ac06e82c2db6b165127144afa18a691d)) + + +### Bug Fixes + +* skip usage tests that don't work with Prism ([d62b246](https://github.com/kernel/hypeman-go/commit/d62b2466715247e7d083ab7ef33040e5da036bd8)) + + +### Chores + +* add float64 to valid types for RegisterFieldValidator ([b4666fd](https://github.com/kernel/hypeman-go/commit/b4666fd1bfcdd17b0a4d4bf88541670cd40c8b1c)) + +## 0.7.0 (2025-12-11) + +Full Changelog: [v0.6.0...v0.7.0](https://github.com/kernel/hypeman-go/compare/v0.6.0...v0.7.0) + +### Features + +* Operational logs over API: hypeman.log, vmm.log ([ec614f5](https://github.com/kernel/hypeman-go/commit/ec614f5bdc0e110f31cec905d6deb7f1d460305b)) +* Support TLS for ingress ([973a5d8](https://github.com/kernel/hypeman-go/commit/973a5d8b65601e70801ed4570f76980d01c92198)) + + +### Bug Fixes + +* incorrect reporting of Stopped, add better error reporting ([dc27cbd](https://github.com/kernel/hypeman-go/commit/dc27cbdc7985c1db74b19501f1eb7a5da6442041)) + +## 0.6.0 (2025-12-06) + +Full Changelog: [v0.5.0...v0.6.0](https://github.com/kernel/hypeman-go/compare/v0.5.0...v0.6.0) + +### Features + +* Start and Stop VM 
([b992228](https://github.com/kernel/hypeman-go/commit/b99222818b197010ba324c2e2477047e5bf13802)) + + +### Bug Fixes + +* **mcp:** correct code tool API endpoint ([0d87152](https://github.com/kernel/hypeman-go/commit/0d8715273698dab9bb6c276352a13605ddd272a5)) +* rename param to avoid collision ([f1ec9d5](https://github.com/kernel/hypeman-go/commit/f1ec9d52e3f5f6c8398bdded04a4ed9cfbd8151b)) + + +### Chores + +* elide duplicate aliases ([9be276f](https://github.com/kernel/hypeman-go/commit/9be276faa6d683ddffe3a21c969b44f13acface0)) +* **internal:** codegen related update ([f3de06d](https://github.com/kernel/hypeman-go/commit/f3de06d220faf866b70829862cd1b76ee4e8fbf8)) + +## 0.5.0 (2025-12-05) + +Full Changelog: [v0.4.0...v0.5.0](https://github.com/kernel/hypeman-go/compare/v0.4.0...v0.5.0) + +### Features + +* add Push and PushImage functions for OCI registry push ([7417cc8](https://github.com/kernel/hypeman-go/commit/7417cc8a56c7d11c535ac7ab9a7b3d21d80bd2b4)) +* Ingress ([c751d1a](https://github.com/kernel/hypeman-go/commit/c751d1a6bba5ca619c03f833f27251c6d3b855a7)) +* Initialize volume with data ([32d4047](https://github.com/kernel/hypeman-go/commit/32d404746df0a3e9d83e7651105e6c6daa16476f)) +* try to fix name collision in codegen ([8173a73](https://github.com/kernel/hypeman-go/commit/8173a73d0317d35870d5a3cec8f3fdec56fcf362)) +* Volume readonly multi-attach ([bac3fd2](https://github.com/kernel/hypeman-go/commit/bac3fd2cee3325dc3d1b31e6077ad1f1ce13340c)) +* Volumes ([099f9b8](https://github.com/kernel/hypeman-go/commit/099f9b8a2553087e117c8c8a9731900081d713f0)) + +## 0.4.0 (2025-11-26) + +Full Changelog: [v0.3.0...v0.4.0](https://github.com/kernel/hypeman-go/compare/v0.3.0...v0.4.0) + +### Features + +* Generate log streaming ([f444c22](https://github.com/kernel/hypeman-go/commit/f444c22bd9eb0ad06e66b3ca167171ddec2836e4)) + +## 0.3.0 (2025-11-26) + +Full Changelog: [v0.2.0...v0.3.0](https://github.com/kernel/hypeman-go/compare/v0.2.0...v0.3.0) + +### Features + +* 
Remove exec from openapi spec ([ee8d1bb](https://github.com/kernel/hypeman-go/commit/ee8d1bb586a130c0b6629603ca4edb489f671889)) + +## 0.2.0 (2025-11-26) + +Full Changelog: [v0.1.0...v0.2.0](https://github.com/kernel/hypeman-go/compare/v0.1.0...v0.2.0) + +### Features + +* **api:** add exec ([f3992ff](https://github.com/kernel/hypeman-go/commit/f3992ffe807e7006a25ae2211cd5cb25fb599bff)) + +## 0.1.0 (2025-11-26) + +Full Changelog: [v0.0.3...v0.1.0](https://github.com/kernel/hypeman-go/compare/v0.0.3...v0.1.0) + +### Features + +* Network manager ([7864aba](https://github.com/kernel/hypeman-go/commit/7864abadad29bcfbb61d2c35a7135ef2407d6c47)) + +## 0.0.3 (2025-11-19) + +Full Changelog: [v0.0.2...v0.0.3](https://github.com/kernel/hypeman-go/compare/v0.0.2...v0.0.3) + +### Bug Fixes + +* **client:** correctly specify Accept header with */* instead of empty ([ac1a646](https://github.com/kernel/hypeman-go/commit/ac1a64697c333aecdc6a463fe760b99635ba8b72)) + +## 0.0.2 (2025-11-11) + +Full Changelog: [v0.0.1...v0.0.2](https://github.com/kernel/hypeman-go/compare/v0.0.1...v0.0.2) + +### Chores + +* update SDK settings ([ecdeb35](https://github.com/kernel/hypeman-go/commit/ecdeb354a1d6a82a1d2afc1742ca02b25eb3218f)) diff --git a/sdks/go/CONTRIBUTING.md b/sdks/go/CONTRIBUTING.md new file mode 100644 index 00000000..7feadf47 --- /dev/null +++ b/sdks/go/CONTRIBUTING.md @@ -0,0 +1,59 @@ +## Setting up the environment + +To set up the repository, run: + +```sh +$ ./scripts/bootstrap +$ ./scripts/lint +``` + +This will install all the required dependencies and build the SDK. + +You can also [install go 1.22+ manually](https://go.dev/doc/install). + +## Modifying/Adding code + +Most of the SDK is generated code. Modifications to code will be persisted between generations, but may +result in merge conflicts between manual patches and changes from the generator. The generator will never +modify the contents of the `lib/` and `examples/` directories. 
+ +## Adding and running examples + +All files in the `examples/` directory are not modified by the generator and can be freely edited or added to. + +```go +# add an example to examples//main.go + +package main + +func main() { + // ... +} +``` + +```sh +$ go run ./examples/ +``` + +## Using the repository from source + +To use a local version of this library from source in another project, edit the `go.mod` with a replace +directive. This can be done through the CLI with the following: + +```sh +$ go mod edit -replace github.com/kernel/hypeman-go=/path/to/hypeman-go +``` + +## Running tests + +```sh +$ ./scripts/test +``` + +## Formatting + +This library uses the standard gofmt code formatter: + +```sh +$ ./scripts/format +``` diff --git a/sdks/go/LICENSE b/sdks/go/LICENSE new file mode 100644 index 00000000..d8dff65d --- /dev/null +++ b/sdks/go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2026 Hypeman + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/sdks/go/README.md b/sdks/go/README.md new file mode 100644 index 00000000..bc895a29 --- /dev/null +++ b/sdks/go/README.md @@ -0,0 +1,515 @@ +# Hypeman Go API Library + + + +Go Reference + + + +The Hypeman Go library provides convenient access to the Hypeman REST API +from applications written in Go. + +It is generated with [Stainless](https://www.stainless.com/). + +## Installation + + + +```go +import ( + "github.com/kernel/hypeman-go" // imported as hypeman +) +``` + + + +Or to pin the version: + + + +```sh +go get -u 'github.com/kernel/hypeman-go@v0.13.0' +``` + + + +## Requirements + +This library requires Go 1.22+. + +## Usage + +The full API of this library can be found in [api.md](api.md). + +```go +package main + +import ( + "context" + "fmt" + + "github.com/kernel/hypeman-go" + "github.com/kernel/hypeman-go/option" +) + +func main() { + client := hypeman.NewClient( + option.WithAPIKey("My API Key"), // defaults to os.LookupEnv("HYPEMAN_API_KEY") + ) + response, err := client.Health.Check(context.TODO()) + if err != nil { + panic(err.Error()) + } + fmt.Printf("%+v\n", response.Status) +} + +``` + +### Request fields + +The hypeman library uses the [`omitzero`](https://tip.golang.org/doc/go1.24#encodingjsonpkgencodingjson) +semantics from the Go 1.24+ `encoding/json` release for request fields. + +Required primitive fields (`int64`, `string`, etc.) 
feature the tag \`json:"...,required"\`. These +fields are always serialized, even their zero values. + +Optional primitive types are wrapped in a `param.Opt[T]`. These fields can be set with the provided constructors, `hypeman.String(string)`, `hypeman.Int(int64)`, etc. + +Any `param.Opt[T]`, map, slice, struct or string enum uses the +tag \`json:"...,omitzero"\`. Its zero value is considered omitted. + +The `param.IsOmitted(any)` function can confirm the presence of any `omitzero` field. + +```go +p := hypeman.ExampleParams{ + ID: "id_xxx", // required property + Name: hypeman.String("..."), // optional property + + Point: hypeman.Point{ + X: 0, // required field will serialize as 0 + Y: hypeman.Int(1), // optional field will serialize as 1 + // ... omitted non-required fields will not be serialized + }, + + Origin: hypeman.Origin{}, // the zero value of [Origin] is considered omitted +} +``` + +To send `null` instead of a `param.Opt[T]`, use `param.Null[T]()`. +To send `null` instead of a struct `T`, use `param.NullStruct[T]()`. + +```go +p.Name = param.Null[string]() // 'null' instead of string +p.Point = param.NullStruct[Point]() // 'null' instead of struct + +param.IsNull(p.Name) // true +param.IsNull(p.Point) // true +``` + +Request structs contain a `.SetExtraFields(map[string]any)` method which can send non-conforming +fields in the request body. Extra fields overwrite any struct fields with a matching +key. For security reasons, only use `SetExtraFields` with trusted data. + +To send a custom value instead of a struct, use `param.Override[T](value)`. 
+ +```go +// In cases where the API specifies a given type, +// but you want to send something else, use [SetExtraFields]: +p.SetExtraFields(map[string]any{ + "x": 0.01, // send "x" as a float instead of int +}) + +// Send a number instead of an object +custom := param.Override[hypeman.FooParams](12) +``` + +### Request unions + +Unions are represented as a struct with fields prefixed by "Of" for each of its variants, +only one field can be non-zero. The non-zero field will be serialized. + +Sub-properties of the union can be accessed via methods on the union struct. +These methods return a mutable pointer to the underlying data, if present. + +```go +// Only one field can be non-zero, use param.IsOmitted() to check if a field is set +type AnimalUnionParam struct { + OfCat *Cat `json:",omitzero,inline` + OfDog *Dog `json:",omitzero,inline` +} + +animal := AnimalUnionParam{ + OfCat: &Cat{ + Name: "Whiskers", + Owner: PersonParam{ + Address: AddressParam{Street: "3333 Coyote Hill Rd", Zip: 0}, + }, + }, +} + +// Mutating a field +if address := animal.GetOwner().GetAddress(); address != nil { + address.ZipCode = 94304 +} +``` + +### Response objects + +All fields in response structs are ordinary value types (not pointers or wrappers). +Response structs also include a special `JSON` field containing metadata about +each property. + +```go +type Animal struct { + Name string `json:"name,nullable"` + Owners int `json:"owners"` + Age int `json:"age"` + JSON struct { + Name respjson.Field + Owner respjson.Field + Age respjson.Field + ExtraFields map[string]respjson.Field + } `json:"-"` +} +``` + +To handle optional data, use the `.Valid()` method on the JSON field. +`.Valid()` returns true if a field is not `null`, not present, or couldn't be marshaled. + +If `.Valid()` is false, the corresponding field will simply be its zero value. 
+ +```go +raw := `{"owners": 1, "name": null}` + +var res Animal +json.Unmarshal([]byte(raw), &res) + +// Accessing regular fields + +res.Owners // 1 +res.Name // "" +res.Age // 0 + +// Optional field checks + +res.JSON.Owners.Valid() // true +res.JSON.Name.Valid() // false +res.JSON.Age.Valid() // false + +// Raw JSON values + +res.JSON.Owners.Raw() // "1" +res.JSON.Name.Raw() == "null" // true +res.JSON.Name.Raw() == respjson.Null // true +res.JSON.Age.Raw() == "" // true +res.JSON.Age.Raw() == respjson.Omitted // true +``` + +These `.JSON` structs also include an `ExtraFields` map containing +any properties in the json response that were not specified +in the struct. This can be useful for API features not yet +present in the SDK. + +```go +body := res.JSON.ExtraFields["my_unexpected_field"].Raw() +``` + +### Response Unions + +In responses, unions are represented by a flattened struct containing all possible fields from each of the +object variants. +To convert it to a variant use the `.AsFooVariant()` method or the `.AsAny()` method if present. + +If a response value union contains primitive values, primitive fields will be alongside +the properties but prefixed with `Of` and feature the tag `json:"...,inline"`. + +```go +type AnimalUnion struct { + // From variants [Dog], [Cat] + Owner Person `json:"owner"` + // From variant [Dog] + DogBreed string `json:"dog_breed"` + // From variant [Cat] + CatBreed string `json:"cat_breed"` + // ... + + JSON struct { + Owner respjson.Field + // ... + } `json:"-"` +} + +// If animal variant +if animal.Owner.Address.ZipCode == "" { + panic("missing zip code") +} + +// Switch on the variant +switch variant := animal.AsAny().(type) { +case Dog: +case Cat: +default: + panic("unexpected type") +} +``` + +### RequestOptions + +This library uses the functional options pattern. Functions defined in the +`option` package return a `RequestOption`, which is a closure that mutates a +`RequestConfig`. 
These options can be supplied to the client or at individual +requests. For example: + +```go +client := hypeman.NewClient( + // Adds a header to every request made by the client + option.WithHeader("X-Some-Header", "custom_header_info"), +) + +client.Health.Check(context.TODO(), ..., + // Override the header + option.WithHeader("X-Some-Header", "some_other_custom_header_info"), + // Add an undocumented field to the request body, using sjson syntax + option.WithJSONSet("some.json.path", map[string]string{"my": "object"}), +) +``` + +The request option `option.WithDebugLog(nil)` may be helpful while debugging. + +See the [full list of request options](https://pkg.go.dev/github.com/kernel/hypeman-go/option). + +### Pagination + +This library provides some conveniences for working with paginated list endpoints. + +You can use `.ListAutoPaging()` methods to iterate through items across all pages: + +Or you can use simple `.List()` methods to fetch a single page and receive a standard response object +with additional helper methods like `.GetNextPage()`, e.g.: + +### Errors + +When the API returns a non-success status code, we return an error with type +`*hypeman.Error`. This contains the `StatusCode`, `*http.Request`, and +`*http.Response` values of the request, as well as the JSON of the error body +(much like other response objects in the SDK). + +To handle errors, we recommend that you use the `errors.As` pattern: + +```go +_, err := client.Health.Check(context.TODO()) +if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + println(string(apierr.DumpRequest(true))) // Prints the serialized HTTP request + println(string(apierr.DumpResponse(true))) // Prints the serialized HTTP response + } + panic(err.Error()) // GET "/health": 400 Bad Request { ... } +} +``` + +When other errors occur, they are returned unwrapped; for example, +if HTTP transport fails, you might receive `*url.Error` wrapping `*net.OpError`. 
+ +### Timeouts + +Requests do not time out by default; use context to configure a timeout for a request lifecycle. + +Note that if a request is [retried](#retries), the context timeout does not start over. +To set a per-retry timeout, use `option.WithRequestTimeout()`. + +```go +// This sets the timeout for the request, including all the retries. +ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) +defer cancel() +client.Health.Check( + ctx, + // This sets the per-retry timeout + option.WithRequestTimeout(20*time.Second), +) +``` + +### File uploads + +Request parameters that correspond to file uploads in multipart requests are typed as +`io.Reader`. The contents of the `io.Reader` will by default be sent as a multipart form +part with the file name of "anonymous_file" and content-type of "application/octet-stream". + +The file name and content-type can be customized by implementing `Name() string` or `ContentType() +string` on the run-time type of `io.Reader`. Note that `os.File` implements `Name() string`, so a +file returned by `os.Open` will be sent with the file name on disk. + +We also provide a helper `hypeman.File(reader io.Reader, filename string, contentType string)` +which can be used to wrap any `io.Reader` with the appropriate file name and content type. + +```go +// A file from the file system +file, err := os.Open("/path/to/file") +hypeman.BuildNewParams{ + Source: file, +} + +// A file from a string +hypeman.BuildNewParams{ + Source: strings.NewReader("my file contents"), +} + +// With a custom filename and contentType +hypeman.BuildNewParams{ + Source: hypeman.File(strings.NewReader(`{"hello": "foo"}`), "file.go", "application/json"), +} +``` + +### Retries + +Certain errors will be automatically retried 2 times by default, with a short exponential backoff. +We retry by default all connection errors, 408 Request Timeout, 409 Conflict, 429 Rate Limit, +and >=500 Internal errors. 
+ +You can use the `WithMaxRetries` option to configure or disable this: + +```go +// Configure the default for all requests: +client := hypeman.NewClient( + option.WithMaxRetries(0), // default is 2 +) + +// Override per-request: +client.Health.Check(context.TODO(), option.WithMaxRetries(5)) +``` + +### Accessing raw response data (e.g. response headers) + +You can access the raw HTTP response data by using the `option.WithResponseInto()` request option. This is useful when +you need to examine response headers, status codes, or other details. + +```go +// Create a variable to store the HTTP response +var response *http.Response +response, err := client.Health.Check(context.TODO(), option.WithResponseInto(&response)) +if err != nil { + // handle error +} +fmt.Printf("%+v\n", response) + +fmt.Printf("Status Code: %d\n", response.StatusCode) +fmt.Printf("Headers: %+#v\n", response.Header) +``` + +### Making custom/undocumented requests + +This library is typed for convenient access to the documented API. If you need to access undocumented +endpoints, params, or response properties, the library can still be used. + +#### Undocumented endpoints + +To make requests to undocumented endpoints, you can use `client.Get`, `client.Post`, and other HTTP verbs. +`RequestOptions` on the client, such as retries, will be respected when making these requests. + +```go +var ( + // params can be an io.Reader, a []byte, an encoding/json serializable object, + // or a "…Params" struct defined in this library. + params map[string]any + + // result can be an []byte, *http.Response, a encoding/json deserializable object, + // or a model defined in this library. + result *http.Response +) +err := client.Post(context.Background(), "/unspecified", params, &result) +if err != nil { + … +} +``` + +#### Undocumented request params + +To make requests using undocumented parameters, you may use either the `option.WithQuerySet()` +or the `option.WithJSONSet()` methods. 
+
+```go
+params := FooNewParams{
+	ID: "id_xxxx",
+	Data: FooNewParamsData{
+		FirstName: hypeman.String("John"),
+	},
+}
+client.Foo.New(context.Background(), params, option.WithJSONSet("data.last_name", "Doe"))
+```
+
+#### Undocumented response properties
+
+To access undocumented response properties, you may either access the raw JSON of the response as a string
+with `result.JSON.RawJSON()`, or get the raw JSON of a particular field on the result with
+`result.JSON.Foo.Raw()`.
+
+Any fields that are not present on the response struct will be saved and can be accessed by `result.JSON.ExtraFields()` which returns the extra fields as a `map[string]Field`.
+
+### Middleware
+
+We provide `option.WithMiddleware` which applies the given
+middleware to requests.
+
+```go
+func Logger(req *http.Request, next option.MiddlewareNext) (res *http.Response, err error) {
+	// Before the request
+	start := time.Now()
+	LogReq(req)
+
+	// Forward the request to the next handler
+	res, err = next(req)
+
+	// Handle stuff after the request
+	end := time.Now()
+	LogRes(res, err, end.Sub(start))
+
+	return res, err
+}
+
+client := hypeman.NewClient(
+	option.WithMiddleware(Logger),
+)
+```
+
+When multiple middlewares are provided as variadic arguments, the middlewares
+are applied left to right. If `option.WithMiddleware` is given
+multiple times, for example first in the client then the method, the
+middleware in the client will run first and the middleware given in the method
+will run next.
+
+You may also replace the default `http.Client` with
+`option.WithHTTPClient(client)`. Only one http client is
+accepted (this overwrites any previous client) and receives requests after any
+middleware has been applied.
+
+## Semantic versioning
+
+This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) conventions, though certain backwards-incompatible changes may be released as minor versions:
+
+1. 
Changes to library internals which are technically public but not intended or documented for external use. _(Please open a GitHub issue to let us know if you are relying on such internals.)_ +2. Changes that we do not expect to impact the vast majority of users in practice. + +We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience. + +We are keen for your feedback; please open an [issue](https://www.github.com/kernel/hypeman-go/issues) with questions, bugs, or suggestions. + +## Development + +### Testing Preview Branches + +When developing features in the main [hypeman](https://github.com/kernel/hypeman) repo, Stainless automatically creates preview branches in `stainless-sdks/hypeman-go` with your API changes. You can check out these branches locally to test the SDK changes: + +```bash +# Checkout preview/ (e.g., if working on "devices" branch in hypeman) +./scripts/checkout-preview devices + +# Checkout an exact branch name +./scripts/checkout-preview -b main +./scripts/checkout-preview -b preview/my-feature +``` + +The script automatically adds the `stainless` remote if it doesn't exist. + +## Contributing + +See [the contributing documentation](./CONTRIBUTING.md). diff --git a/sdks/go/SECURITY.md b/sdks/go/SECURITY.md new file mode 100644 index 00000000..94a5b008 --- /dev/null +++ b/sdks/go/SECURITY.md @@ -0,0 +1,23 @@ +# Security Policy + +## Reporting Security Issues + +This SDK is generated by [Stainless Software Inc](http://stainless.com). Stainless takes security seriously, and encourages you to report any security vulnerability promptly so that appropriate action can be taken. + +To report a security issue, please contact the Stainless team at security@stainless.com. + +## Responsible Disclosure + +We appreciate the efforts of security researchers and individuals who help us maintain the security of +SDKs we generate. 
If you believe you have found a security vulnerability, please adhere to responsible +disclosure practices by allowing us a reasonable amount of time to investigate and address the issue +before making any information public. + +## Reporting Non-SDK Related Security Issues + +If you encounter security issues that are not directly related to SDKs but pertain to the services +or products provided by Hypeman, please follow the respective company's security reporting guidelines. + +--- + +Thank you for helping us keep the SDKs and systems they interact with secure. diff --git a/sdks/go/aliases.go b/sdks/go/aliases.go new file mode 100644 index 00000000..1eefe6d2 --- /dev/null +++ b/sdks/go/aliases.go @@ -0,0 +1,16 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package hypeman + +import ( + "github.com/kernel/hypeman-go/internal/apierror" + "github.com/kernel/hypeman-go/packages/param" +) + +// aliased to make [param.APIUnion] private when embedding +type paramUnion = param.APIUnion + +// aliased to make [param.APIObject] private when embedding +type paramObj = param.APIObject + +type Error = apierror.Error diff --git a/sdks/go/api.md b/sdks/go/api.md new file mode 100644 index 00000000..dfd33e23 --- /dev/null +++ b/sdks/go/api.md @@ -0,0 +1,140 @@ +# Health + +Response Types: + +- hypeman.HealthCheckResponse + +Methods: + +- client.Health.Check(ctx context.Context) (\*hypeman.HealthCheckResponse, error) + +# Images + +Response Types: + +- hypeman.Image + +Methods: + +- client.Images.New(ctx context.Context, body hypeman.ImageNewParams) (\*hypeman.Image, error) +- client.Images.List(ctx context.Context) (\*[]hypeman.Image, error) +- client.Images.Delete(ctx context.Context, name string) error +- client.Images.Get(ctx context.Context, name string) (\*hypeman.Image, error) + +# Instances + +Params Types: + +- hypeman.VolumeMountParam + +Response Types: + +- hypeman.Instance +- hypeman.PathInfo +- hypeman.VolumeMount + 
+Methods: + +- client.Instances.New(ctx context.Context, body hypeman.InstanceNewParams) (\*hypeman.Instance, error) +- client.Instances.List(ctx context.Context, query hypeman.InstanceListParams) (\*[]hypeman.Instance, error) +- client.Instances.Delete(ctx context.Context, id string) error +- client.Instances.Get(ctx context.Context, id string) (\*hypeman.Instance, error) +- client.Instances.Logs(ctx context.Context, id string, query hypeman.InstanceLogsParams) (\*string, error) +- client.Instances.Restore(ctx context.Context, id string) (\*hypeman.Instance, error) +- client.Instances.Standby(ctx context.Context, id string) (\*hypeman.Instance, error) +- client.Instances.Start(ctx context.Context, id string, body hypeman.InstanceStartParams) (\*hypeman.Instance, error) +- client.Instances.Stat(ctx context.Context, id string, query hypeman.InstanceStatParams) (\*hypeman.PathInfo, error) +- client.Instances.Stop(ctx context.Context, id string) (\*hypeman.Instance, error) + +## Volumes + +Methods: + +- client.Instances.Volumes.Attach(ctx context.Context, volumeID string, params hypeman.InstanceVolumeAttachParams) (\*hypeman.Instance, error) +- client.Instances.Volumes.Detach(ctx context.Context, volumeID string, body hypeman.InstanceVolumeDetachParams) (\*hypeman.Instance, error) + +# Volumes + +Response Types: + +- hypeman.Volume +- hypeman.VolumeAttachment + +Methods: + +- client.Volumes.New(ctx context.Context, body hypeman.VolumeNewParams) (\*hypeman.Volume, error) +- client.Volumes.List(ctx context.Context) (\*[]hypeman.Volume, error) +- client.Volumes.Delete(ctx context.Context, id string) error +- client.Volumes.NewFromArchive(ctx context.Context, body io.Reader, params hypeman.VolumeNewFromArchiveParams) (\*hypeman.Volume, error) +- client.Volumes.Get(ctx context.Context, id string) (\*hypeman.Volume, error) + +# Devices + +Response Types: + +- hypeman.AvailableDevice +- hypeman.Device +- hypeman.DeviceType + +Methods: + +- client.Devices.New(ctx 
context.Context, body hypeman.DeviceNewParams) (\*hypeman.Device, error) +- client.Devices.Get(ctx context.Context, id string) (\*hypeman.Device, error) +- client.Devices.List(ctx context.Context) (\*[]hypeman.Device, error) +- client.Devices.Delete(ctx context.Context, id string) error +- client.Devices.ListAvailable(ctx context.Context) (\*[]hypeman.AvailableDevice, error) + +# Ingresses + +Params Types: + +- hypeman.IngressMatchParam +- hypeman.IngressRuleParam +- hypeman.IngressTargetParam + +Response Types: + +- hypeman.Ingress +- hypeman.IngressMatch +- hypeman.IngressRule +- hypeman.IngressTarget + +Methods: + +- client.Ingresses.New(ctx context.Context, body hypeman.IngressNewParams) (\*hypeman.Ingress, error) +- client.Ingresses.List(ctx context.Context) (\*[]hypeman.Ingress, error) +- client.Ingresses.Delete(ctx context.Context, id string) error +- client.Ingresses.Get(ctx context.Context, id string) (\*hypeman.Ingress, error) + +# Resources + +Response Types: + +- hypeman.DiskBreakdown +- hypeman.GPUProfile +- hypeman.GPUResourceStatus +- hypeman.PassthroughDevice +- hypeman.ResourceAllocation +- hypeman.ResourceStatus +- hypeman.Resources + +Methods: + +- client.Resources.Get(ctx context.Context) (\*hypeman.Resources, error) + +# Builds + +Response Types: + +- hypeman.Build +- hypeman.BuildEvent +- hypeman.BuildProvenance +- hypeman.BuildStatus + +Methods: + +- client.Builds.New(ctx context.Context, body hypeman.BuildNewParams) (\*hypeman.Build, error) +- client.Builds.List(ctx context.Context) (\*[]hypeman.Build, error) +- client.Builds.Cancel(ctx context.Context, id string) error +- client.Builds.Events(ctx context.Context, id string, query hypeman.BuildEventsParams) (\*hypeman.BuildEvent, error) +- client.Builds.Get(ctx context.Context, id string) (\*hypeman.Build, error) diff --git a/sdks/go/build.go b/sdks/go/build.go new file mode 100644 index 00000000..48de63b1 --- /dev/null +++ b/sdks/go/build.go @@ -0,0 +1,304 @@ +// File generated from our 
OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package hypeman + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "mime/multipart" + "net/http" + "net/url" + "slices" + "time" + + "github.com/kernel/hypeman-go/internal/apiform" + "github.com/kernel/hypeman-go/internal/apijson" + "github.com/kernel/hypeman-go/internal/apiquery" + "github.com/kernel/hypeman-go/internal/requestconfig" + "github.com/kernel/hypeman-go/option" + "github.com/kernel/hypeman-go/packages/param" + "github.com/kernel/hypeman-go/packages/respjson" + "github.com/kernel/hypeman-go/packages/ssestream" +) + +// BuildService contains methods and other services that help with interacting with +// the hypeman API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewBuildService] method instead. +type BuildService struct { + Options []option.RequestOption +} + +// NewBuildService generates a new service that applies the given options to each +// request. These options are applied after the parent client's options (if there +// is one), and before any request-specific options. +func NewBuildService(opts ...option.RequestOption) (r BuildService) { + r = BuildService{} + r.Options = opts + return +} + +// Creates a new build job. Source code should be uploaded as a tar.gz archive in +// the multipart form data. +func (r *BuildService) New(ctx context.Context, body BuildNewParams, opts ...option.RequestOption) (res *Build, err error) { + opts = slices.Concat(r.Options, opts) + path := "builds" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) 
+ return +} + +// List builds +func (r *BuildService) List(ctx context.Context, opts ...option.RequestOption) (res *[]Build, err error) { + opts = slices.Concat(r.Options, opts) + path := "builds" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) + return +} + +// Cancel build +func (r *BuildService) Cancel(ctx context.Context, id string, opts ...option.RequestOption) (err error) { + opts = slices.Concat(r.Options, opts) + opts = append([]option.RequestOption{option.WithHeader("Accept", "*/*")}, opts...) + if id == "" { + err = errors.New("missing required id parameter") + return + } + path := fmt.Sprintf("builds/%s", id) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodDelete, path, nil, nil, opts...) + return +} + +// Streams build events as Server-Sent Events. Events include: +// +// - `log`: Build log lines with timestamp and content +// - `status`: Build status changes (queued→building→pushing→ready/failed) +// - `heartbeat`: Keep-alive events sent every 30s to prevent connection timeouts +// +// Returns existing logs as events, then continues streaming if follow=true. +func (r *BuildService) EventsStreaming(ctx context.Context, id string, query BuildEventsParams, opts ...option.RequestOption) (stream *ssestream.Stream[BuildEvent]) { + var ( + raw *http.Response + err error + ) + opts = slices.Concat(r.Options, opts) + opts = append([]option.RequestOption{option.WithHeader("Accept", "text/event-stream")}, opts...) + if id == "" { + err = errors.New("missing required id parameter") + return + } + path := fmt.Sprintf("builds/%s/events", id) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, query, &raw, opts...) 
+ return ssestream.NewStream[BuildEvent](ssestream.NewDecoder(raw), err) +} + +// Get build details +func (r *BuildService) Get(ctx context.Context, id string, opts ...option.RequestOption) (res *Build, err error) { + opts = slices.Concat(r.Options, opts) + if id == "" { + err = errors.New("missing required id parameter") + return + } + path := fmt.Sprintf("builds/%s", id) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) + return +} + +type Build struct { + // Build job identifier + ID string `json:"id" api:"required"` + // Build creation timestamp + CreatedAt time.Time `json:"created_at" api:"required" format:"date-time"` + // Build job status + // + // Any of "queued", "building", "pushing", "ready", "failed", "cancelled". + Status BuildStatus `json:"status" api:"required"` + // Instance ID of the builder VM (for debugging) + BuilderInstanceID string `json:"builder_instance_id" api:"nullable"` + // Build completion timestamp + CompletedAt time.Time `json:"completed_at" api:"nullable" format:"date-time"` + // Build duration in milliseconds + DurationMs int64 `json:"duration_ms" api:"nullable"` + // Error message (only when status is failed) + Error string `json:"error" api:"nullable"` + // Digest of built image (only when status is ready) + ImageDigest string `json:"image_digest" api:"nullable"` + // Full image reference (only when status is ready) + ImageRef string `json:"image_ref" api:"nullable"` + Provenance BuildProvenance `json:"provenance"` + // Position in build queue (only when status is queued) + QueuePosition int64 `json:"queue_position" api:"nullable"` + // Build start timestamp + StartedAt time.Time `json:"started_at" api:"nullable" format:"date-time"` + // JSON contains metadata for fields, check presence with [respjson.Field.Valid]. 
+ JSON struct { + ID respjson.Field + CreatedAt respjson.Field + Status respjson.Field + BuilderInstanceID respjson.Field + CompletedAt respjson.Field + DurationMs respjson.Field + Error respjson.Field + ImageDigest respjson.Field + ImageRef respjson.Field + Provenance respjson.Field + QueuePosition respjson.Field + StartedAt respjson.Field + ExtraFields map[string]respjson.Field + raw string + } `json:"-"` +} + +// Returns the unmodified JSON received from the API +func (r Build) RawJSON() string { return r.JSON.raw } +func (r *Build) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +type BuildEvent struct { + // Event timestamp + Timestamp time.Time `json:"timestamp" api:"required" format:"date-time"` + // Event type + // + // Any of "log", "status", "heartbeat". + Type BuildEventType `json:"type" api:"required"` + // Log line content (only for type=log) + Content string `json:"content"` + // New build status (only for type=status) + // + // Any of "queued", "building", "pushing", "ready", "failed", "cancelled". + Status BuildStatus `json:"status"` + // JSON contains metadata for fields, check presence with [respjson.Field.Valid]. 
+ JSON struct { + Timestamp respjson.Field + Type respjson.Field + Content respjson.Field + Status respjson.Field + ExtraFields map[string]respjson.Field + raw string + } `json:"-"` +} + +// Returns the unmodified JSON received from the API +func (r BuildEvent) RawJSON() string { return r.JSON.raw } +func (r *BuildEvent) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +// Event type +type BuildEventType string + +const ( + BuildEventTypeLog BuildEventType = "log" + BuildEventTypeStatus BuildEventType = "status" + BuildEventTypeHeartbeat BuildEventType = "heartbeat" +) + +type BuildProvenance struct { + // Pinned base image digest used + BaseImageDigest string `json:"base_image_digest"` + // BuildKit version used + BuildkitVersion string `json:"buildkit_version"` + // Map of lockfile names to SHA256 hashes + LockfileHashes map[string]string `json:"lockfile_hashes"` + // SHA256 hash of source tarball + SourceHash string `json:"source_hash"` + // Build completion timestamp + Timestamp time.Time `json:"timestamp" format:"date-time"` + // JSON contains metadata for fields, check presence with [respjson.Field.Valid]. 
+ JSON struct { + BaseImageDigest respjson.Field + BuildkitVersion respjson.Field + LockfileHashes respjson.Field + SourceHash respjson.Field + Timestamp respjson.Field + ExtraFields map[string]respjson.Field + raw string + } `json:"-"` +} + +// Returns the unmodified JSON received from the API +func (r BuildProvenance) RawJSON() string { return r.JSON.raw } +func (r *BuildProvenance) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +// Build job status +type BuildStatus string + +const ( + BuildStatusQueued BuildStatus = "queued" + BuildStatusBuilding BuildStatus = "building" + BuildStatusPushing BuildStatus = "pushing" + BuildStatusReady BuildStatus = "ready" + BuildStatusFailed BuildStatus = "failed" + BuildStatusCancelled BuildStatus = "cancelled" +) + +type BuildNewParams struct { + // Source tarball (tar.gz) containing application code and optionally a Dockerfile + Source io.Reader `json:"source,omitzero" api:"required" format:"binary"` + // Optional pinned base image digest + BaseImageDigest param.Opt[string] `json:"base_image_digest,omitzero"` + // Tenant-specific cache key prefix + CacheScope param.Opt[string] `json:"cache_scope,omitzero"` + // Number of vCPUs for builder VM (default 2) + CPUs param.Opt[int64] `json:"cpus,omitzero"` + // Dockerfile content. Required if not included in the source tarball. + Dockerfile param.Opt[string] `json:"dockerfile,omitzero"` + // Global cache identifier (e.g., "node", "python", "ubuntu", "browser"). When + // specified, the build will import from cache/global/{key}. Admin builds will also + // export to this location. + GlobalCacheKey param.Opt[string] `json:"global_cache_key,omitzero"` + // Custom image name for the build output. When set, the image is pushed to + // {registry}/{image_name} instead of {registry}/builds/{id}. + ImageName param.Opt[string] `json:"image_name,omitzero"` + // Set to "true" to grant push access to global cache (operator-only). 
Admin builds + // can populate the shared global cache that all tenant builds read from. + IsAdminBuild param.Opt[string] `json:"is_admin_build,omitzero"` + // Memory limit for builder VM in MB (default 2048) + MemoryMB param.Opt[int64] `json:"memory_mb,omitzero"` + // JSON array of secret references to inject during build. Each object has "id" + // (required) for use with --mount=type=secret,id=... Example: [{"id": + // "npm_token"}, {"id": "github_token"}] + Secrets param.Opt[string] `json:"secrets,omitzero"` + // Build timeout (default 600) + TimeoutSeconds param.Opt[int64] `json:"timeout_seconds,omitzero"` + paramObj +} + +func (r BuildNewParams) MarshalMultipart() (data []byte, contentType string, err error) { + buf := bytes.NewBuffer(nil) + writer := multipart.NewWriter(buf) + err = apiform.MarshalRoot(r, writer) + if err == nil { + err = apiform.WriteExtras(writer, r.ExtraFields()) + } + if err != nil { + writer.Close() + return nil, "", err + } + err = writer.Close() + if err != nil { + return nil, "", err + } + return buf.Bytes(), writer.FormDataContentType(), nil +} + +type BuildEventsParams struct { + // Continue streaming new events after initial output + Follow param.Opt[bool] `query:"follow,omitzero" json:"-"` + paramObj +} + +// URLQuery serializes [BuildEventsParams]'s query parameters as `url.Values`. +func (r BuildEventsParams) URLQuery() (v url.Values, err error) { + return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{ + ArrayFormat: apiquery.ArrayQueryFormatComma, + NestedFormat: apiquery.NestedQueryFormatBrackets, + }) +} diff --git a/sdks/go/build_test.go b/sdks/go/build_test.go new file mode 100644 index 00000000..250c1e3d --- /dev/null +++ b/sdks/go/build_test.go @@ -0,0 +1,120 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package hypeman_test + +import ( + "bytes" + "context" + "errors" + "io" + "os" + "testing" + + "github.com/kernel/hypeman-go" + "github.com/kernel/hypeman-go/internal/testutil" + "github.com/kernel/hypeman-go/option" +) + +func TestBuildNewWithOptionalParams(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Builds.New(context.TODO(), hypeman.BuildNewParams{ + Source: io.Reader(bytes.NewBuffer([]byte("some file contents"))), + BaseImageDigest: hypeman.String("base_image_digest"), + CacheScope: hypeman.String("cache_scope"), + CPUs: hypeman.Int(0), + Dockerfile: hypeman.String("dockerfile"), + GlobalCacheKey: hypeman.String("global_cache_key"), + ImageName: hypeman.String("image_name"), + IsAdminBuild: hypeman.String("is_admin_build"), + MemoryMB: hypeman.Int(0), + Secrets: hypeman.String("secrets"), + TimeoutSeconds: hypeman.Int(0), + }) + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBuildList(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Builds.List(context.TODO()) + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBuildCancel(t *testing.T) { + t.Skip("Mock server tests are 
disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + err := client.Builds.Cancel(context.TODO(), "id") + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBuildGet(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Builds.Get(context.TODO(), "id") + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/sdks/go/client.go b/sdks/go/client.go new file mode 100644 index 00000000..eeef91ea --- /dev/null +++ b/sdks/go/client.go @@ -0,0 +1,131 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package hypeman + +import ( + "context" + "net/http" + "os" + "slices" + + "github.com/kernel/hypeman-go/internal/requestconfig" + "github.com/kernel/hypeman-go/option" +) + +// Client creates a struct with services and top level methods that help with +// interacting with the hypeman API. You should not instantiate this client +// directly, and instead use the [NewClient] method instead. 
+type Client struct { + Options []option.RequestOption + Health HealthService + Images ImageService + Instances InstanceService + Volumes VolumeService + Devices DeviceService + Ingresses IngressService + Resources ResourceService + Builds BuildService +} + +// DefaultClientOptions read from the environment (HYPEMAN_API_KEY, +// HYPEMAN_BASE_URL). This should be used to initialize new clients. +func DefaultClientOptions() []option.RequestOption { + defaults := []option.RequestOption{option.WithEnvironmentProduction()} + if o, ok := os.LookupEnv("HYPEMAN_BASE_URL"); ok { + defaults = append(defaults, option.WithBaseURL(o)) + } + if o, ok := os.LookupEnv("HYPEMAN_API_KEY"); ok { + defaults = append(defaults, option.WithAPIKey(o)) + } + return defaults +} + +// NewClient generates a new client with the default option read from the +// environment (HYPEMAN_API_KEY, HYPEMAN_BASE_URL). The option passed in as +// arguments are applied after these default arguments, and all option will be +// passed down to the services and requests that this client makes. +func NewClient(opts ...option.RequestOption) (r Client) { + opts = append(DefaultClientOptions(), opts...) + + r = Client{Options: opts} + + r.Health = NewHealthService(opts...) + r.Images = NewImageService(opts...) + r.Instances = NewInstanceService(opts...) + r.Volumes = NewVolumeService(opts...) + r.Devices = NewDeviceService(opts...) + r.Ingresses = NewIngressService(opts...) + r.Resources = NewResourceService(opts...) + r.Builds = NewBuildService(opts...) + + return +} + +// Execute makes a request with the given context, method, URL, request params, +// response, and request options. This is useful for hitting undocumented endpoints +// while retaining the base URL, auth, retries, and other options from the client. +// +// If a byte slice or an [io.Reader] is supplied to params, it will be used as-is +// for the request body. +// +// The params is by default serialized into the body using [encoding/json]. 
If your +// type implements a MarshalJSON function, it will be used instead to serialize the +// request. If a URLQuery method is implemented, the returned [url.Values] will be +// used as query strings to the url. +// +// If your params struct uses [param.Field], you must provide either [MarshalJSON], +// [URLQuery], and/or [MarshalForm] functions. It is undefined behavior to use a +// struct uses [param.Field] without specifying how it is serialized. +// +// Any "…Params" object defined in this library can be used as the request +// argument. Note that 'path' arguments will not be forwarded into the url. +// +// The response body will be deserialized into the res variable, depending on its +// type: +// +// - A pointer to a [*http.Response] is populated by the raw response. +// - A pointer to a byte array will be populated with the contents of the request +// body. +// - A pointer to any other type uses this library's default JSON decoding, which +// respects UnmarshalJSON if it is defined on the type. +// - A nil value will not read the response body. +// +// For even greater flexibility, see [option.WithResponseInto] and +// [option.WithResponseBodyInto]. +func (r *Client) Execute(ctx context.Context, method string, path string, params any, res any, opts ...option.RequestOption) error { + opts = slices.Concat(r.Options, opts) + return requestconfig.ExecuteNewRequest(ctx, method, path, params, res, opts...) +} + +// Get makes a GET request with the given URL, params, and optionally deserializes +// to a response. See [Execute] documentation on the params and response. +func (r *Client) Get(ctx context.Context, path string, params any, res any, opts ...option.RequestOption) error { + return r.Execute(ctx, http.MethodGet, path, params, res, opts...) +} + +// Post makes a POST request with the given URL, params, and optionally +// deserializes to a response. See [Execute] documentation on the params and +// response. 
+func (r *Client) Post(ctx context.Context, path string, params any, res any, opts ...option.RequestOption) error { + return r.Execute(ctx, http.MethodPost, path, params, res, opts...) +} + +// Put makes a PUT request with the given URL, params, and optionally deserializes +// to a response. See [Execute] documentation on the params and response. +func (r *Client) Put(ctx context.Context, path string, params any, res any, opts ...option.RequestOption) error { + return r.Execute(ctx, http.MethodPut, path, params, res, opts...) +} + +// Patch makes a PATCH request with the given URL, params, and optionally +// deserializes to a response. See [Execute] documentation on the params and +// response. +func (r *Client) Patch(ctx context.Context, path string, params any, res any, opts ...option.RequestOption) error { + return r.Execute(ctx, http.MethodPatch, path, params, res, opts...) +} + +// Delete makes a DELETE request with the given URL, params, and optionally +// deserializes to a response. See [Execute] documentation on the params and +// response. +func (r *Client) Delete(ctx context.Context, path string, params any, res any, opts ...option.RequestOption) error { + return r.Execute(ctx, http.MethodDelete, path, params, res, opts...) +} diff --git a/sdks/go/client_test.go b/sdks/go/client_test.go new file mode 100644 index 00000000..cd17cef0 --- /dev/null +++ b/sdks/go/client_test.go @@ -0,0 +1,351 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package hypeman_test + +import ( + "context" + "fmt" + "io" + "net/http" + "reflect" + "testing" + "time" + + "github.com/kernel/hypeman-go" + "github.com/kernel/hypeman-go/internal" + "github.com/kernel/hypeman-go/option" +) + +type closureTransport struct { + fn func(req *http.Request) (*http.Response, error) +} + +func (t *closureTransport) RoundTrip(req *http.Request) (*http.Response, error) { + return t.fn(req) +} + +func TestUserAgentHeader(t *testing.T) { + var userAgent string + client := hypeman.NewClient( + option.WithAPIKey("My API Key"), + option.WithHTTPClient(&http.Client{ + Transport: &closureTransport{ + fn: func(req *http.Request) (*http.Response, error) { + userAgent = req.Header.Get("User-Agent") + return &http.Response{ + StatusCode: http.StatusOK, + }, nil + }, + }, + }), + ) + client.Health.Check(context.Background()) + if userAgent != fmt.Sprintf("Hypeman/Go %s", internal.PackageVersion) { + t.Errorf("Expected User-Agent to be correct, but got: %#v", userAgent) + } +} + +func TestRetryAfter(t *testing.T) { + retryCountHeaders := make([]string, 0) + client := hypeman.NewClient( + option.WithAPIKey("My API Key"), + option.WithHTTPClient(&http.Client{ + Transport: &closureTransport{ + fn: func(req *http.Request) (*http.Response, error) { + retryCountHeaders = append(retryCountHeaders, req.Header.Get("X-Stainless-Retry-Count")) + return &http.Response{ + StatusCode: http.StatusTooManyRequests, + Header: http.Header{ + http.CanonicalHeaderKey("Retry-After"): []string{"0.1"}, + }, + }, nil + }, + }, + }), + ) + _, err := client.Health.Check(context.Background()) + if err == nil { + t.Error("Expected there to be a cancel error") + } + + attempts := len(retryCountHeaders) + if attempts != 3 { + t.Errorf("Expected %d attempts, got %d", 3, attempts) + } + + expectedRetryCountHeaders := []string{"0", "1", "2"} + if !reflect.DeepEqual(retryCountHeaders, expectedRetryCountHeaders) { + t.Errorf("Expected %v retry count headers, got %v", 
expectedRetryCountHeaders, retryCountHeaders) + } +} + +func TestDeleteRetryCountHeader(t *testing.T) { + retryCountHeaders := make([]string, 0) + client := hypeman.NewClient( + option.WithAPIKey("My API Key"), + option.WithHTTPClient(&http.Client{ + Transport: &closureTransport{ + fn: func(req *http.Request) (*http.Response, error) { + retryCountHeaders = append(retryCountHeaders, req.Header.Get("X-Stainless-Retry-Count")) + return &http.Response{ + StatusCode: http.StatusTooManyRequests, + Header: http.Header{ + http.CanonicalHeaderKey("Retry-After"): []string{"0.1"}, + }, + }, nil + }, + }, + }), + option.WithHeaderDel("X-Stainless-Retry-Count"), + ) + _, err := client.Health.Check(context.Background()) + if err == nil { + t.Error("Expected there to be a cancel error") + } + + expectedRetryCountHeaders := []string{"", "", ""} + if !reflect.DeepEqual(retryCountHeaders, expectedRetryCountHeaders) { + t.Errorf("Expected %v retry count headers, got %v", expectedRetryCountHeaders, retryCountHeaders) + } +} + +func TestOverwriteRetryCountHeader(t *testing.T) { + retryCountHeaders := make([]string, 0) + client := hypeman.NewClient( + option.WithAPIKey("My API Key"), + option.WithHTTPClient(&http.Client{ + Transport: &closureTransport{ + fn: func(req *http.Request) (*http.Response, error) { + retryCountHeaders = append(retryCountHeaders, req.Header.Get("X-Stainless-Retry-Count")) + return &http.Response{ + StatusCode: http.StatusTooManyRequests, + Header: http.Header{ + http.CanonicalHeaderKey("Retry-After"): []string{"0.1"}, + }, + }, nil + }, + }, + }), + option.WithHeader("X-Stainless-Retry-Count", "42"), + ) + _, err := client.Health.Check(context.Background()) + if err == nil { + t.Error("Expected there to be a cancel error") + } + + expectedRetryCountHeaders := []string{"42", "42", "42"} + if !reflect.DeepEqual(retryCountHeaders, expectedRetryCountHeaders) { + t.Errorf("Expected %v retry count headers, got %v", expectedRetryCountHeaders, retryCountHeaders) + } +} 
+ +func TestRetryAfterMs(t *testing.T) { + attempts := 0 + client := hypeman.NewClient( + option.WithAPIKey("My API Key"), + option.WithHTTPClient(&http.Client{ + Transport: &closureTransport{ + fn: func(req *http.Request) (*http.Response, error) { + attempts++ + return &http.Response{ + StatusCode: http.StatusTooManyRequests, + Header: http.Header{ + http.CanonicalHeaderKey("Retry-After-Ms"): []string{"100"}, + }, + }, nil + }, + }, + }), + ) + _, err := client.Health.Check(context.Background()) + if err == nil { + t.Error("Expected there to be a cancel error") + } + if want := 3; attempts != want { + t.Errorf("Expected %d attempts, got %d", want, attempts) + } +} + +func TestContextCancel(t *testing.T) { + client := hypeman.NewClient( + option.WithAPIKey("My API Key"), + option.WithHTTPClient(&http.Client{ + Transport: &closureTransport{ + fn: func(req *http.Request) (*http.Response, error) { + <-req.Context().Done() + return nil, req.Context().Err() + }, + }, + }), + ) + cancelCtx, cancel := context.WithCancel(context.Background()) + cancel() + _, err := client.Health.Check(cancelCtx) + if err == nil { + t.Error("Expected there to be a cancel error") + } +} + +func TestContextCancelDelay(t *testing.T) { + client := hypeman.NewClient( + option.WithAPIKey("My API Key"), + option.WithHTTPClient(&http.Client{ + Transport: &closureTransport{ + fn: func(req *http.Request) (*http.Response, error) { + <-req.Context().Done() + return nil, req.Context().Err() + }, + }, + }), + ) + cancelCtx, cancel := context.WithTimeout(context.Background(), 2*time.Millisecond) + defer cancel() + _, err := client.Health.Check(cancelCtx) + if err == nil { + t.Error("expected there to be a cancel error") + } +} + +func TestContextDeadline(t *testing.T) { + testTimeout := time.After(3 * time.Second) + testDone := make(chan struct{}) + + deadline := time.Now().Add(100 * time.Millisecond) + deadlineCtx, cancel := context.WithDeadline(context.Background(), deadline) + defer cancel() + + go 
func() { + client := hypeman.NewClient( + option.WithAPIKey("My API Key"), + option.WithHTTPClient(&http.Client{ + Transport: &closureTransport{ + fn: func(req *http.Request) (*http.Response, error) { + <-req.Context().Done() + return nil, req.Context().Err() + }, + }, + }), + ) + _, err := client.Health.Check(deadlineCtx) + if err == nil { + t.Error("expected there to be a deadline error") + } + close(testDone) + }() + + select { + case <-testTimeout: + t.Fatal("client didn't finish in time") + case <-testDone: + if diff := time.Since(deadline); diff < -30*time.Millisecond || 30*time.Millisecond < diff { + t.Fatalf("client did not return within 30ms of context deadline, got %s", diff) + } + } +} + +func TestContextDeadlineStreaming(t *testing.T) { + testTimeout := time.After(3 * time.Second) + testDone := make(chan struct{}) + + deadline := time.Now().Add(100 * time.Millisecond) + deadlineCtx, cancel := context.WithDeadline(context.Background(), deadline) + defer cancel() + + go func() { + client := hypeman.NewClient( + option.WithAPIKey("My API Key"), + option.WithHTTPClient(&http.Client{ + Transport: &closureTransport{ + fn: func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: 200, + Status: "200 OK", + Body: io.NopCloser( + io.Reader(readerFunc(func([]byte) (int, error) { + <-req.Context().Done() + return 0, req.Context().Err() + })), + ), + }, nil + }, + }, + }), + ) + stream := client.Instances.LogsStreaming( + deadlineCtx, + "id", + hypeman.InstanceLogsParams{}, + ) + for stream.Next() { + _ = stream.Current() + } + if stream.Err() == nil { + t.Error("expected there to be a deadline error") + } + close(testDone) + }() + + select { + case <-testTimeout: + t.Fatal("client didn't finish in time") + case <-testDone: + if diff := time.Since(deadline); diff < -30*time.Millisecond || 30*time.Millisecond < diff { + t.Fatalf("client did not return within 30ms of context deadline, got %s", diff) + } + } +} + +func 
TestContextDeadlineStreamingWithRequestTimeout(t *testing.T) { + testTimeout := time.After(3 * time.Second) + testDone := make(chan struct{}) + deadline := time.Now().Add(100 * time.Millisecond) + + go func() { + client := hypeman.NewClient( + option.WithAPIKey("My API Key"), + option.WithHTTPClient(&http.Client{ + Transport: &closureTransport{ + fn: func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: 200, + Status: "200 OK", + Body: io.NopCloser( + io.Reader(readerFunc(func([]byte) (int, error) { + <-req.Context().Done() + return 0, req.Context().Err() + })), + ), + }, nil + }, + }, + }), + ) + stream := client.Instances.LogsStreaming( + context.Background(), + "id", + hypeman.InstanceLogsParams{}, + option.WithRequestTimeout((100 * time.Millisecond)), + ) + for stream.Next() { + _ = stream.Current() + } + if stream.Err() == nil { + t.Error("expected there to be a deadline error") + } + close(testDone) + }() + + select { + case <-testTimeout: + t.Fatal("client didn't finish in time") + case <-testDone: + if diff := time.Since(deadline); diff < -30*time.Millisecond || 30*time.Millisecond < diff { + t.Fatalf("client did not return within 30ms of context deadline, got %s", diff) + } + } +} + +type readerFunc func([]byte) (int, error) + +func (f readerFunc) Read(p []byte) (int, error) { return f(p) } +func (f readerFunc) Close() error { return nil } diff --git a/sdks/go/device.go b/sdks/go/device.go new file mode 100644 index 00000000..c754587d --- /dev/null +++ b/sdks/go/device.go @@ -0,0 +1,198 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package hypeman + +import ( + "context" + "errors" + "fmt" + "net/http" + "slices" + "time" + + "github.com/kernel/hypeman-go/internal/apijson" + "github.com/kernel/hypeman-go/internal/requestconfig" + "github.com/kernel/hypeman-go/option" + "github.com/kernel/hypeman-go/packages/param" + "github.com/kernel/hypeman-go/packages/respjson" +) + +// DeviceService contains methods and other services that help with interacting +// with the hypeman API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewDeviceService] method instead. +type DeviceService struct { + Options []option.RequestOption +} + +// NewDeviceService generates a new service that applies the given options to each +// request. These options are applied after the parent client's options (if there +// is one), and before any request-specific options. +func NewDeviceService(opts ...option.RequestOption) (r DeviceService) { + r = DeviceService{} + r.Options = opts + return +} + +// Register a device for passthrough +func (r *DeviceService) New(ctx context.Context, body DeviceNewParams, opts ...option.RequestOption) (res *Device, err error) { + opts = slices.Concat(r.Options, opts) + path := "devices" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// Get device details +func (r *DeviceService) Get(ctx context.Context, id string, opts ...option.RequestOption) (res *Device, err error) { + opts = slices.Concat(r.Options, opts) + if id == "" { + err = errors.New("missing required id parameter") + return + } + path := fmt.Sprintf("devices/%s", id) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) 
+ return +} + +// List registered devices +func (r *DeviceService) List(ctx context.Context, opts ...option.RequestOption) (res *[]Device, err error) { + opts = slices.Concat(r.Options, opts) + path := "devices" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) + return +} + +// Unregister device +func (r *DeviceService) Delete(ctx context.Context, id string, opts ...option.RequestOption) (err error) { + opts = slices.Concat(r.Options, opts) + opts = append([]option.RequestOption{option.WithHeader("Accept", "*/*")}, opts...) + if id == "" { + err = errors.New("missing required id parameter") + return + } + path := fmt.Sprintf("devices/%s", id) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodDelete, path, nil, nil, opts...) + return +} + +// Discover passthrough-capable devices on host +func (r *DeviceService) ListAvailable(ctx context.Context, opts ...option.RequestOption) (res *[]AvailableDevice, err error) { + opts = slices.Concat(r.Options, opts) + path := "devices/available" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) + return +} + +type AvailableDevice struct { + // PCI device ID (hex) + DeviceID string `json:"device_id" api:"required"` + // IOMMU group number + IommuGroup int64 `json:"iommu_group" api:"required"` + // PCI address + PciAddress string `json:"pci_address" api:"required"` + // PCI vendor ID (hex) + VendorID string `json:"vendor_id" api:"required"` + // Currently bound driver (null if none) + CurrentDriver string `json:"current_driver" api:"nullable"` + // Human-readable device name + DeviceName string `json:"device_name"` + // Human-readable vendor name + VendorName string `json:"vendor_name"` + // JSON contains metadata for fields, check presence with [respjson.Field.Valid]. 
+ JSON struct { + DeviceID respjson.Field + IommuGroup respjson.Field + PciAddress respjson.Field + VendorID respjson.Field + CurrentDriver respjson.Field + DeviceName respjson.Field + VendorName respjson.Field + ExtraFields map[string]respjson.Field + raw string + } `json:"-"` +} + +// Returns the unmodified JSON received from the API +func (r AvailableDevice) RawJSON() string { return r.JSON.raw } +func (r *AvailableDevice) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +type Device struct { + // Auto-generated unique identifier (CUID2 format) + ID string `json:"id" api:"required"` + // Whether the device is currently bound to the vfio-pci driver, which is required + // for VM passthrough. + // + // - true: Device is bound to vfio-pci and ready for (or currently in use by) a VM. + // The device's native driver has been unloaded. + // - false: Device is using its native driver (e.g., nvidia) or no driver. Hypeman + // will automatically bind to vfio-pci when attaching to an instance. + BoundToVfio bool `json:"bound_to_vfio" api:"required"` + // Registration timestamp (RFC3339) + CreatedAt time.Time `json:"created_at" api:"required" format:"date-time"` + // PCI device ID (hex) + DeviceID string `json:"device_id" api:"required"` + // IOMMU group number + IommuGroup int64 `json:"iommu_group" api:"required"` + // PCI address + PciAddress string `json:"pci_address" api:"required"` + // Type of PCI device + // + // Any of "gpu", "pci". + Type DeviceType `json:"type" api:"required"` + // PCI vendor ID (hex) + VendorID string `json:"vendor_id" api:"required"` + // Instance ID if attached + AttachedTo string `json:"attached_to" api:"nullable"` + // Device name (user-provided or auto-generated from PCI address) + Name string `json:"name"` + // JSON contains metadata for fields, check presence with [respjson.Field.Valid]. 
+ JSON struct { + ID respjson.Field + BoundToVfio respjson.Field + CreatedAt respjson.Field + DeviceID respjson.Field + IommuGroup respjson.Field + PciAddress respjson.Field + Type respjson.Field + VendorID respjson.Field + AttachedTo respjson.Field + Name respjson.Field + ExtraFields map[string]respjson.Field + raw string + } `json:"-"` +} + +// Returns the unmodified JSON received from the API +func (r Device) RawJSON() string { return r.JSON.raw } +func (r *Device) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +// Type of PCI device +type DeviceType string + +const ( + DeviceTypeGPU DeviceType = "gpu" + DeviceTypePci DeviceType = "pci" +) + +type DeviceNewParams struct { + // PCI address of the device (required, e.g., "0000:a2:00.0") + PciAddress string `json:"pci_address" api:"required"` + // Optional globally unique device name. If not provided, a name is auto-generated + // from the PCI address (e.g., "pci-0000-a2-00-0") + Name param.Opt[string] `json:"name,omitzero"` + paramObj +} + +func (r DeviceNewParams) MarshalJSON() (data []byte, err error) { + type shadow DeviceNewParams + return param.MarshalObject(r, (*shadow)(&r)) +} +func (r *DeviceNewParams) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} diff --git a/sdks/go/device_test.go b/sdks/go/device_test.go new file mode 100644 index 00000000..1c092572 --- /dev/null +++ b/sdks/go/device_test.go @@ -0,0 +1,132 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package hypeman_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/kernel/hypeman-go" + "github.com/kernel/hypeman-go/internal/testutil" + "github.com/kernel/hypeman-go/option" +) + +func TestDeviceNewWithOptionalParams(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Devices.New(context.TODO(), hypeman.DeviceNewParams{ + PciAddress: "0000:a2:00.0", + Name: hypeman.String("l4-gpu"), + }) + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestDeviceGet(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Devices.Get(context.TODO(), "id") + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestDeviceList(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Devices.List(context.TODO()) + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + 
t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestDeviceDelete(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + err := client.Devices.Delete(context.TODO(), "id") + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestDeviceListAvailable(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Devices.ListAvailable(context.TODO()) + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/sdks/go/examples/.keep b/sdks/go/examples/.keep new file mode 100644 index 00000000..d8c73e93 --- /dev/null +++ b/sdks/go/examples/.keep @@ -0,0 +1,4 @@ +File generated from our OpenAPI spec by Stainless. + +This directory can be used to store example files demonstrating usage of this SDK. +It is ignored by Stainless code generation and its content (other than this keep file) won't be touched. 
\ No newline at end of file diff --git a/sdks/go/examples/push/main.go b/sdks/go/examples/push/main.go new file mode 100644 index 00000000..f5a17478 --- /dev/null +++ b/sdks/go/examples/push/main.go @@ -0,0 +1,100 @@ +// Example: Push a local Docker image to hypeman +// +// This example demonstrates how to push images to hypeman's registry using the SDK. +// It shows three approaches: +// 1. Push from local Docker daemon (most common for development) +// 2. Push from a remote registry (pull and push) +// 3. Push using PushFromURL helper (standalone scripts) +// +// Usage: +// +// export HYPEMAN_API_KEY="your-jwt-token" +// export HYPEMAN_BASE_URL="http://localhost:8080" +// go run ./examples/push myapp:latest +package main + +import ( + "context" + "fmt" + "os" + + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" + hypeman "github.com/kernel/hypeman-go" + "github.com/kernel/hypeman-go/lib" +) + +func main() { + if len(os.Args) < 2 { + fmt.Fprintln(os.Stderr, "Usage: push [target-name]") + fmt.Fprintln(os.Stderr, " image: Local Docker image reference (e.g., myapp:latest)") + fmt.Fprintln(os.Stderr, " target-name: Optional name in hypeman (defaults to image)") + os.Exit(1) + } + + sourceImage := os.Args[1] + targetName := sourceImage + if len(os.Args) > 2 { + targetName = os.Args[2] + } + + ctx := context.Background() + + // Create a hypeman client (reads HYPEMAN_API_KEY and HYPEMAN_BASE_URL from env) + client := hypeman.NewClient() + + // Extract push configuration from client options + cfg, err := lib.ExtractPushConfig(client.Options) + if err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + os.Exit(1) + } + + fmt.Printf("Pushing %s to hypeman as %s...\n", sourceImage, targetName) + + // Push from local Docker daemon + err = lib.Push(ctx, cfg, sourceImage, targetName) + if err != nil { + fmt.Fprintf(os.Stderr, "Push failed: %v\n", err) + os.Exit(1) + } + + fmt.Println("Push successful!") +} + +// 
Example: Push from a remote registry (pull from Docker Hub, push to hypeman) +func examplePushFromRemote(ctx context.Context, cfg lib.PushConfig) error { + // Pull from Docker Hub + ref, err := name.ParseReference("docker.io/library/alpine:latest") + if err != nil { + return err + } + + img, err := remote.Image(ref) + if err != nil { + return err + } + + // Push to hypeman + return lib.PushImage(ctx, cfg, img, "alpine:latest") +} + +// Example: Using PushFromURL for standalone scripts +func examplePushFromURL(ctx context.Context) error { + baseURL := os.Getenv("HYPEMAN_BASE_URL") + apiKey := os.Getenv("HYPEMAN_API_KEY") + + // Pull image from remote + ref, err := name.ParseReference("docker.io/library/nginx:alpine") + if err != nil { + return err + } + + img, err := remote.Image(ref) + if err != nil { + return err + } + + // Push directly using URL (no client needed) + return lib.PushFromURL(ctx, baseURL, apiKey, img, "nginx:alpine") +} diff --git a/sdks/go/field.go b/sdks/go/field.go new file mode 100644 index 00000000..2051a0bd --- /dev/null +++ b/sdks/go/field.go @@ -0,0 +1,45 @@ +package hypeman + +import ( + "github.com/kernel/hypeman-go/packages/param" + "io" + "time" +) + +func String(s string) param.Opt[string] { return param.NewOpt(s) } +func Int(i int64) param.Opt[int64] { return param.NewOpt(i) } +func Bool(b bool) param.Opt[bool] { return param.NewOpt(b) } +func Float(f float64) param.Opt[float64] { return param.NewOpt(f) } +func Time(t time.Time) param.Opt[time.Time] { return param.NewOpt(t) } + +func Opt[T comparable](v T) param.Opt[T] { return param.NewOpt(v) } +func Ptr[T any](v T) *T { return &v } + +func IntPtr(v int64) *int64 { return &v } +func BoolPtr(v bool) *bool { return &v } +func FloatPtr(v float64) *float64 { return &v } +func StringPtr(v string) *string { return &v } +func TimePtr(v time.Time) *time.Time { return &v } + +func File(rdr io.Reader, filename string, contentType string) file { + return file{rdr, filename, contentType} +} + 
+type file struct { + io.Reader + name string + contentType string +} + +func (f file) Filename() string { + if f.name != "" { + return f.name + } else if named, ok := f.Reader.(interface{ Name() string }); ok { + return named.Name() + } + return "" +} + +func (f file) ContentType() string { + return f.contentType +} diff --git a/sdks/go/go.mod b/sdks/go/go.mod new file mode 100644 index 00000000..68dc94ac --- /dev/null +++ b/sdks/go/go.mod @@ -0,0 +1,57 @@ +module github.com/kernel/hypeman-go + +go 1.24.0 + +require ( + github.com/google/go-containerregistry v0.20.7 + github.com/gorilla/websocket v1.5.3 + github.com/stretchr/testify v1.11.1 + github.com/tidwall/gjson v1.18.0 + github.com/tidwall/sjson v1.2.5 +) + +require ( + github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/containerd/errdefs v1.0.0 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/cli v29.0.3+incompatible // indirect + github.com/docker/distribution v2.8.3+incompatible // indirect + github.com/docker/docker v28.5.2+incompatible // indirect + github.com/docker/docker-credential-helpers v0.9.3 // indirect + github.com/docker/go-connections v0.5.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/klauspost/compress v1.18.1 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/sys/sequential v0.6.0 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.1 
// indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.1 // indirect + github.com/vbatts/tar-split v0.12.2 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect + go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/sdk v1.38.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + go.opentelemetry.io/proto/otlp v1.9.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/sync v0.18.0 // indirect + golang.org/x/sys v0.38.0 // indirect + google.golang.org/protobuf v1.36.10 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/sdks/go/go.sum b/sdks/go/go.sum new file mode 100644 index 00000000..fa2fd63a --- /dev/null +++ b/sdks/go/go.sum @@ -0,0 +1,142 @@ +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 
h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8= +github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/cli v29.0.3+incompatible h1:8J+PZIcF2xLd6h5sHPsp5pvvJA+Sr2wGQxHkRl53a1E= +github.com/docker/cli v29.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= +github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM= +github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= +github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units 
v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I= +github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= +github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= +github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= +github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= +github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= +github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4= +github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= 
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A= +go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/sync v0.18.0 
h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= +google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= +google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= diff --git a/sdks/go/health.go b/sdks/go/health.go new file mode 100644 index 00000000..9e6cc0f3 --- /dev/null +++ b/sdks/go/health.go @@ -0,0 +1,64 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package hypeman + +import ( + "context" + "net/http" + "slices" + + "github.com/kernel/hypeman-go/internal/apijson" + "github.com/kernel/hypeman-go/internal/requestconfig" + "github.com/kernel/hypeman-go/option" + "github.com/kernel/hypeman-go/packages/respjson" +) + +// HealthService contains methods and other services that help with interacting +// with the hypeman API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewHealthService] method instead. +type HealthService struct { + Options []option.RequestOption +} + +// NewHealthService generates a new service that applies the given options to each +// request. These options are applied after the parent client's options (if there +// is one), and before any request-specific options. 
+func NewHealthService(opts ...option.RequestOption) (r HealthService) { + r = HealthService{} + r.Options = opts + return +} + +// Health check +func (r *HealthService) Check(ctx context.Context, opts ...option.RequestOption) (res *HealthCheckResponse, err error) { + opts = slices.Concat(r.Options, opts) + path := "health" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) + return +} + +type HealthCheckResponse struct { + // Any of "ok". + Status HealthCheckResponseStatus `json:"status" api:"required"` + // JSON contains metadata for fields, check presence with [respjson.Field.Valid]. + JSON struct { + Status respjson.Field + ExtraFields map[string]respjson.Field + raw string + } `json:"-"` +} + +// Returns the unmodified JSON received from the API +func (r HealthCheckResponse) RawJSON() string { return r.JSON.raw } +func (r *HealthCheckResponse) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +type HealthCheckResponseStatus string + +const ( + HealthCheckResponseStatusOk HealthCheckResponseStatus = "ok" +) diff --git a/sdks/go/health_test.go b/sdks/go/health_test.go new file mode 100644 index 00000000..2b588c83 --- /dev/null +++ b/sdks/go/health_test.go @@ -0,0 +1,37 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package hypeman_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/kernel/hypeman-go" + "github.com/kernel/hypeman-go/internal/testutil" + "github.com/kernel/hypeman-go/option" +) + +func TestHealthCheck(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Health.Check(context.TODO()) + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/sdks/go/image.go b/sdks/go/image.go new file mode 100644 index 00000000..5c116487 --- /dev/null +++ b/sdks/go/image.go @@ -0,0 +1,152 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package hypeman + +import ( + "context" + "errors" + "fmt" + "net/http" + "slices" + "time" + + "github.com/kernel/hypeman-go/internal/apijson" + "github.com/kernel/hypeman-go/internal/requestconfig" + "github.com/kernel/hypeman-go/option" + "github.com/kernel/hypeman-go/packages/param" + "github.com/kernel/hypeman-go/packages/respjson" +) + +// ImageService contains methods and other services that help with interacting with +// the hypeman API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewImageService] method instead. +type ImageService struct { + Options []option.RequestOption +} + +// NewImageService generates a new service that applies the given options to each +// request. These options are applied after the parent client's options (if there +// is one), and before any request-specific options. 
+func NewImageService(opts ...option.RequestOption) (r ImageService) { + r = ImageService{} + r.Options = opts + return +} + +// Pull and convert OCI image +func (r *ImageService) New(ctx context.Context, body ImageNewParams, opts ...option.RequestOption) (res *Image, err error) { + opts = slices.Concat(r.Options, opts) + path := "images" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// List images +func (r *ImageService) List(ctx context.Context, opts ...option.RequestOption) (res *[]Image, err error) { + opts = slices.Concat(r.Options, opts) + path := "images" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) + return +} + +// Delete image +func (r *ImageService) Delete(ctx context.Context, name string, opts ...option.RequestOption) (err error) { + opts = slices.Concat(r.Options, opts) + opts = append([]option.RequestOption{option.WithHeader("Accept", "*/*")}, opts...) + if name == "" { + err = errors.New("missing required name parameter") + return + } + path := fmt.Sprintf("images/%s", name) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodDelete, path, nil, nil, opts...) + return +} + +// Get image details +func (r *ImageService) Get(ctx context.Context, name string, opts ...option.RequestOption) (res *Image, err error) { + opts = slices.Concat(r.Options, opts) + if name == "" { + err = errors.New("missing required name parameter") + return + } + path := fmt.Sprintf("images/%s", name) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) 
+ return +} + +type Image struct { + // Creation timestamp (RFC3339) + CreatedAt time.Time `json:"created_at" api:"required" format:"date-time"` + // Resolved manifest digest + Digest string `json:"digest" api:"required"` + // Normalized OCI image reference (tag or digest) + Name string `json:"name" api:"required"` + // Build status + // + // Any of "pending", "pulling", "converting", "ready", "failed". + Status ImageStatus `json:"status" api:"required"` + // CMD from container metadata + Cmd []string `json:"cmd" api:"nullable"` + // Entrypoint from container metadata + Entrypoint []string `json:"entrypoint" api:"nullable"` + // Environment variables from container metadata + Env map[string]string `json:"env"` + // Error message if status is failed + Error string `json:"error" api:"nullable"` + // Position in build queue (null if not queued) + QueuePosition int64 `json:"queue_position" api:"nullable"` + // Disk size in bytes (null until ready) + SizeBytes int64 `json:"size_bytes" api:"nullable"` + // Working directory from container metadata + WorkingDir string `json:"working_dir" api:"nullable"` + // JSON contains metadata for fields, check presence with [respjson.Field.Valid]. 
+ JSON struct { + CreatedAt respjson.Field + Digest respjson.Field + Name respjson.Field + Status respjson.Field + Cmd respjson.Field + Entrypoint respjson.Field + Env respjson.Field + Error respjson.Field + QueuePosition respjson.Field + SizeBytes respjson.Field + WorkingDir respjson.Field + ExtraFields map[string]respjson.Field + raw string + } `json:"-"` +} + +// Returns the unmodified JSON received from the API +func (r Image) RawJSON() string { return r.JSON.raw } +func (r *Image) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +// Build status +type ImageStatus string + +const ( + ImageStatusPending ImageStatus = "pending" + ImageStatusPulling ImageStatus = "pulling" + ImageStatusConverting ImageStatus = "converting" + ImageStatusReady ImageStatus = "ready" + ImageStatusFailed ImageStatus = "failed" +) + +type ImageNewParams struct { + // OCI image reference (e.g., docker.io/library/nginx:latest) + Name string `json:"name" api:"required"` + paramObj +} + +func (r ImageNewParams) MarshalJSON() (data []byte, err error) { + type shadow ImageNewParams + return param.MarshalObject(r, (*shadow)(&r)) +} +func (r *ImageNewParams) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} diff --git a/sdks/go/image_test.go b/sdks/go/image_test.go new file mode 100644 index 00000000..e799946d --- /dev/null +++ b/sdks/go/image_test.go @@ -0,0 +1,108 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package hypeman_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/kernel/hypeman-go" + "github.com/kernel/hypeman-go/internal/testutil" + "github.com/kernel/hypeman-go/option" +) + +func TestImageNew(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Images.New(context.TODO(), hypeman.ImageNewParams{ + Name: "docker.io/library/nginx:latest", + }) + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestImageList(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Images.List(context.TODO()) + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestImageDelete(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + err := client.Images.Delete(context.TODO(), "name") + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err 
should be nil: %s", err.Error()) + } +} + +func TestImageGet(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Images.Get(context.TODO(), "name") + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/sdks/go/ingress.go b/sdks/go/ingress.go new file mode 100644 index 00000000..860f31c8 --- /dev/null +++ b/sdks/go/ingress.go @@ -0,0 +1,296 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package hypeman + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "slices" + "time" + + "github.com/kernel/hypeman-go/internal/apijson" + "github.com/kernel/hypeman-go/internal/requestconfig" + "github.com/kernel/hypeman-go/option" + "github.com/kernel/hypeman-go/packages/param" + "github.com/kernel/hypeman-go/packages/respjson" +) + +// IngressService contains methods and other services that help with interacting +// with the hypeman API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewIngressService] method instead. +type IngressService struct { + Options []option.RequestOption +} + +// NewIngressService generates a new service that applies the given options to each +// request. These options are applied after the parent client's options (if there +// is one), and before any request-specific options. 
+func NewIngressService(opts ...option.RequestOption) (r IngressService) { + r = IngressService{} + r.Options = opts + return +} + +// Create ingress +func (r *IngressService) New(ctx context.Context, body IngressNewParams, opts ...option.RequestOption) (res *Ingress, err error) { + opts = slices.Concat(r.Options, opts) + path := "ingresses" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// List ingresses +func (r *IngressService) List(ctx context.Context, opts ...option.RequestOption) (res *[]Ingress, err error) { + opts = slices.Concat(r.Options, opts) + path := "ingresses" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) + return +} + +// Delete ingress +func (r *IngressService) Delete(ctx context.Context, id string, opts ...option.RequestOption) (err error) { + opts = slices.Concat(r.Options, opts) + opts = append([]option.RequestOption{option.WithHeader("Accept", "*/*")}, opts...) + if id == "" { + err = errors.New("missing required id parameter") + return + } + path := fmt.Sprintf("ingresses/%s", id) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodDelete, path, nil, nil, opts...) + return +} + +// Get ingress details +func (r *IngressService) Get(ctx context.Context, id string, opts ...option.RequestOption) (res *Ingress, err error) { + opts = slices.Concat(r.Options, opts) + if id == "" { + err = errors.New("missing required id parameter") + return + } + path := fmt.Sprintf("ingresses/%s", id) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) 
+ return +} + +type Ingress struct { + // Auto-generated unique identifier + ID string `json:"id" api:"required"` + // Creation timestamp (RFC3339) + CreatedAt time.Time `json:"created_at" api:"required" format:"date-time"` + // Human-readable name + Name string `json:"name" api:"required"` + // Routing rules for this ingress + Rules []IngressRule `json:"rules" api:"required"` + // JSON contains metadata for fields, check presence with [respjson.Field.Valid]. + JSON struct { + ID respjson.Field + CreatedAt respjson.Field + Name respjson.Field + Rules respjson.Field + ExtraFields map[string]respjson.Field + raw string + } `json:"-"` +} + +// Returns the unmodified JSON received from the API +func (r Ingress) RawJSON() string { return r.JSON.raw } +func (r *Ingress) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +type IngressMatch struct { + // Hostname to match. Can be: + // + // - Literal: "api.example.com" (exact match on Host header) + // - Pattern: "{instance}.example.com" (dynamic routing based on subdomain) + // + // Pattern hostnames use named captures in curly braces (e.g., {instance}, {app}) + // that extract parts of the hostname for routing. The extracted values can be + // referenced in the target.instance field. + Hostname string `json:"hostname" api:"required"` + // Host port to listen on for this rule (default 80) + Port int64 `json:"port"` + // JSON contains metadata for fields, check presence with [respjson.Field.Valid]. + JSON struct { + Hostname respjson.Field + Port respjson.Field + ExtraFields map[string]respjson.Field + raw string + } `json:"-"` +} + +// Returns the unmodified JSON received from the API +func (r IngressMatch) RawJSON() string { return r.JSON.raw } +func (r *IngressMatch) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +// ToParam converts this IngressMatch to a IngressMatchParam. +// +// Warning: the fields of the param type will not be present. 
ToParam should only +// be used at the last possible moment before sending a request. Test for this with +// IngressMatchParam.Overrides() +func (r IngressMatch) ToParam() IngressMatchParam { + return param.Override[IngressMatchParam](json.RawMessage(r.RawJSON())) +} + +// The property Hostname is required. +type IngressMatchParam struct { + // Hostname to match. Can be: + // + // - Literal: "api.example.com" (exact match on Host header) + // - Pattern: "{instance}.example.com" (dynamic routing based on subdomain) + // + // Pattern hostnames use named captures in curly braces (e.g., {instance}, {app}) + // that extract parts of the hostname for routing. The extracted values can be + // referenced in the target.instance field. + Hostname string `json:"hostname" api:"required"` + // Host port to listen on for this rule (default 80) + Port param.Opt[int64] `json:"port,omitzero"` + paramObj +} + +func (r IngressMatchParam) MarshalJSON() (data []byte, err error) { + type shadow IngressMatchParam + return param.MarshalObject(r, (*shadow)(&r)) +} +func (r *IngressMatchParam) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +type IngressRule struct { + Match IngressMatch `json:"match" api:"required"` + Target IngressTarget `json:"target" api:"required"` + // Auto-create HTTP to HTTPS redirect for this hostname (only applies when tls is + // enabled) + RedirectHTTP bool `json:"redirect_http"` + // Enable TLS termination (certificate auto-issued via ACME). + Tls bool `json:"tls"` + // JSON contains metadata for fields, check presence with [respjson.Field.Valid]. 
+ JSON struct { + Match respjson.Field + Target respjson.Field + RedirectHTTP respjson.Field + Tls respjson.Field + ExtraFields map[string]respjson.Field + raw string + } `json:"-"` +} + +// Returns the unmodified JSON received from the API +func (r IngressRule) RawJSON() string { return r.JSON.raw } +func (r *IngressRule) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +// ToParam converts this IngressRule to a IngressRuleParam. +// +// Warning: the fields of the param type will not be present. ToParam should only +// be used at the last possible moment before sending a request. Test for this with +// IngressRuleParam.Overrides() +func (r IngressRule) ToParam() IngressRuleParam { + return param.Override[IngressRuleParam](json.RawMessage(r.RawJSON())) +} + +// The properties Match, Target are required. +type IngressRuleParam struct { + Match IngressMatchParam `json:"match,omitzero" api:"required"` + Target IngressTargetParam `json:"target,omitzero" api:"required"` + // Auto-create HTTP to HTTPS redirect for this hostname (only applies when tls is + // enabled) + RedirectHTTP param.Opt[bool] `json:"redirect_http,omitzero"` + // Enable TLS termination (certificate auto-issued via ACME). + Tls param.Opt[bool] `json:"tls,omitzero"` + paramObj +} + +func (r IngressRuleParam) MarshalJSON() (data []byte, err error) { + type shadow IngressRuleParam + return param.MarshalObject(r, (*shadow)(&r)) +} +func (r *IngressRuleParam) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +type IngressTarget struct { + // Target instance name, ID, or capture reference. + // + // - For literal hostnames: Use the instance name or ID directly (e.g., "my-api") + // - For pattern hostnames: Reference a capture from the hostname (e.g., + // "{instance}") + // + // When using pattern hostnames, the instance is resolved dynamically at request + // time. 
+ Instance string `json:"instance" api:"required"` + // Target port on the instance + Port int64 `json:"port" api:"required"` + // JSON contains metadata for fields, check presence with [respjson.Field.Valid]. + JSON struct { + Instance respjson.Field + Port respjson.Field + ExtraFields map[string]respjson.Field + raw string + } `json:"-"` +} + +// Returns the unmodified JSON received from the API +func (r IngressTarget) RawJSON() string { return r.JSON.raw } +func (r *IngressTarget) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +// ToParam converts this IngressTarget to a IngressTargetParam. +// +// Warning: the fields of the param type will not be present. ToParam should only +// be used at the last possible moment before sending a request. Test for this with +// IngressTargetParam.Overrides() +func (r IngressTarget) ToParam() IngressTargetParam { + return param.Override[IngressTargetParam](json.RawMessage(r.RawJSON())) +} + +// The properties Instance, Port are required. +type IngressTargetParam struct { + // Target instance name, ID, or capture reference. + // + // - For literal hostnames: Use the instance name or ID directly (e.g., "my-api") + // - For pattern hostnames: Reference a capture from the hostname (e.g., + // "{instance}") + // + // When using pattern hostnames, the instance is resolved dynamically at request + // time. 
+ Instance string `json:"instance" api:"required"` + // Target port on the instance + Port int64 `json:"port" api:"required"` + paramObj +} + +func (r IngressTargetParam) MarshalJSON() (data []byte, err error) { + type shadow IngressTargetParam + return param.MarshalObject(r, (*shadow)(&r)) +} +func (r *IngressTargetParam) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +type IngressNewParams struct { + // Human-readable name (lowercase letters, digits, and dashes only; cannot start or + // end with a dash) + Name string `json:"name" api:"required"` + // Routing rules for this ingress + Rules []IngressRuleParam `json:"rules,omitzero" api:"required"` + paramObj +} + +func (r IngressNewParams) MarshalJSON() (data []byte, err error) { + type shadow IngressNewParams + return param.MarshalObject(r, (*shadow)(&r)) +} +func (r *IngressNewParams) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} diff --git a/sdks/go/ingress_test.go b/sdks/go/ingress_test.go new file mode 100644 index 00000000..28bbab5f --- /dev/null +++ b/sdks/go/ingress_test.go @@ -0,0 +1,120 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package hypeman_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/kernel/hypeman-go" + "github.com/kernel/hypeman-go/internal/testutil" + "github.com/kernel/hypeman-go/option" +) + +func TestIngressNew(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Ingresses.New(context.TODO(), hypeman.IngressNewParams{ + Name: "my-api-ingress", + Rules: []hypeman.IngressRuleParam{{ + Match: hypeman.IngressMatchParam{ + Hostname: "{instance}.example.com", + Port: hypeman.Int(8080), + }, + Target: hypeman.IngressTargetParam{ + Instance: "{instance}", + Port: 8080, + }, + RedirectHTTP: hypeman.Bool(true), + Tls: hypeman.Bool(true), + }}, + }) + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestIngressList(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Ingresses.List(context.TODO()) + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestIngressDelete(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } 
+ client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + err := client.Ingresses.Delete(context.TODO(), "id") + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestIngressGet(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Ingresses.Get(context.TODO(), "id") + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/sdks/go/instance.go b/sdks/go/instance.go new file mode 100644 index 00000000..c1eabf63 --- /dev/null +++ b/sdks/go/instance.go @@ -0,0 +1,658 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package hypeman + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "slices" + "time" + + "github.com/kernel/hypeman-go/internal/apijson" + "github.com/kernel/hypeman-go/internal/apiquery" + "github.com/kernel/hypeman-go/internal/requestconfig" + "github.com/kernel/hypeman-go/option" + "github.com/kernel/hypeman-go/packages/param" + "github.com/kernel/hypeman-go/packages/respjson" + "github.com/kernel/hypeman-go/packages/ssestream" +) + +// InstanceService contains methods and other services that help with interacting +// with the hypeman API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewInstanceService] method instead. 
+type InstanceService struct { + Options []option.RequestOption + Volumes InstanceVolumeService +} + +// NewInstanceService generates a new service that applies the given options to +// each request. These options are applied after the parent client's options (if +// there is one), and before any request-specific options. +func NewInstanceService(opts ...option.RequestOption) (r InstanceService) { + r = InstanceService{} + r.Options = opts + r.Volumes = NewInstanceVolumeService(opts...) + return +} + +// Create and start instance +func (r *InstanceService) New(ctx context.Context, body InstanceNewParams, opts ...option.RequestOption) (res *Instance, err error) { + opts = slices.Concat(r.Options, opts) + path := "instances" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// List instances +func (r *InstanceService) List(ctx context.Context, query InstanceListParams, opts ...option.RequestOption) (res *[]Instance, err error) { + opts = slices.Concat(r.Options, opts) + path := "instances" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, query, &res, opts...) + return +} + +// Stop and delete instance +func (r *InstanceService) Delete(ctx context.Context, id string, opts ...option.RequestOption) (err error) { + opts = slices.Concat(r.Options, opts) + opts = append([]option.RequestOption{option.WithHeader("Accept", "*/*")}, opts...) + if id == "" { + err = errors.New("missing required id parameter") + return + } + path := fmt.Sprintf("instances/%s", id) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodDelete, path, nil, nil, opts...) 
+ return +} + +// Get instance details +func (r *InstanceService) Get(ctx context.Context, id string, opts ...option.RequestOption) (res *Instance, err error) { + opts = slices.Concat(r.Options, opts) + if id == "" { + err = errors.New("missing required id parameter") + return + } + path := fmt.Sprintf("instances/%s", id) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) + return +} + +// Streams instance logs as Server-Sent Events. Use the `source` parameter to +// select which log to stream: +// +// - `app` (default): Guest application logs (serial console) +// - `vmm`: Cloud Hypervisor VMM logs +// - `hypeman`: Hypeman operations log +// +// Returns the last N lines (controlled by `tail` parameter), then optionally +// continues streaming new lines if `follow=true`. +func (r *InstanceService) LogsStreaming(ctx context.Context, id string, query InstanceLogsParams, opts ...option.RequestOption) (stream *ssestream.Stream[string]) { + var ( + raw *http.Response + err error + ) + opts = slices.Concat(r.Options, opts) + opts = append([]option.RequestOption{option.WithHeader("Accept", "text/event-stream")}, opts...) + if id == "" { + err = errors.New("missing required id parameter") + return + } + path := fmt.Sprintf("instances/%s/logs", id) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, query, &raw, opts...) + return ssestream.NewStream[string](ssestream.NewDecoder(raw), err) +} + +// Restore instance from standby +func (r *InstanceService) Restore(ctx context.Context, id string, opts ...option.RequestOption) (res *Instance, err error) { + opts = slices.Concat(r.Options, opts) + if id == "" { + err = errors.New("missing required id parameter") + return + } + path := fmt.Sprintf("instances/%s/restore", id) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, nil, &res, opts...) 
+ return +} + +// Put instance in standby (pause, snapshot, delete VMM) +func (r *InstanceService) Standby(ctx context.Context, id string, opts ...option.RequestOption) (res *Instance, err error) { + opts = slices.Concat(r.Options, opts) + if id == "" { + err = errors.New("missing required id parameter") + return + } + path := fmt.Sprintf("instances/%s/standby", id) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, nil, &res, opts...) + return +} + +// Start a stopped instance +func (r *InstanceService) Start(ctx context.Context, id string, body InstanceStartParams, opts ...option.RequestOption) (res *Instance, err error) { + opts = slices.Concat(r.Options, opts) + if id == "" { + err = errors.New("missing required id parameter") + return + } + path := fmt.Sprintf("instances/%s/start", id) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// Returns information about a path in the guest filesystem. Useful for checking if +// a path exists, its type, and permissions before performing file operations. +func (r *InstanceService) Stat(ctx context.Context, id string, query InstanceStatParams, opts ...option.RequestOption) (res *PathInfo, err error) { + opts = slices.Concat(r.Options, opts) + if id == "" { + err = errors.New("missing required id parameter") + return + } + path := fmt.Sprintf("instances/%s/stat", id) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, query, &res, opts...) + return +} + +// Stop instance (graceful shutdown) +func (r *InstanceService) Stop(ctx context.Context, id string, opts ...option.RequestOption) (res *Instance, err error) { + opts = slices.Concat(r.Options, opts) + if id == "" { + err = errors.New("missing required id parameter") + return + } + path := fmt.Sprintf("instances/%s/stop", id) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, nil, &res, opts...) 
+ return +} + +type Instance struct { + // Auto-generated unique identifier (CUID2 format) + ID string `json:"id" api:"required"` + // Creation timestamp (RFC3339) + CreatedAt time.Time `json:"created_at" api:"required" format:"date-time"` + // OCI image reference + Image string `json:"image" api:"required"` + // Human-readable name + Name string `json:"name" api:"required"` + // Instance state: + // + // - Created: VMM created but not started (Cloud Hypervisor native) + // - Running: VM is actively running (Cloud Hypervisor native) + // - Paused: VM is paused (Cloud Hypervisor native) + // - Shutdown: VM shut down but VMM exists (Cloud Hypervisor native) + // - Stopped: No VMM running, no snapshot exists + // - Standby: No VMM running, snapshot exists (can be restored) + // - Unknown: Failed to determine state (see state_error for details) + // + // Any of "Created", "Running", "Paused", "Shutdown", "Stopped", "Standby", + // "Unknown". + State InstanceState `json:"state" api:"required"` + // Disk I/O rate limit (human-readable, e.g., "100MB/s") + DiskIoBps string `json:"disk_io_bps"` + // Environment variables + Env map[string]string `json:"env"` + // App exit code (null if VM hasn't exited) + ExitCode int64 `json:"exit_code" api:"nullable"` + // Human-readable description of exit (e.g., "command not found", "killed by signal + // 9 (SIGKILL) - OOM") + ExitMessage string `json:"exit_message"` + // GPU information attached to the instance + GPU InstanceGPU `json:"gpu"` + // Whether a snapshot exists for this instance + HasSnapshot bool `json:"has_snapshot"` + // Hotplug memory size (human-readable) + HotplugSize string `json:"hotplug_size"` + // Hypervisor running this instance + // + // Any of "cloud-hypervisor", "qemu", "vz". 
+ Hypervisor InstanceHypervisor `json:"hypervisor"` + // User-defined key-value metadata + Metadata map[string]string `json:"metadata"` + // Network configuration of the instance + Network InstanceNetwork `json:"network"` + // Writable overlay disk size (human-readable) + OverlaySize string `json:"overlay_size"` + // Base memory size (human-readable) + Size string `json:"size"` + // Start timestamp (RFC3339) + StartedAt time.Time `json:"started_at" api:"nullable" format:"date-time"` + // Error message if state couldn't be determined (only set when state is Unknown) + StateError string `json:"state_error" api:"nullable"` + // Stop timestamp (RFC3339) + StoppedAt time.Time `json:"stopped_at" api:"nullable" format:"date-time"` + // Number of virtual CPUs + Vcpus int64 `json:"vcpus"` + // Volumes attached to the instance + Volumes []VolumeMount `json:"volumes"` + // JSON contains metadata for fields, check presence with [respjson.Field.Valid]. + JSON struct { + ID respjson.Field + CreatedAt respjson.Field + Image respjson.Field + Name respjson.Field + State respjson.Field + DiskIoBps respjson.Field + Env respjson.Field + ExitCode respjson.Field + ExitMessage respjson.Field + GPU respjson.Field + HasSnapshot respjson.Field + HotplugSize respjson.Field + Hypervisor respjson.Field + Metadata respjson.Field + Network respjson.Field + OverlaySize respjson.Field + Size respjson.Field + StartedAt respjson.Field + StateError respjson.Field + StoppedAt respjson.Field + Vcpus respjson.Field + Volumes respjson.Field + ExtraFields map[string]respjson.Field + raw string + } `json:"-"` +} + +// Returns the unmodified JSON received from the API +func (r Instance) RawJSON() string { return r.JSON.raw } +func (r *Instance) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +// Instance state: +// +// - Created: VMM created but not started (Cloud Hypervisor native) +// - Running: VM is actively running (Cloud Hypervisor native) +// - Paused: VM is paused 
(Cloud Hypervisor native) +// - Shutdown: VM shut down but VMM exists (Cloud Hypervisor native) +// - Stopped: No VMM running, no snapshot exists +// - Standby: No VMM running, snapshot exists (can be restored) +// - Unknown: Failed to determine state (see state_error for details) +type InstanceState string + +const ( + InstanceStateCreated InstanceState = "Created" + InstanceStateRunning InstanceState = "Running" + InstanceStatePaused InstanceState = "Paused" + InstanceStateShutdown InstanceState = "Shutdown" + InstanceStateStopped InstanceState = "Stopped" + InstanceStateStandby InstanceState = "Standby" + InstanceStateUnknown InstanceState = "Unknown" +) + +// GPU information attached to the instance +type InstanceGPU struct { + // mdev device UUID + MdevUuid string `json:"mdev_uuid"` + // vGPU profile name + Profile string `json:"profile"` + // JSON contains metadata for fields, check presence with [respjson.Field.Valid]. + JSON struct { + MdevUuid respjson.Field + Profile respjson.Field + ExtraFields map[string]respjson.Field + raw string + } `json:"-"` +} + +// Returns the unmodified JSON received from the API +func (r InstanceGPU) RawJSON() string { return r.JSON.raw } +func (r *InstanceGPU) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +// Hypervisor running this instance +type InstanceHypervisor string + +const ( + InstanceHypervisorCloudHypervisor InstanceHypervisor = "cloud-hypervisor" + InstanceHypervisorQemu InstanceHypervisor = "qemu" + InstanceHypervisorVz InstanceHypervisor = "vz" +) + +// Network configuration of the instance +type InstanceNetwork struct { + // Download bandwidth limit (human-readable, e.g., "1Gbps", "125MB/s") + BandwidthDownload string `json:"bandwidth_download"` + // Upload bandwidth limit (human-readable, e.g., "1Gbps", "125MB/s") + BandwidthUpload string `json:"bandwidth_upload"` + // Whether instance is attached to the default network + Enabled bool `json:"enabled"` + // Assigned IP address 
(null if no network) + IP string `json:"ip" api:"nullable"` + // Assigned MAC address (null if no network) + Mac string `json:"mac" api:"nullable"` + // Network name (always "default" when enabled) + Name string `json:"name"` + // JSON contains metadata for fields, check presence with [respjson.Field.Valid]. + JSON struct { + BandwidthDownload respjson.Field + BandwidthUpload respjson.Field + Enabled respjson.Field + IP respjson.Field + Mac respjson.Field + Name respjson.Field + ExtraFields map[string]respjson.Field + raw string + } `json:"-"` +} + +// Returns the unmodified JSON received from the API +func (r InstanceNetwork) RawJSON() string { return r.JSON.raw } +func (r *InstanceNetwork) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +type PathInfo struct { + // Whether the path exists + Exists bool `json:"exists" api:"required"` + // Error message if stat failed (e.g., permission denied). Only set when exists is + // false due to an error rather than the path not existing. + Error string `json:"error" api:"nullable"` + // True if this is a directory + IsDir bool `json:"is_dir"` + // True if this is a regular file + IsFile bool `json:"is_file"` + // True if this is a symbolic link (only set when follow_links=false) + IsSymlink bool `json:"is_symlink"` + // Symlink target path (only set when is_symlink=true) + LinkTarget string `json:"link_target" api:"nullable"` + // File mode (Unix permissions) + Mode int64 `json:"mode"` + // File size in bytes + Size int64 `json:"size"` + // JSON contains metadata for fields, check presence with [respjson.Field.Valid]. 
+ JSON struct { + Exists respjson.Field + Error respjson.Field + IsDir respjson.Field + IsFile respjson.Field + IsSymlink respjson.Field + LinkTarget respjson.Field + Mode respjson.Field + Size respjson.Field + ExtraFields map[string]respjson.Field + raw string + } `json:"-"` +} + +// Returns the unmodified JSON received from the API +func (r PathInfo) RawJSON() string { return r.JSON.raw } +func (r *PathInfo) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +type VolumeMount struct { + // Path where volume is mounted in the guest + MountPath string `json:"mount_path" api:"required"` + // Volume identifier + VolumeID string `json:"volume_id" api:"required"` + // Create per-instance overlay for writes (requires readonly=true) + Overlay bool `json:"overlay"` + // Max overlay size as human-readable string (e.g., "1GB"). Required if + // overlay=true. + OverlaySize string `json:"overlay_size"` + // Whether volume is mounted read-only + Readonly bool `json:"readonly"` + // JSON contains metadata for fields, check presence with [respjson.Field.Valid]. + JSON struct { + MountPath respjson.Field + VolumeID respjson.Field + Overlay respjson.Field + OverlaySize respjson.Field + Readonly respjson.Field + ExtraFields map[string]respjson.Field + raw string + } `json:"-"` +} + +// Returns the unmodified JSON received from the API +func (r VolumeMount) RawJSON() string { return r.JSON.raw } +func (r *VolumeMount) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +// ToParam converts this VolumeMount to a VolumeMountParam. +// +// Warning: the fields of the param type will not be present. ToParam should only +// be used at the last possible moment before sending a request. Test for this with +// VolumeMountParam.Overrides() +func (r VolumeMount) ToParam() VolumeMountParam { + return param.Override[VolumeMountParam](json.RawMessage(r.RawJSON())) +} + +// The properties MountPath, VolumeID are required. 
+type VolumeMountParam struct { + // Path where volume is mounted in the guest + MountPath string `json:"mount_path" api:"required"` + // Volume identifier + VolumeID string `json:"volume_id" api:"required"` + // Create per-instance overlay for writes (requires readonly=true) + Overlay param.Opt[bool] `json:"overlay,omitzero"` + // Max overlay size as human-readable string (e.g., "1GB"). Required if + // overlay=true. + OverlaySize param.Opt[string] `json:"overlay_size,omitzero"` + // Whether volume is mounted read-only + Readonly param.Opt[bool] `json:"readonly,omitzero"` + paramObj +} + +func (r VolumeMountParam) MarshalJSON() (data []byte, err error) { + type shadow VolumeMountParam + return param.MarshalObject(r, (*shadow)(&r)) +} +func (r *VolumeMountParam) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +type InstanceNewParams struct { + // OCI image reference + Image string `json:"image" api:"required"` + // Human-readable name (lowercase letters, digits, and dashes only; cannot start or + // end with a dash) + Name string `json:"name" api:"required"` + // Disk I/O rate limit (e.g., "100MB/s", "500MB/s"). Defaults to proportional share + // based on CPU allocation if configured. + DiskIoBps param.Opt[string] `json:"disk_io_bps,omitzero"` + // Additional memory for hotplug (human-readable format like "3GB", "1G"). Omit to + // disable hotplug memory. + HotplugSize param.Opt[string] `json:"hotplug_size,omitzero"` + // Writable overlay disk size (human-readable format like "10GB", "50G") + OverlaySize param.Opt[string] `json:"overlay_size,omitzero"` + // Base memory size (human-readable format like "1GB", "512MB", "2G") + Size param.Opt[string] `json:"size,omitzero"` + // Skip guest-agent installation during boot. When true, the exec and stat APIs + // will not work for this instance. The instance will still run, but remote command + // execution will be unavailable. 
+ SkipGuestAgent param.Opt[bool] `json:"skip_guest_agent,omitzero"` + // Skip kernel headers installation during boot for faster startup. When true, DKMS + // (Dynamic Kernel Module Support) will not work, preventing compilation of + // out-of-tree kernel modules (e.g., NVIDIA vGPU drivers). Recommended for + // workloads that don't need kernel module compilation. + SkipKernelHeaders param.Opt[bool] `json:"skip_kernel_headers,omitzero"` + // Number of virtual CPUs + Vcpus param.Opt[int64] `json:"vcpus,omitzero"` + // Override image CMD (like docker run ). Omit to use image + // default. + Cmd []string `json:"cmd,omitzero"` + // Device IDs or names to attach for GPU/PCI passthrough + Devices []string `json:"devices,omitzero"` + // Override image entrypoint (like docker run --entrypoint). Omit to use image + // default. + Entrypoint []string `json:"entrypoint,omitzero"` + // Environment variables + Env map[string]string `json:"env,omitzero"` + // GPU configuration for the instance + GPU InstanceNewParamsGPU `json:"gpu,omitzero"` + // Hypervisor to use for this instance. Defaults to server configuration. + // + // Any of "cloud-hypervisor", "qemu", "vz". + Hypervisor InstanceNewParamsHypervisor `json:"hypervisor,omitzero"` + // User-defined key-value metadata for the instance + Metadata map[string]string `json:"metadata,omitzero"` + // Network configuration for the instance + Network InstanceNewParamsNetwork `json:"network,omitzero"` + // Volumes to attach to the instance at creation time + Volumes []VolumeMountParam `json:"volumes,omitzero"` + paramObj +} + +func (r InstanceNewParams) MarshalJSON() (data []byte, err error) { + type shadow InstanceNewParams + return param.MarshalObject(r, (*shadow)(&r)) +} +func (r *InstanceNewParams) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +// GPU configuration for the instance +type InstanceNewParamsGPU struct { + // vGPU profile name (e.g., "L40S-1Q"). Only used in vGPU mode. 
+ Profile param.Opt[string] `json:"profile,omitzero"` + paramObj +} + +func (r InstanceNewParamsGPU) MarshalJSON() (data []byte, err error) { + type shadow InstanceNewParamsGPU + return param.MarshalObject(r, (*shadow)(&r)) +} +func (r *InstanceNewParamsGPU) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +// Hypervisor to use for this instance. Defaults to server configuration. +type InstanceNewParamsHypervisor string + +const ( + InstanceNewParamsHypervisorCloudHypervisor InstanceNewParamsHypervisor = "cloud-hypervisor" + InstanceNewParamsHypervisorQemu InstanceNewParamsHypervisor = "qemu" + InstanceNewParamsHypervisorVz InstanceNewParamsHypervisor = "vz" +) + +// Network configuration for the instance +type InstanceNewParamsNetwork struct { + // Download bandwidth limit (external→VM, e.g., "1Gbps", "125MB/s"). Defaults to + // proportional share based on CPU allocation. + BandwidthDownload param.Opt[string] `json:"bandwidth_download,omitzero"` + // Upload bandwidth limit (VM→external, e.g., "1Gbps", "125MB/s"). Defaults to + // proportional share based on CPU allocation. + BandwidthUpload param.Opt[string] `json:"bandwidth_upload,omitzero"` + // Whether to attach instance to the default network + Enabled param.Opt[bool] `json:"enabled,omitzero"` + paramObj +} + +func (r InstanceNewParamsNetwork) MarshalJSON() (data []byte, err error) { + type shadow InstanceNewParamsNetwork + return param.MarshalObject(r, (*shadow)(&r)) +} +func (r *InstanceNewParamsNetwork) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +type InstanceListParams struct { + // Filter instances by metadata key-value pairs. Uses deepObject style: + // ?metadata[team]=backend&metadata[env]=staging Multiple entries are ANDed + // together. All specified key-value pairs must match. 
+ Metadata map[string]string `query:"metadata,omitzero" json:"-"` + // Filter instances by state (e.g., Running, Stopped) + // + // Any of "Created", "Running", "Paused", "Shutdown", "Stopped", "Standby", + // "Unknown". + State InstanceListParamsState `query:"state,omitzero" json:"-"` + paramObj +} + +// URLQuery serializes [InstanceListParams]'s query parameters as `url.Values`. +func (r InstanceListParams) URLQuery() (v url.Values, err error) { + return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{ + ArrayFormat: apiquery.ArrayQueryFormatComma, + NestedFormat: apiquery.NestedQueryFormatBrackets, + }) +} + +// Filter instances by state (e.g., Running, Stopped) +type InstanceListParamsState string + +const ( + InstanceListParamsStateCreated InstanceListParamsState = "Created" + InstanceListParamsStateRunning InstanceListParamsState = "Running" + InstanceListParamsStatePaused InstanceListParamsState = "Paused" + InstanceListParamsStateShutdown InstanceListParamsState = "Shutdown" + InstanceListParamsStateStopped InstanceListParamsState = "Stopped" + InstanceListParamsStateStandby InstanceListParamsState = "Standby" + InstanceListParamsStateUnknown InstanceListParamsState = "Unknown" +) + +type InstanceLogsParams struct { + // Continue streaming new lines after initial output + Follow param.Opt[bool] `query:"follow,omitzero" json:"-"` + // Number of lines to return from end + Tail param.Opt[int64] `query:"tail,omitzero" json:"-"` + // Log source to stream: + // + // - app: Guest application logs (serial console output) + // - vmm: Cloud Hypervisor VMM logs (hypervisor stdout+stderr) + // - hypeman: Hypeman operations log (actions taken on this instance) + // + // Any of "app", "vmm", "hypeman". + Source InstanceLogsParamsSource `query:"source,omitzero" json:"-"` + paramObj +} + +// URLQuery serializes [InstanceLogsParams]'s query parameters as `url.Values`. 
+func (r InstanceLogsParams) URLQuery() (v url.Values, err error) { + return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{ + ArrayFormat: apiquery.ArrayQueryFormatComma, + NestedFormat: apiquery.NestedQueryFormatBrackets, + }) +} + +// Log source to stream: +// +// - app: Guest application logs (serial console output) +// - vmm: Cloud Hypervisor VMM logs (hypervisor stdout+stderr) +// - hypeman: Hypeman operations log (actions taken on this instance) +type InstanceLogsParamsSource string + +const ( + InstanceLogsParamsSourceApp InstanceLogsParamsSource = "app" + InstanceLogsParamsSourceVmm InstanceLogsParamsSource = "vmm" + InstanceLogsParamsSourceHypeman InstanceLogsParamsSource = "hypeman" +) + +type InstanceStartParams struct { + // Override image CMD for this run. Omit to keep previous value. + Cmd []string `json:"cmd,omitzero"` + // Override image entrypoint for this run. Omit to keep previous value. + Entrypoint []string `json:"entrypoint,omitzero"` + paramObj +} + +func (r InstanceStartParams) MarshalJSON() (data []byte, err error) { + type shadow InstanceStartParams + return param.MarshalObject(r, (*shadow)(&r)) +} +func (r *InstanceStartParams) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +type InstanceStatParams struct { + // Path to stat in the guest filesystem + Path string `query:"path" api:"required" json:"-"` + // Follow symbolic links (like stat vs lstat) + FollowLinks param.Opt[bool] `query:"follow_links,omitzero" json:"-"` + paramObj +} + +// URLQuery serializes [InstanceStatParams]'s query parameters as `url.Values`. 
+func (r InstanceStatParams) URLQuery() (v url.Values, err error) { + return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{ + ArrayFormat: apiquery.ArrayQueryFormatComma, + NestedFormat: apiquery.NestedQueryFormatBrackets, + }) +} diff --git a/sdks/go/instance_test.go b/sdks/go/instance_test.go new file mode 100644 index 00000000..4434e31f --- /dev/null +++ b/sdks/go/instance_test.go @@ -0,0 +1,277 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package hypeman_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/kernel/hypeman-go" + "github.com/kernel/hypeman-go/internal/testutil" + "github.com/kernel/hypeman-go/option" +) + +func TestInstanceNewWithOptionalParams(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Instances.New(context.TODO(), hypeman.InstanceNewParams{ + Image: "docker.io/library/alpine:latest", + Name: "my-workload-1", + Cmd: []string{"echo", "hello"}, + Devices: []string{"l4-gpu"}, + DiskIoBps: hypeman.String("100MB/s"), + Entrypoint: []string{"/bin/sh", "-c"}, + Env: map[string]string{ + "PORT": "3000", + "NODE_ENV": "production", + }, + GPU: hypeman.InstanceNewParamsGPU{ + Profile: hypeman.String("L40S-1Q"), + }, + HotplugSize: hypeman.String("2GB"), + Hypervisor: hypeman.InstanceNewParamsHypervisorCloudHypervisor, + Metadata: map[string]string{ + "team": "backend", + "purpose": "staging", + }, + Network: hypeman.InstanceNewParamsNetwork{ + BandwidthDownload: hypeman.String("1Gbps"), + BandwidthUpload: hypeman.String("1Gbps"), + Enabled: hypeman.Bool(true), + }, + OverlaySize: hypeman.String("20GB"), + Size: hypeman.String("2GB"), + SkipGuestAgent: 
hypeman.Bool(false), + SkipKernelHeaders: hypeman.Bool(true), + Vcpus: hypeman.Int(2), + Volumes: []hypeman.VolumeMountParam{{ + MountPath: "/mnt/data", + VolumeID: "vol-abc123", + Overlay: hypeman.Bool(true), + OverlaySize: hypeman.String("1GB"), + Readonly: hypeman.Bool(true), + }}, + }) + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestInstanceListWithOptionalParams(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Instances.List(context.TODO(), hypeman.InstanceListParams{ + Metadata: map[string]string{ + "foo": "string", + }, + State: hypeman.InstanceListParamsStateCreated, + }) + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestInstanceDelete(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + err := client.Instances.Delete(context.TODO(), "id") + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestInstanceGet(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + 
baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Instances.Get(context.TODO(), "id") + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestInstanceRestore(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Instances.Restore(context.TODO(), "id") + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestInstanceStandby(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Instances.Standby(context.TODO(), "id") + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestInstanceStartWithOptionalParams(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + 
) + _, err := client.Instances.Start( + context.TODO(), + "id", + hypeman.InstanceStartParams{ + Cmd: []string{"string"}, + Entrypoint: []string{"string"}, + }, + ) + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestInstanceStatWithOptionalParams(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Instances.Stat( + context.TODO(), + "id", + hypeman.InstanceStatParams{ + Path: "path", + FollowLinks: hypeman.Bool(true), + }, + ) + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestInstanceStop(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Instances.Stop(context.TODO(), "id") + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/sdks/go/instancevolume.go b/sdks/go/instancevolume.go new file mode 100644 index 00000000..2bc78d5e --- /dev/null +++ b/sdks/go/instancevolume.go @@ -0,0 +1,89 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package hypeman + +import ( + "context" + "errors" + "fmt" + "net/http" + "slices" + + "github.com/kernel/hypeman-go/internal/apijson" + "github.com/kernel/hypeman-go/internal/requestconfig" + "github.com/kernel/hypeman-go/option" + "github.com/kernel/hypeman-go/packages/param" +) + +// InstanceVolumeService contains methods and other services that help with +// interacting with the hypeman API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewInstanceVolumeService] method instead. +type InstanceVolumeService struct { + Options []option.RequestOption +} + +// NewInstanceVolumeService generates a new service that applies the given options +// to each request. These options are applied after the parent client's options (if +// there is one), and before any request-specific options. +func NewInstanceVolumeService(opts ...option.RequestOption) (r InstanceVolumeService) { + r = InstanceVolumeService{} + r.Options = opts + return +} + +// Attach volume to instance +func (r *InstanceVolumeService) Attach(ctx context.Context, volumeID string, params InstanceVolumeAttachParams, opts ...option.RequestOption) (res *Instance, err error) { + opts = slices.Concat(r.Options, opts) + if params.ID == "" { + err = errors.New("missing required id parameter") + return + } + if volumeID == "" { + err = errors.New("missing required volumeId parameter") + return + } + path := fmt.Sprintf("instances/%s/volumes/%s", params.ID, volumeID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, params, &res, opts...) 
+ return +} + +// Detach volume from instance +func (r *InstanceVolumeService) Detach(ctx context.Context, volumeID string, body InstanceVolumeDetachParams, opts ...option.RequestOption) (res *Instance, err error) { + opts = slices.Concat(r.Options, opts) + if body.ID == "" { + err = errors.New("missing required id parameter") + return + } + if volumeID == "" { + err = errors.New("missing required volumeId parameter") + return + } + path := fmt.Sprintf("instances/%s/volumes/%s", body.ID, volumeID) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodDelete, path, nil, &res, opts...) + return +} + +type InstanceVolumeAttachParams struct { + ID string `path:"id" api:"required" json:"-"` + // Path where volume should be mounted + MountPath string `json:"mount_path" api:"required"` + // Mount as read-only + Readonly param.Opt[bool] `json:"readonly,omitzero"` + paramObj +} + +func (r InstanceVolumeAttachParams) MarshalJSON() (data []byte, err error) { + type shadow InstanceVolumeAttachParams + return param.MarshalObject(r, (*shadow)(&r)) +} +func (r *InstanceVolumeAttachParams) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +type InstanceVolumeDetachParams struct { + ID string `path:"id" api:"required" json:"-"` + paramObj +} diff --git a/sdks/go/instancevolume_test.go b/sdks/go/instancevolume_test.go new file mode 100644 index 00000000..30ce7d0e --- /dev/null +++ b/sdks/go/instancevolume_test.go @@ -0,0 +1,74 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package hypeman_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/kernel/hypeman-go" + "github.com/kernel/hypeman-go/internal/testutil" + "github.com/kernel/hypeman-go/option" +) + +func TestInstanceVolumeAttachWithOptionalParams(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Instances.Volumes.Attach( + context.TODO(), + "volumeId", + hypeman.InstanceVolumeAttachParams{ + ID: "id", + MountPath: "/mnt/data", + Readonly: hypeman.Bool(true), + }, + ) + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestInstanceVolumeDetach(t *testing.T) { + t.Skip("Mock server tests are disabled") + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := hypeman.NewClient( + option.WithBaseURL(baseURL), + option.WithAPIKey("My API Key"), + ) + _, err := client.Instances.Volumes.Detach( + context.TODO(), + "volumeId", + hypeman.InstanceVolumeDetachParams{ + ID: "id", + }, + ) + if err != nil { + var apierr *hypeman.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/sdks/go/internal/apierror/apierror.go b/sdks/go/internal/apierror/apierror.go new file mode 100644 index 00000000..aa8b694e --- /dev/null +++ b/sdks/go/internal/apierror/apierror.go @@ -0,0 +1,50 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package apierror + +import ( + "fmt" + "net/http" + "net/http/httputil" + + "github.com/kernel/hypeman-go/internal/apijson" + "github.com/kernel/hypeman-go/packages/respjson" +) + +// Error represents an error that originates from the API, i.e. when a request is +// made and the API returns a response with a HTTP status code. Other errors are +// not wrapped by this SDK. +type Error struct { + // JSON contains metadata for fields, check presence with [respjson.Field.Valid]. + JSON struct { + ExtraFields map[string]respjson.Field + raw string + } `json:"-"` + StatusCode int + Request *http.Request + Response *http.Response +} + +// Returns the unmodified JSON received from the API +func (r Error) RawJSON() string { return r.JSON.raw } +func (r *Error) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +func (r *Error) Error() string { + // Attempt to re-populate the response body + return fmt.Sprintf("%s %q: %d %s %s", r.Request.Method, r.Request.URL, r.Response.StatusCode, http.StatusText(r.Response.StatusCode), r.JSON.raw) +} + +func (r *Error) DumpRequest(body bool) []byte { + if r.Request.GetBody != nil { + r.Request.Body, _ = r.Request.GetBody() + } + out, _ := httputil.DumpRequestOut(r.Request, body) + return out +} + +func (r *Error) DumpResponse(body bool) []byte { + out, _ := httputil.DumpResponse(r.Response, body) + return out +} diff --git a/sdks/go/internal/apiform/encoder.go b/sdks/go/internal/apiform/encoder.go new file mode 100644 index 00000000..a2f1516c --- /dev/null +++ b/sdks/go/internal/apiform/encoder.go @@ -0,0 +1,473 @@ +package apiform + +import ( + "fmt" + "io" + "mime/multipart" + "net/textproto" + "path" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/kernel/hypeman-go/packages/param" +) + +var encoders sync.Map // map[encoderEntry]encoderFunc + +func Marshal(value any, writer *multipart.Writer) error { + e := &encoder{ + dateFormat: time.RFC3339, + arrayFmt: "comma", + } + 
return e.marshal(value, writer) +} + +func MarshalRoot(value any, writer *multipart.Writer) error { + e := &encoder{ + root: true, + dateFormat: time.RFC3339, + arrayFmt: "comma", + } + return e.marshal(value, writer) +} + +func MarshalWithSettings(value any, writer *multipart.Writer, arrayFormat string) error { + e := &encoder{ + arrayFmt: arrayFormat, + dateFormat: time.RFC3339, + } + return e.marshal(value, writer) +} + +type encoder struct { + arrayFmt string + dateFormat string + root bool +} + +type encoderFunc func(key string, value reflect.Value, writer *multipart.Writer) error + +type encoderField struct { + tag parsedStructTag + fn encoderFunc + idx []int +} + +type encoderEntry struct { + reflect.Type + dateFormat string + arrayFmt string + root bool +} + +func (e *encoder) marshal(value any, writer *multipart.Writer) error { + val := reflect.ValueOf(value) + if !val.IsValid() { + return nil + } + typ := val.Type() + enc := e.typeEncoder(typ) + return enc("", val, writer) +} + +func (e *encoder) typeEncoder(t reflect.Type) encoderFunc { + entry := encoderEntry{ + Type: t, + dateFormat: e.dateFormat, + arrayFmt: e.arrayFmt, + root: e.root, + } + + if fi, ok := encoders.Load(entry); ok { + return fi.(encoderFunc) + } + + // To deal with recursive types, populate the map with an + // indirect func before we build it. This type waits on the + // real func (f) to be ready and then calls it. This indirect + // func is only used for recursive types. + var ( + wg sync.WaitGroup + f encoderFunc + ) + wg.Add(1) + fi, loaded := encoders.LoadOrStore(entry, encoderFunc(func(key string, v reflect.Value, writer *multipart.Writer) error { + wg.Wait() + return f(key, v, writer) + })) + if loaded { + return fi.(encoderFunc) + } + + // Compute the real encoder and replace the indirect func with it. 
+ f = e.newTypeEncoder(t) + wg.Done() + encoders.Store(entry, f) + return f +} + +func (e *encoder) newTypeEncoder(t reflect.Type) encoderFunc { + if t.ConvertibleTo(reflect.TypeOf(time.Time{})) { + return e.newTimeTypeEncoder() + } + if t.Implements(reflect.TypeOf((*io.Reader)(nil)).Elem()) { + return e.newReaderTypeEncoder() + } + e.root = false + switch t.Kind() { + case reflect.Pointer: + inner := t.Elem() + + innerEncoder := e.typeEncoder(inner) + return func(key string, v reflect.Value, writer *multipart.Writer) error { + if !v.IsValid() || v.IsNil() { + return nil + } + return innerEncoder(key, v.Elem(), writer) + } + case reflect.Struct: + return e.newStructTypeEncoder(t) + case reflect.Slice, reflect.Array: + return e.newArrayTypeEncoder(t) + case reflect.Map: + return e.newMapEncoder(t) + case reflect.Interface: + return e.newInterfaceEncoder() + default: + return e.newPrimitiveTypeEncoder(t) + } +} + +func (e *encoder) newPrimitiveTypeEncoder(t reflect.Type) encoderFunc { + switch t.Kind() { + // Note that we could use `gjson` to encode these types but it would complicate our + // code more and this current code shouldn't cause any issues + case reflect.String: + return func(key string, v reflect.Value, writer *multipart.Writer) error { + return writer.WriteField(key, v.String()) + } + case reflect.Bool: + return func(key string, v reflect.Value, writer *multipart.Writer) error { + if v.Bool() { + return writer.WriteField(key, "true") + } + return writer.WriteField(key, "false") + } + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: + return func(key string, v reflect.Value, writer *multipart.Writer) error { + return writer.WriteField(key, strconv.FormatInt(v.Int(), 10)) + } + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return func(key string, v reflect.Value, writer *multipart.Writer) error { + return writer.WriteField(key, strconv.FormatUint(v.Uint(), 10)) + } + case reflect.Float32: + return func(key string, v 
reflect.Value, writer *multipart.Writer) error { + return writer.WriteField(key, strconv.FormatFloat(v.Float(), 'f', -1, 32)) + } + case reflect.Float64: + return func(key string, v reflect.Value, writer *multipart.Writer) error { + return writer.WriteField(key, strconv.FormatFloat(v.Float(), 'f', -1, 64)) + } + default: + return func(key string, v reflect.Value, writer *multipart.Writer) error { + return fmt.Errorf("unknown type received at primitive encoder: %s", t.String()) + } + } +} + +func (e *encoder) newArrayTypeEncoder(t reflect.Type) encoderFunc { + itemEncoder := e.typeEncoder(t.Elem()) + keyFn := e.arrayKeyEncoder() + return func(key string, v reflect.Value, writer *multipart.Writer) error { + if keyFn == nil { + return fmt.Errorf("apiform: unsupported array format") + } + for i := 0; i < v.Len(); i++ { + err := itemEncoder(keyFn(key, i), v.Index(i), writer) + if err != nil { + return err + } + } + return nil + } +} + +func (e *encoder) newStructTypeEncoder(t reflect.Type) encoderFunc { + if t.Implements(reflect.TypeOf((*param.Optional)(nil)).Elem()) { + return e.newRichFieldTypeEncoder(t) + } + + for i := 0; i < t.NumField(); i++ { + if t.Field(i).Type == paramUnionType && t.Field(i).Anonymous { + return e.newStructUnionTypeEncoder(t) + } + } + + encoderFields := []encoderField{} + extraEncoder := (*encoderField)(nil) + + // This helper allows us to recursively collect field encoders into a flat + // array. The parameter `index` keeps track of the access patterns necessary + // to get to some field. + var collectEncoderFields func(r reflect.Type, index []int) + collectEncoderFields = func(r reflect.Type, index []int) { + for i := 0; i < r.NumField(); i++ { + idx := append(index, i) + field := t.FieldByIndex(idx) + if !field.IsExported() { + continue + } + // If this is an embedded struct, traverse one level deeper to extract + // the field and get their encoders as well. 
+ if field.Anonymous { + collectEncoderFields(field.Type, idx) + continue + } + // If json tag is not present, then we skip, which is intentionally + // different behavior from the stdlib. + ptag, ok := parseFormStructTag(field) + if !ok { + continue + } + // We only want to support unexported field if they're tagged with + // `extras` because that field shouldn't be part of the public API. We + // also want to only keep the top level extras + if ptag.extras && len(index) == 0 { + extraEncoder = &encoderField{ptag, e.typeEncoder(field.Type.Elem()), idx} + continue + } + if ptag.name == "-" || ptag.name == "" { + continue + } + + dateFormat, ok := parseFormatStructTag(field) + oldFormat := e.dateFormat + if ok { + switch dateFormat { + case "date-time": + e.dateFormat = time.RFC3339 + case "date": + e.dateFormat = "2006-01-02" + } + } + + var encoderFn encoderFunc + if ptag.omitzero { + typeEncoderFn := e.typeEncoder(field.Type) + encoderFn = func(key string, value reflect.Value, writer *multipart.Writer) error { + if value.IsZero() { + return nil + } + return typeEncoderFn(key, value, writer) + } + } else { + encoderFn = e.typeEncoder(field.Type) + } + encoderFields = append(encoderFields, encoderField{ptag, encoderFn, idx}) + e.dateFormat = oldFormat + } + } + collectEncoderFields(t, []int{}) + + // Ensure deterministic output by sorting by lexicographic order + sort.Slice(encoderFields, func(i, j int) bool { + return encoderFields[i].tag.name < encoderFields[j].tag.name + }) + + return func(key string, value reflect.Value, writer *multipart.Writer) error { + keyFn := e.objKeyEncoder(key) + for _, ef := range encoderFields { + field := value.FieldByIndex(ef.idx) + err := ef.fn(keyFn(ef.tag.name), field, writer) + if err != nil { + return err + } + } + + if extraEncoder != nil { + err := e.encodeMapEntries(key, value.FieldByIndex(extraEncoder.idx), writer) + if err != nil { + return err + } + } + + return nil + } +} + +var paramUnionType = 
reflect.TypeOf((*param.APIUnion)(nil)).Elem() + +func (e *encoder) newStructUnionTypeEncoder(t reflect.Type) encoderFunc { + var fieldEncoders []encoderFunc + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.Type == paramUnionType && field.Anonymous { + fieldEncoders = append(fieldEncoders, nil) + continue + } + fieldEncoders = append(fieldEncoders, e.typeEncoder(field.Type)) + } + + return func(key string, value reflect.Value, writer *multipart.Writer) error { + for i := 0; i < t.NumField(); i++ { + if value.Field(i).Type() == paramUnionType { + continue + } + if !value.Field(i).IsZero() { + return fieldEncoders[i](key, value.Field(i), writer) + } + } + return fmt.Errorf("apiform: union %s has no field set", t.String()) + } +} + +func (e *encoder) newTimeTypeEncoder() encoderFunc { + format := e.dateFormat + return func(key string, value reflect.Value, writer *multipart.Writer) error { + return writer.WriteField(key, value.Convert(reflect.TypeOf(time.Time{})).Interface().(time.Time).Format(format)) + } +} + +func (e encoder) newInterfaceEncoder() encoderFunc { + return func(key string, value reflect.Value, writer *multipart.Writer) error { + value = value.Elem() + if !value.IsValid() { + return nil + } + return e.typeEncoder(value.Type())(key, value, writer) + } +} + +var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"") + +func escapeQuotes(s string) string { + return quoteEscaper.Replace(s) +} + +func (e *encoder) newReaderTypeEncoder() encoderFunc { + return func(key string, value reflect.Value, writer *multipart.Writer) error { + reader, ok := value.Convert(reflect.TypeOf((*io.Reader)(nil)).Elem()).Interface().(io.Reader) + if !ok { + return nil + } + filename := "anonymous_file" + contentType := "application/octet-stream" + if named, ok := reader.(interface{ Filename() string }); ok { + filename = named.Filename() + } else if named, ok := reader.(interface{ Name() string }); ok { + filename = path.Base(named.Name()) + } + if 
typed, ok := reader.(interface{ ContentType() string }); ok { + contentType = typed.ContentType() + } + + // Below is taken almost 1-for-1 from [multipart.CreateFormFile] + h := make(textproto.MIMEHeader) + h.Set("Content-Disposition", fmt.Sprintf(`form-data; name="%s"; filename="%s"`, escapeQuotes(key), escapeQuotes(filename))) + h.Set("Content-Type", contentType) + filewriter, err := writer.CreatePart(h) + if err != nil { + return err + } + _, err = io.Copy(filewriter, reader) + return err + } +} + +func (e encoder) arrayKeyEncoder() func(string, int) string { + var keyFn func(string, int) string + switch e.arrayFmt { + case "comma", "repeat": + keyFn = func(k string, _ int) string { return k } + case "brackets": + keyFn = func(key string, _ int) string { return key + "[]" } + case "indices:dots": + keyFn = func(k string, i int) string { + if k == "" { + return strconv.Itoa(i) + } + return k + "." + strconv.Itoa(i) + } + case "indices:brackets": + keyFn = func(k string, i int) string { + if k == "" { + return strconv.Itoa(i) + } + return k + "[" + strconv.Itoa(i) + "]" + } + } + return keyFn +} + +func (e encoder) objKeyEncoder(parent string) func(string) string { + if parent == "" { + return func(child string) string { return child } + } + switch e.arrayFmt { + case "brackets": + return func(child string) string { return parent + "[" + child + "]" } + default: + return func(child string) string { return parent + "." + child } + } +} + +// Given a []byte of json (may either be an empty object or an object that already contains entries) +// encode all of the entries in the map to the json byte array. 
+func (e *encoder) encodeMapEntries(key string, v reflect.Value, writer *multipart.Writer) error { + type mapPair struct { + key string + value reflect.Value + } + + pairs := []mapPair{} + + iter := v.MapRange() + for iter.Next() { + if iter.Key().Type().Kind() == reflect.String { + pairs = append(pairs, mapPair{key: iter.Key().String(), value: iter.Value()}) + } else { + return fmt.Errorf("cannot encode a map with a non string key") + } + } + + // Ensure deterministic output + sort.Slice(pairs, func(i, j int) bool { + return pairs[i].key < pairs[j].key + }) + + elementEncoder := e.typeEncoder(v.Type().Elem()) + keyFn := e.objKeyEncoder(key) + for _, p := range pairs { + err := elementEncoder(keyFn(p.key), p.value, writer) + if err != nil { + return err + } + } + + return nil +} + +func (e *encoder) newMapEncoder(_ reflect.Type) encoderFunc { + return func(key string, value reflect.Value, writer *multipart.Writer) error { + return e.encodeMapEntries(key, value, writer) + } +} + +func WriteExtras(writer *multipart.Writer, extras map[string]any) (err error) { + for k, v := range extras { + str, ok := v.(string) + if !ok { + break + } + err = writer.WriteField(k, str) + if err != nil { + break + } + } + return +} diff --git a/sdks/go/internal/apiform/form.go b/sdks/go/internal/apiform/form.go new file mode 100644 index 00000000..5445116e --- /dev/null +++ b/sdks/go/internal/apiform/form.go @@ -0,0 +1,5 @@ +package apiform + +type Marshaler interface { + MarshalMultipart() ([]byte, string, error) +} diff --git a/sdks/go/internal/apiform/form_test.go b/sdks/go/internal/apiform/form_test.go new file mode 100644 index 00000000..2884602a --- /dev/null +++ b/sdks/go/internal/apiform/form_test.go @@ -0,0 +1,609 @@ +package apiform + +import ( + "bytes" + "github.com/kernel/hypeman-go/packages/param" + "io" + "mime/multipart" + "strings" + "testing" + "time" +) + +func P[T any](v T) *T { return &v } + +type Primitives struct { + A bool `form:"a"` + B int `form:"b"` + C uint 
`form:"c"` + D float64 `form:"d"` + E float32 `form:"e"` + F []int `form:"f"` +} + +// These aliases are necessary to bypass the cache. +// This only relevant during testing. +type int_ int +type PrimitivesBrackets struct { + F []int_ `form:"f"` +} + +type PrimitivePointers struct { + A *bool `form:"a"` + B *int `form:"b"` + C *uint `form:"c"` + D *float64 `form:"d"` + E *float32 `form:"e"` + F *[]int `form:"f"` +} + +type Slices struct { + Slice []Primitives `form:"slices"` +} + +type DateTime struct { + Date time.Time `form:"date" format:"date"` + DateTime time.Time `form:"date-time" format:"date-time"` +} + +type AdditionalProperties struct { + A bool `form:"a"` + Extras map[string]any `form:"-" api:"extrafields"` +} + +type TypedAdditionalProperties struct { + A bool `form:"a"` + Extras map[string]int `form:"-" api:"extrafields"` +} + +type EmbeddedStructs struct { + AdditionalProperties + A *int `form:"number2"` + Extras map[string]any `form:"-" api:"extrafields"` +} + +type Recursive struct { + Name string `form:"name"` + Child *Recursive `form:"child"` +} + +type UnknownStruct struct { + Unknown any `form:"unknown"` +} + +type UnionStruct struct { + Union Union `form:"union" format:"date"` +} + +type Union interface { + union() +} + +type UnionInteger int64 + +func (UnionInteger) union() {} + +type UnionStructA struct { + Type string `form:"type"` + A string `form:"a"` + B string `form:"b"` +} + +func (UnionStructA) union() {} + +type UnionStructB struct { + Type string `form:"type"` + A string `form:"a"` +} + +func (UnionStructB) union() {} + +type UnionTime time.Time + +func (UnionTime) union() {} + +type ReaderStruct struct { + File io.Reader `form:"file"` +} + +type NamedEnum string + +const NamedEnumFoo NamedEnum = "foo" + +type StructUnionWrapper struct { + Union StructUnion `form:"union"` +} + +type StructUnion struct { + OfInt param.Opt[int64] `form:",omitzero,inline"` + OfString param.Opt[string] `form:",omitzero,inline"` + OfEnum 
param.Opt[NamedEnum] `form:",omitzero,inline"` + OfA UnionStructA `form:",omitzero,inline"` + OfB UnionStructB `form:",omitzero,inline"` + param.APIUnion +} + +type MultipartMarshalerParent struct { + Middle MultipartMarshalerMiddleNext `form:"middle"` +} + +type MultipartMarshalerMiddleNext struct { + MiddleNext MultipartMarshalerMiddle `form:"middleNext"` +} + +type MultipartMarshalerMiddle struct { + Child int `form:"child"` +} + +var tests = map[string]struct { + buf string + val any +}{ + "file": { + buf: `--xxx +Content-Disposition: form-data; name="file"; filename="anonymous_file" +Content-Type: application/octet-stream + +some file contents... +--xxx-- +`, + val: ReaderStruct{ + File: io.Reader(bytes.NewBuffer([]byte("some file contents..."))), + }, + }, + "map_string": { + `--xxx +Content-Disposition: form-data; name="foo" + +bar +--xxx-- +`, + map[string]string{"foo": "bar"}, + }, + + "map_interface": { + `--xxx +Content-Disposition: form-data; name="a" + +1 +--xxx +Content-Disposition: form-data; name="b" + +str +--xxx +Content-Disposition: form-data; name="c" + +false +--xxx-- +`, + map[string]any{"a": float64(1), "b": "str", "c": false}, + }, + + "primitive_struct": { + `--xxx +Content-Disposition: form-data; name="a" + +false +--xxx +Content-Disposition: form-data; name="b" + +237628372683 +--xxx +Content-Disposition: form-data; name="c" + +654 +--xxx +Content-Disposition: form-data; name="d" + +9999.43 +--xxx +Content-Disposition: form-data; name="e" + +43.76 +--xxx +Content-Disposition: form-data; name="f.0" + +1 +--xxx +Content-Disposition: form-data; name="f.1" + +2 +--xxx +Content-Disposition: form-data; name="f.2" + +3 +--xxx +Content-Disposition: form-data; name="f.3" + +4 +--xxx-- +`, + Primitives{A: false, B: 237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}, + }, + "primitive_struct,brackets": { + `--xxx +Content-Disposition: form-data; name="f[]" + +1 +--xxx +Content-Disposition: form-data; name="f[]" + +2 +--xxx 
+Content-Disposition: form-data; name="f[]" + +3 +--xxx +Content-Disposition: form-data; name="f[]" + +4 +--xxx-- +`, + PrimitivesBrackets{F: []int_{1, 2, 3, 4}}, + }, + + "slices": { + `--xxx +Content-Disposition: form-data; name="slices.0.a" + +false +--xxx +Content-Disposition: form-data; name="slices.0.b" + +237628372683 +--xxx +Content-Disposition: form-data; name="slices.0.c" + +654 +--xxx +Content-Disposition: form-data; name="slices.0.d" + +9999.43 +--xxx +Content-Disposition: form-data; name="slices.0.e" + +43.76 +--xxx +Content-Disposition: form-data; name="slices.0.f.0" + +1 +--xxx +Content-Disposition: form-data; name="slices.0.f.1" + +2 +--xxx +Content-Disposition: form-data; name="slices.0.f.2" + +3 +--xxx +Content-Disposition: form-data; name="slices.0.f.3" + +4 +--xxx-- +`, + Slices{ + Slice: []Primitives{{A: false, B: 237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}}, + }, + }, + "primitive_pointer_struct": { + `--xxx +Content-Disposition: form-data; name="a" + +false +--xxx +Content-Disposition: form-data; name="b" + +237628372683 +--xxx +Content-Disposition: form-data; name="c" + +654 +--xxx +Content-Disposition: form-data; name="d" + +9999.43 +--xxx +Content-Disposition: form-data; name="e" + +43.76 +--xxx +Content-Disposition: form-data; name="f.0" + +1 +--xxx +Content-Disposition: form-data; name="f.1" + +2 +--xxx +Content-Disposition: form-data; name="f.2" + +3 +--xxx +Content-Disposition: form-data; name="f.3" + +4 +--xxx +Content-Disposition: form-data; name="f.4" + +5 +--xxx-- +`, + PrimitivePointers{ + A: P(false), + B: P(237628372683), + C: P(uint(654)), + D: P(9999.43), + E: P(float32(43.76)), + F: &[]int{1, 2, 3, 4, 5}, + }, + }, + + "datetime_struct": { + `--xxx +Content-Disposition: form-data; name="date" + +2006-01-02 +--xxx +Content-Disposition: form-data; name="date-time" + +2006-01-02T15:04:05Z +--xxx-- +`, + DateTime{ + Date: time.Date(2006, time.January, 2, 0, 0, 0, 0, time.UTC), + DateTime: 
time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC), + }, + }, + + "additional_properties": { + `--xxx +Content-Disposition: form-data; name="a" + +true +--xxx +Content-Disposition: form-data; name="bar" + +value +--xxx +Content-Disposition: form-data; name="foo" + +true +--xxx-- +`, + AdditionalProperties{ + A: true, + Extras: map[string]any{ + "bar": "value", + "foo": true, + }, + }, + }, + "recursive_struct,brackets": { + `--xxx +Content-Disposition: form-data; name="child[name]" + +Alex +--xxx +Content-Disposition: form-data; name="name" + +Robert +--xxx-- +`, + Recursive{Name: "Robert", Child: &Recursive{Name: "Alex"}}, + }, + + "recursive_struct": { + `--xxx +Content-Disposition: form-data; name="child.name" + +Alex +--xxx +Content-Disposition: form-data; name="name" + +Robert +--xxx-- +`, + Recursive{Name: "Robert", Child: &Recursive{Name: "Alex"}}, + }, + + "unknown_struct_number": { + `--xxx +Content-Disposition: form-data; name="unknown" + +12 +--xxx-- +`, + UnknownStruct{ + Unknown: 12., + }, + }, + + "unknown_struct_map": { + `--xxx +Content-Disposition: form-data; name="unknown.foo" + +bar +--xxx-- +`, + UnknownStruct{ + Unknown: map[string]any{ + "foo": "bar", + }, + }, + }, + + "struct_union_integer": { + `--xxx +Content-Disposition: form-data; name="union" + +12 +--xxx-- +`, + StructUnionWrapper{ + Union: StructUnion{OfInt: param.NewOpt[int64](12)}, + }, + }, + + "union_integer": { + `--xxx +Content-Disposition: form-data; name="union" + +12 +--xxx-- +`, + UnionStruct{ + Union: UnionInteger(12), + }, + }, + + "struct_union_struct_discriminated_a": { + `--xxx +Content-Disposition: form-data; name="union.a" + +foo +--xxx +Content-Disposition: form-data; name="union.b" + +bar +--xxx +Content-Disposition: form-data; name="union.type" + +typeA +--xxx-- +`, + StructUnionWrapper{ + Union: StructUnion{OfA: UnionStructA{ + Type: "typeA", + A: "foo", + B: "bar", + }}, + }, + }, + + "union_struct_discriminated_a": { + `--xxx +Content-Disposition: 
form-data; name="union.a" + +foo +--xxx +Content-Disposition: form-data; name="union.b" + +bar +--xxx +Content-Disposition: form-data; name="union.type" + +typeA +--xxx-- +`, + + UnionStruct{ + Union: UnionStructA{ + Type: "typeA", + A: "foo", + B: "bar", + }, + }, + }, + + "struct_union_struct_discriminated_b": { + `--xxx +Content-Disposition: form-data; name="union.a" + +foo +--xxx +Content-Disposition: form-data; name="union.type" + +typeB +--xxx-- +`, + StructUnionWrapper{ + Union: StructUnion{OfB: UnionStructB{ + Type: "typeB", + A: "foo", + }}, + }, + }, + + "union_struct_discriminated_b": { + `--xxx +Content-Disposition: form-data; name="union.a" + +foo +--xxx +Content-Disposition: form-data; name="union.type" + +typeB +--xxx-- +`, + UnionStruct{ + Union: UnionStructB{ + Type: "typeB", + A: "foo", + }, + }, + }, + + "union_struct_time": { + `--xxx +Content-Disposition: form-data; name="union" + +2010-05-23 +--xxx-- +`, + UnionStruct{ + Union: UnionTime(time.Date(2010, 05, 23, 0, 0, 0, 0, time.UTC)), + }, + }, + "deeply-nested-struct,brackets": { + `--xxx +Content-Disposition: form-data; name="middle[middleNext][child]" + +10 +--xxx-- +`, + MultipartMarshalerParent{ + Middle: MultipartMarshalerMiddleNext{ + MiddleNext: MultipartMarshalerMiddle{ + Child: 10, + }, + }, + }, + }, + "deeply-nested-map,brackets": { + `--xxx +Content-Disposition: form-data; name="middle[middleNext][child]" + +10 +--xxx-- +`, + map[string]any{"middle": map[string]any{"middleNext": map[string]any{"child": 10}}}, + }, +} + +func TestEncode(t *testing.T) { + for name, test := range tests { + t.Run(name, func(t *testing.T) { + buf := bytes.NewBuffer(nil) + writer := multipart.NewWriter(buf) + writer.SetBoundary("xxx") + + var arrayFmt string = "indices:dots" + if tags := strings.Split(name, ","); len(tags) > 1 { + arrayFmt = tags[1] + } + + err := MarshalWithSettings(test.val, writer, arrayFmt) + if err != nil { + t.Errorf("serialization of %v failed with error %v", test.val, err) + } + 
err = writer.Close() + if err != nil { + t.Errorf("serialization of %v failed with error %v", test.val, err) + } + raw := buf.Bytes() + if string(raw) != strings.ReplaceAll(test.buf, "\n", "\r\n") { + t.Errorf("expected %+#v to serialize to '%s' but got '%s' (with format %s)", test.val, test.buf, string(raw), arrayFmt) + } + }) + } +} diff --git a/sdks/go/internal/apiform/richparam.go b/sdks/go/internal/apiform/richparam.go new file mode 100644 index 00000000..fab4fa15 --- /dev/null +++ b/sdks/go/internal/apiform/richparam.go @@ -0,0 +1,20 @@ +package apiform + +import ( + "github.com/kernel/hypeman-go/packages/param" + "mime/multipart" + "reflect" +) + +func (e *encoder) newRichFieldTypeEncoder(t reflect.Type) encoderFunc { + f, _ := t.FieldByName("Value") + enc := e.newPrimitiveTypeEncoder(f.Type) + return func(key string, value reflect.Value, writer *multipart.Writer) error { + if opt, ok := value.Interface().(param.Optional); ok && opt.Valid() { + return enc(key, value.FieldByIndex(f.Index), writer) + } else if ok && param.IsNull(opt) { + return writer.WriteField(key, "null") + } + return nil + } +} diff --git a/sdks/go/internal/apiform/tag.go b/sdks/go/internal/apiform/tag.go new file mode 100644 index 00000000..b3536176 --- /dev/null +++ b/sdks/go/internal/apiform/tag.go @@ -0,0 +1,70 @@ +package apiform + +import ( + "reflect" + "strings" +) + +const apiStructTag = "api" +const jsonStructTag = "json" +const formStructTag = "form" +const formatStructTag = "format" + +type parsedStructTag struct { + name string + required bool + extras bool + metadata bool + omitzero bool +} + +func parseFormStructTag(field reflect.StructField) (tag parsedStructTag, ok bool) { + raw, ok := field.Tag.Lookup(formStructTag) + if !ok { + raw, ok = field.Tag.Lookup(jsonStructTag) + } + if !ok { + return + } + parts := strings.Split(raw, ",") + if len(parts) == 0 { + return tag, false + } + tag.name = parts[0] + for _, part := range parts[1:] { + switch part { + case "required": + 
tag.required = true + case "extras": + tag.extras = true + case "metadata": + tag.metadata = true + case "omitzero": + tag.omitzero = true + } + } + + parseApiStructTag(field, &tag) + return +} + +func parseApiStructTag(field reflect.StructField, tag *parsedStructTag) { + raw, ok := field.Tag.Lookup(apiStructTag) + if !ok { + return + } + parts := strings.Split(raw, ",") + for _, part := range parts { + switch part { + case "extrafields": + tag.extras = true + case "required": + tag.required = true + } + } +} + +func parseFormatStructTag(field reflect.StructField) (format string, ok bool) { + format, ok = field.Tag.Lookup(formatStructTag) + return +} diff --git a/sdks/go/internal/apijson/decodeparam_test.go b/sdks/go/internal/apijson/decodeparam_test.go new file mode 100644 index 00000000..d2a7c71b --- /dev/null +++ b/sdks/go/internal/apijson/decodeparam_test.go @@ -0,0 +1,498 @@ +package apijson_test + +import ( + "encoding/json" + "fmt" + "github.com/kernel/hypeman-go/internal/apijson" + "github.com/kernel/hypeman-go/packages/param" + "reflect" + "testing" +) + +func TestOptionalDecoders(t *testing.T) { + cases := map[string]struct { + buf string + val any + }{ + + "opt_string_present": { + `"hello"`, + param.NewOpt("hello"), + }, + "opt_string_empty_present": { + `""`, + param.NewOpt(""), + }, + "opt_string_null": { + `null`, + param.Null[string](), + }, + "opt_string_null_with_whitespace": { + ` null `, + param.Null[string](), + }, + } + + for name, test := range cases { + t.Run(name, func(t *testing.T) { + result := reflect.New(reflect.TypeOf(test.val)) + if err := json.Unmarshal([]byte(test.buf), result.Interface()); err != nil { + t.Fatalf("deserialization of %v failed with error %v", result, err) + } + + if !reflect.DeepEqual(result.Elem().Interface(), test.val) { + t.Fatalf("expected '%s' to deserialize to \n%#v\nbut got\n%#v", test.buf, test.val, result.Elem().Interface()) + } + }) + } +} + +type paramObject = param.APIObject + +type BasicObject struct { 
+ ReqInt int64 `json:"req_int" api:"required"` + ReqFloat float64 `json:"req_float" api:"required"` + ReqString string `json:"req_string" api:"required"` + ReqBool bool `json:"req_bool" api:"required"` + + OptInt param.Opt[int64] `json:"opt_int"` + OptFloat param.Opt[float64] `json:"opt_float"` + OptString param.Opt[string] `json:"opt_string"` + OptBool param.Opt[bool] `json:"opt_bool"` + + paramObject +} + +func (o *BasicObject) UnmarshalJSON(data []byte) error { return apijson.UnmarshalRoot(data, o) } + +func TestBasicObjectWithNull(t *testing.T) { + raw := `{"opt_int":null,"opt_string":null,"opt_bool":null}` + var dst BasicObject + target := BasicObject{ + OptInt: param.Null[int64](), + // OptFloat: param.Opt[float64]{}, + OptString: param.Null[string](), + OptBool: param.Null[bool](), + } + + err := json.Unmarshal([]byte(raw), &dst) + if err != nil { + t.Fatalf("failed unmarshal") + } + + if !reflect.DeepEqual(dst, target) { + t.Fatalf("failed equality check %#v", dst) + } +} + +func TestBasicObject(t *testing.T) { + raw := `{"req_int":1,"req_float":1.3,"req_string":"test","req_bool":true,"opt_int":2,"opt_float":2.0,"opt_string":"test","opt_bool":false}` + var dst BasicObject + target := BasicObject{ + ReqInt: 1, + ReqFloat: 1.3, + ReqString: "test", + ReqBool: true, + OptInt: param.NewOpt[int64](2), + OptFloat: param.NewOpt(2.0), + OptString: param.NewOpt("test"), + OptBool: param.NewOpt(false), + } + + err := json.Unmarshal([]byte(raw), &dst) + if err != nil { + t.Fatalf("failed unmarshal") + } + + if !reflect.DeepEqual(dst, target) { + t.Fatalf("failed equality check %#v", dst) + } +} + +type ComplexObject struct { + Basic BasicObject `json:"basic" api:"required"` + Enum string `json:"enum"` + paramObject +} + +func (o *ComplexObject) UnmarshalJSON(data []byte) error { return apijson.UnmarshalRoot(data, o) } + +func init() { + apijson.RegisterFieldValidator[ComplexObject]("enum", "a", "b", "c") +} + +func TestComplexObject(t *testing.T) { + raw := 
`{"basic":{"req_int":1,"req_float":1.3,"req_string":"test","req_bool":true,"opt_int":2,"opt_float":2.0,"opt_string":"test","opt_bool":false},"enum":"a"}` + var dst ComplexObject + + target := ComplexObject{ + Basic: BasicObject{ + ReqInt: 1, + ReqFloat: 1.3, + ReqString: "test", + ReqBool: true, + OptInt: param.NewOpt[int64](2), + OptFloat: param.NewOpt(2.0), + OptString: param.NewOpt("test"), + OptBool: param.NewOpt(false), + }, + Enum: "a", + } + + err := json.Unmarshal([]byte(raw), &dst) + if err != nil { + t.Fatalf("failed unmarshal") + } + + if !reflect.DeepEqual(dst, target) { + t.Fatalf("failed equality check %#v", dst) + } +} + +type paramUnion = param.APIUnion + +type MemberA struct { + Name string `json:"name" api:"required"` + Age int `json:"age" api:"required"` +} + +type MemberB struct { + Name string `json:"name" api:"required"` + Age string `json:"age" api:"required"` +} + +type MemberC struct { + Name string `json:"name" api:"required"` + Age int `json:"age" api:"required"` + Status string `json:"status"` +} + +type MemberD struct { + Cost int `json:"cost" api:"required"` + Status string `json:"status" api:"required"` +} + +type MemberE struct { + Cost int `json:"cost" api:"required"` + Status string `json:"status" api:"required"` +} + +type MemberF struct { + D int `json:"d"` + E string `json:"e"` + F float64 `json:"f"` + G param.Opt[int] `json:"g"` +} + +type MemberG struct { + D int `json:"d"` + E string `json:"e"` + F float64 `json:"f"` + G param.Opt[bool] `json:"g"` +} + +func init() { + apijson.RegisterFieldValidator[MemberD]("status", "good", "ok", "bad") + apijson.RegisterFieldValidator[MemberE]("status", "GOOD", "OK", "BAD") +} + +type UnionStruct struct { + OfMemberA *MemberA `json:",inline"` + OfMemberB *MemberB `json:",inline"` + OfMemberC *MemberC `json:",inline"` + OfMemberD *MemberD `json:",inline"` + OfMemberE *MemberE `json:",inline"` + OfMemberF *MemberF `json:",inline"` + OfMemberG *MemberG `json:",inline"` + OfString 
param.Opt[string] `json:",inline"` + + paramUnion +} + +func (union *UnionStruct) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, union) +} + +func TestUnionStruct(t *testing.T) { + tests := map[string]struct { + raw string + target UnionStruct + shouldFail bool + }{ + "fail": { + raw: `1200`, + target: UnionStruct{}, + shouldFail: true, + }, + "easy": { + raw: `{"age":30}`, + target: UnionStruct{OfMemberA: &MemberA{Age: 30}}, + }, + "less-easy": { + raw: `{"age":"thirty"}`, + target: UnionStruct{OfMemberB: &MemberB{Age: "thirty"}}, + }, + "even-less-easy": { + raw: `{"age":"30"}`, + target: UnionStruct{OfMemberB: &MemberB{Age: "30"}}, + }, + "medium": { + raw: `{"name":"jacob","age":30}`, + target: UnionStruct{OfMemberA: &MemberA{ + Age: 30, + Name: "jacob", + }}, + }, + "less-medium": { + raw: `{"name":"jacob","age":"thirty"}`, + target: UnionStruct{OfMemberB: &MemberB{ + Age: "thirty", + Name: "jacob", + }}, + }, + "even-less-medium": { + raw: `{"name":"jacob","age":"30"}`, + target: UnionStruct{OfMemberB: &MemberB{ + Name: "jacob", + Age: "30", + }}, + }, + "hard": { + raw: `{"name":"jacob","age":30,"status":"active"}`, + target: UnionStruct{OfMemberC: &MemberC{ + Name: "jacob", + Age: 30, + Status: "active", + }}, + }, + "inline-string": { + raw: `"hello there"`, + target: UnionStruct{OfString: param.NewOpt("hello there")}, + }, + "enum-field": { + raw: `{"cost":100,"status":"ok"}`, + target: UnionStruct{OfMemberD: &MemberD{Cost: 100, Status: "ok"}}, + }, + "other-enum-field": { + raw: `{"cost":100,"status":"GOOD"}`, + target: UnionStruct{OfMemberE: &MemberE{Cost: 100, Status: "GOOD"}}, + }, + "tricky-extra-fields": { + raw: `{"d":12,"e":"hello","f":1.00}`, + target: UnionStruct{OfMemberF: &MemberF{D: 12, E: "hello", F: 1.00}}, + }, + "optional-fields": { + raw: `{"d":12,"e":"hello","f":1.00,"g":12}`, + target: UnionStruct{OfMemberF: &MemberF{D: 12, E: "hello", F: 1.00, G: param.NewOpt(12)}}, + }, + "optional-fields-2": { + raw: 
`{"d":12,"e":"hello","f":1.00,"g":false}`, + target: UnionStruct{OfMemberG: &MemberG{D: 12, E: "hello", F: 1.00, G: param.NewOpt(false)}}, + }, + } + + for name, test := range tests { + var dst UnionStruct + t.Run(name, func(t *testing.T) { + err := json.Unmarshal([]byte(test.raw), &dst) + if err != nil && !test.shouldFail { + t.Fatalf("failed unmarshal with err: %v %#v", err, dst) + } + + if !reflect.DeepEqual(dst, test.target) { + if dst.OfMemberA != nil { + fmt.Printf("%#v", dst.OfMemberA) + } + t.Fatalf("failed equality, got %#v but expected %#v", dst, test.target) + } + }) + } +} + +type ConstantA string +type ConstantB string +type ConstantC string + +func (c ConstantA) Default() string { return "A" } +func (c ConstantB) Default() string { return "B" } +func (c ConstantC) Default() string { return "C" } + +type DiscVariantA struct { + Name string `json:"name" api:"required"` + Age int `json:"age" api:"required"` + Type ConstantA `json:"type" api:"required"` +} + +type DiscVariantB struct { + Name string `json:"name" api:"required"` + Age int `json:"age" api:"required"` + Type ConstantB `json:"type" api:"required"` +} + +type DiscVariantC struct { + Name string `json:"name" api:"required"` + Age float64 `json:"age" api:"required"` + Type ConstantC `json:"type" api:"required"` +} + +type DiscriminatedUnion struct { + OfA *DiscVariantA `json:",inline"` + OfB *DiscVariantB `json:",inline"` + OfC *DiscVariantC `json:",inline"` + + paramUnion +} + +func init() { + apijson.RegisterDiscriminatedUnion[DiscriminatedUnion]("type", map[string]reflect.Type{ + "A": reflect.TypeOf(DiscVariantA{}), + "B": reflect.TypeOf(DiscVariantB{}), + "C": reflect.TypeOf(DiscVariantC{}), + }) +} + +type FooVariant struct { + Type string `json:"type" api:"required"` + Value string `json:"value" api:"required"` +} + +type BarVariant struct { + Type string `json:"type" api:"required"` + Enable bool `json:"enable" api:"required"` +} + +type MultiDiscriminatorUnion struct { + OfFoo 
*FooVariant `json:",inline"` + OfBar *BarVariant `json:",inline"` + + paramUnion +} + +func init() { + apijson.RegisterDiscriminatedUnion[MultiDiscriminatorUnion]("type", map[string]reflect.Type{ + "foo": reflect.TypeOf(FooVariant{}), + "foo_v2": reflect.TypeOf(FooVariant{}), + "bar": reflect.TypeOf(BarVariant{}), + "bar_legacy": reflect.TypeOf(BarVariant{}), + }) +} + +func (m *MultiDiscriminatorUnion) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, m) +} + +func (d *DiscriminatedUnion) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, d) +} + +func TestDiscriminatedUnion(t *testing.T) { + tests := map[string]struct { + raw string + target DiscriminatedUnion + shouldFail bool + }{ + "variant_a": { + raw: `{"name":"Alice","age":25,"type":"A"}`, + target: DiscriminatedUnion{OfA: &DiscVariantA{ + Name: "Alice", + Age: 25, + Type: "A", + }}, + }, + "variant_b": { + raw: `{"name":"Bob","age":30,"type":"B"}`, + target: DiscriminatedUnion{OfB: &DiscVariantB{ + Name: "Bob", + Age: 30, + Type: "B", + }}, + }, + "variant_c": { + raw: `{"name":"Charlie","age":35.5,"type":"C"}`, + target: DiscriminatedUnion{OfC: &DiscVariantC{ + Name: "Charlie", + Age: 35.5, + Type: "C", + }}, + }, + "invalid_type": { + raw: `{"name":"Unknown","age":40,"type":"D"}`, + target: DiscriminatedUnion{}, + shouldFail: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + var dst DiscriminatedUnion + err := json.Unmarshal([]byte(test.raw), &dst) + if err != nil && !test.shouldFail { + t.Fatalf("failed unmarshal with err: %v", err) + } + if err == nil && test.shouldFail { + t.Fatalf("expected unmarshal to fail but it succeeded") + } + if !reflect.DeepEqual(dst, test.target) { + t.Fatalf("failed equality, got %#v but expected %#v", dst, test.target) + } + }) + } +} + +func TestMultiDiscriminatorUnion(t *testing.T) { + tests := map[string]struct { + raw string + target MultiDiscriminatorUnion + shouldFail bool + }{ + 
"foo_variant": { + raw: `{"type":"foo","value":"test"}`, + target: MultiDiscriminatorUnion{OfFoo: &FooVariant{ + Type: "foo", + Value: "test", + }}, + }, + "foo_v2_variant": { + raw: `{"type":"foo_v2","value":"test_v2"}`, + target: MultiDiscriminatorUnion{OfFoo: &FooVariant{ + Type: "foo_v2", + Value: "test_v2", + }}, + }, + "bar_variant": { + raw: `{"type":"bar","enable":true}`, + target: MultiDiscriminatorUnion{OfBar: &BarVariant{ + Type: "bar", + Enable: true, + }}, + }, + "bar_legacy_variant": { + raw: `{"type":"bar_legacy","enable":false}`, + target: MultiDiscriminatorUnion{OfBar: &BarVariant{ + Type: "bar_legacy", + Enable: false, + }}, + }, + "invalid_type": { + raw: `{"type":"unknown","value":"test"}`, + target: MultiDiscriminatorUnion{}, + shouldFail: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + var dst MultiDiscriminatorUnion + err := json.Unmarshal([]byte(test.raw), &dst) + if err != nil && !test.shouldFail { + t.Fatalf("failed unmarshal with err: %v", err) + } + if err == nil && test.shouldFail { + t.Fatalf("expected unmarshal to fail but it succeeded") + } + if !reflect.DeepEqual(dst, test.target) { + t.Fatalf("failed equality, got %#v but expected %#v", dst, test.target) + } + }) + } +} diff --git a/sdks/go/internal/apijson/decoder.go b/sdks/go/internal/apijson/decoder.go new file mode 100644 index 00000000..95c9c287 --- /dev/null +++ b/sdks/go/internal/apijson/decoder.go @@ -0,0 +1,691 @@ +// The deserialization algorithm from apijson may be subject to improvements +// between minor versions, particularly with respect to calling [json.Unmarshal] +// into param unions. 
+ +package apijson + +import ( + "encoding/json" + "fmt" + "github.com/kernel/hypeman-go/packages/param" + "reflect" + "strconv" + "sync" + "time" + "unsafe" + + "github.com/tidwall/gjson" +) + +// decoders is a synchronized map with roughly the following type: +// map[reflect.Type]decoderFunc +var decoders sync.Map + +// Unmarshal is similar to [encoding/json.Unmarshal] and parses the JSON-encoded +// data and stores it in the given pointer. +func Unmarshal(raw []byte, to any) error { + d := &decoderBuilder{dateFormat: time.RFC3339} + return d.unmarshal(raw, to) +} + +// UnmarshalRoot is like Unmarshal, but doesn't try to call MarshalJSON on the +// root element. Useful if a struct's UnmarshalJSON is overrode to use the +// behavior of this encoder versus the standard library. +func UnmarshalRoot(raw []byte, to any) error { + d := &decoderBuilder{dateFormat: time.RFC3339, root: true} + return d.unmarshal(raw, to) +} + +// decoderBuilder contains the 'compile-time' state of the decoder. +type decoderBuilder struct { + // Whether or not this is the first element and called by [UnmarshalRoot], see + // the documentation there to see why this is necessary. + root bool + // The dateFormat (a format string for [time.Format]) which is chosen by the + // last struct tag that was seen. + dateFormat string +} + +// decoderState contains the 'run-time' state of the decoder. +type decoderState struct { + strict bool + exactness exactness + validator *validationEntry +} + +// Exactness refers to how close to the type the result was if deserialization +// was successful. This is useful in deserializing unions, where you want to try +// each entry, first with strict, then with looser validation, without actually +// having to do a lot of redundant work by marshalling twice (or maybe even more +// times). +type exactness int8 + +const ( + // Some values had to fudged a bit, for example by converting a string to an + // int, or an enum with extra values. 
+ loose exactness = iota + // There are some extra arguments, but other wise it matches the union. + extras + // Exactly right. + exact +) + +type decoderFunc func(node gjson.Result, value reflect.Value, state *decoderState) error + +type decoderField struct { + tag parsedStructTag + fn decoderFunc + idx []int + goname string +} + +type decoderEntry struct { + reflect.Type + dateFormat string + root bool +} + +func (d *decoderBuilder) unmarshal(raw []byte, to any) error { + value := reflect.ValueOf(to).Elem() + result := gjson.ParseBytes(raw) + if !value.IsValid() { + return fmt.Errorf("apijson: cannot marshal into invalid value") + } + return d.typeDecoder(value.Type())(result, value, &decoderState{strict: false, exactness: exact}) +} + +// unmarshalWithExactness is used for internal testing purposes. +func (d *decoderBuilder) unmarshalWithExactness(raw []byte, to any) (exactness, error) { + value := reflect.ValueOf(to).Elem() + result := gjson.ParseBytes(raw) + if !value.IsValid() { + return 0, fmt.Errorf("apijson: cannot marshal into invalid value") + } + state := decoderState{strict: false, exactness: exact} + err := d.typeDecoder(value.Type())(result, value, &state) + return state.exactness, err +} + +func (d *decoderBuilder) typeDecoder(t reflect.Type) decoderFunc { + entry := decoderEntry{ + Type: t, + dateFormat: d.dateFormat, + root: d.root, + } + + if fi, ok := decoders.Load(entry); ok { + return fi.(decoderFunc) + } + + // To deal with recursive types, populate the map with an + // indirect func before we build it. This type waits on the + // real func (f) to be ready and then calls it. This indirect + // func is only used for recursive types. 
+ var ( + wg sync.WaitGroup + f decoderFunc + ) + wg.Add(1) + fi, loaded := decoders.LoadOrStore(entry, decoderFunc(func(node gjson.Result, v reflect.Value, state *decoderState) error { + wg.Wait() + return f(node, v, state) + })) + if loaded { + return fi.(decoderFunc) + } + + // Compute the real decoder and replace the indirect func with it. + f = d.newTypeDecoder(t) + wg.Done() + decoders.Store(entry, f) + return f +} + +// validatedTypeDecoder wraps the type decoder with a validator. This is helpful +// for ensuring that enum fields are correct. +func (d *decoderBuilder) validatedTypeDecoder(t reflect.Type, entry *validationEntry) decoderFunc { + dec := d.typeDecoder(t) + if entry == nil { + return dec + } + + // Thread the current validation entry through the decoder, + // but clean up in time for the next field. + return func(node gjson.Result, v reflect.Value, state *decoderState) error { + state.validator = entry + err := dec(node, v, state) + state.validator = nil + return err + } +} + +func indirectUnmarshalerDecoder(n gjson.Result, v reflect.Value, state *decoderState) error { + return v.Addr().Interface().(json.Unmarshaler).UnmarshalJSON([]byte(n.Raw)) +} + +func unmarshalerDecoder(n gjson.Result, v reflect.Value, state *decoderState) error { + if v.Kind() == reflect.Pointer && v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } + return v.Interface().(json.Unmarshaler).UnmarshalJSON([]byte(n.Raw)) +} + +func (d *decoderBuilder) newTypeDecoder(t reflect.Type) decoderFunc { + if t.ConvertibleTo(reflect.TypeOf(time.Time{})) { + return d.newTimeTypeDecoder(t) + } + + if t.Implements(reflect.TypeOf((*param.Optional)(nil)).Elem()) { + return d.newOptTypeDecoder(t) + } + + if !d.root && t.Implements(reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()) { + return unmarshalerDecoder + } + if !d.root && reflect.PointerTo(t).Implements(reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()) { + if _, ok := unionVariants[t]; !ok { + return indirectUnmarshalerDecoder + } + 
} + d.root = false + + if _, ok := unionRegistry[t]; ok { + if isStructUnion(t) { + return d.newStructUnionDecoder(t) + } + return d.newUnionDecoder(t) + } + + switch t.Kind() { + case reflect.Pointer: + inner := t.Elem() + innerDecoder := d.typeDecoder(inner) + + return func(n gjson.Result, v reflect.Value, state *decoderState) error { + if !v.IsValid() { + return fmt.Errorf("apijson: unexpected invalid reflection value %+#v", v) + } + + newValue := reflect.New(inner).Elem() + err := innerDecoder(n, newValue, state) + if err != nil { + return err + } + + v.Set(newValue.Addr()) + return nil + } + case reflect.Struct: + if isStructUnion(t) { + return d.newStructUnionDecoder(t) + } + return d.newStructTypeDecoder(t) + case reflect.Array: + fallthrough + case reflect.Slice: + return d.newArrayTypeDecoder(t) + case reflect.Map: + return d.newMapDecoder(t) + case reflect.Interface: + return func(node gjson.Result, value reflect.Value, state *decoderState) error { + if !value.IsValid() { + return fmt.Errorf("apijson: unexpected invalid value %+#v", value) + } + if node.Value() != nil && value.CanSet() { + value.Set(reflect.ValueOf(node.Value())) + } + return nil + } + default: + return d.newPrimitiveTypeDecoder(t) + } +} + +func (d *decoderBuilder) newMapDecoder(t reflect.Type) decoderFunc { + keyType := t.Key() + itemType := t.Elem() + itemDecoder := d.typeDecoder(itemType) + + return func(node gjson.Result, value reflect.Value, state *decoderState) (err error) { + mapValue := reflect.MakeMapWithSize(t, len(node.Map())) + + node.ForEach(func(key, value gjson.Result) bool { + // It's fine for us to just use `ValueOf` here because the key types will + // always be primitive types so we don't need to decode it using the standard pattern + keyValue := reflect.ValueOf(key.Value()) + if !keyValue.IsValid() { + if err == nil { + err = fmt.Errorf("apijson: received invalid key type %v", keyValue.String()) + } + return false + } + if keyValue.Type() != keyType { + if err == nil 
{ + err = fmt.Errorf("apijson: expected key type %v but got %v", keyType, keyValue.Type()) + } + return false + } + + itemValue := reflect.New(itemType).Elem() + itemerr := itemDecoder(value, itemValue, state) + if itemerr != nil { + if err == nil { + err = itemerr + } + return false + } + + mapValue.SetMapIndex(keyValue, itemValue) + return true + }) + + if err != nil { + return err + } + value.Set(mapValue) + return nil + } +} + +func (d *decoderBuilder) newArrayTypeDecoder(t reflect.Type) decoderFunc { + itemDecoder := d.typeDecoder(t.Elem()) + + return func(node gjson.Result, value reflect.Value, state *decoderState) (err error) { + if !node.IsArray() { + return fmt.Errorf("apijson: could not deserialize to an array") + } + + arrayNode := node.Array() + + arrayValue := reflect.MakeSlice(reflect.SliceOf(t.Elem()), len(arrayNode), len(arrayNode)) + for i, itemNode := range arrayNode { + err = itemDecoder(itemNode, arrayValue.Index(i), state) + if err != nil { + return err + } + } + + value.Set(arrayValue) + return nil + } +} + +func (d *decoderBuilder) newStructTypeDecoder(t reflect.Type) decoderFunc { + // map of json field name to struct field decoders + decoderFields := map[string]decoderField{} + anonymousDecoders := []decoderField{} + extraDecoder := (*decoderField)(nil) + var inlineDecoders []decoderField + + validationEntries := validationRegistry[t] + + for i := 0; i < t.NumField(); i++ { + idx := []int{i} + field := t.FieldByIndex(idx) + if !field.IsExported() { + continue + } + + var validator *validationEntry + for _, entry := range validationEntries { + if entry.field.Offset == field.Offset { + validator = &entry + break + } + } + + // If this is an embedded struct, traverse one level deeper to extract + // the fields and get their encoders as well. 
+ if field.Anonymous { + anonymousDecoders = append(anonymousDecoders, decoderField{ + fn: d.typeDecoder(field.Type), + idx: idx[:], + }) + continue + } + // If json tag is not present, then we skip, which is intentionally + // different behavior from the stdlib. + ptag, ok := parseJSONStructTag(field) + if !ok { + continue + } + // We only want to support unexported fields if they're tagged with + // `extras` because that field shouldn't be part of the public API. + if ptag.extras { + extraDecoder = &decoderField{ptag, d.typeDecoder(field.Type.Elem()), idx, field.Name} + continue + } + if ptag.inline { + df := decoderField{ptag, d.typeDecoder(field.Type), idx, field.Name} + inlineDecoders = append(inlineDecoders, df) + continue + } + if ptag.metadata { + continue + } + + oldFormat := d.dateFormat + dateFormat, ok := parseFormatStructTag(field) + if ok { + switch dateFormat { + case "date-time": + d.dateFormat = time.RFC3339 + case "date": + d.dateFormat = "2006-01-02" + } + } + + decoderFields[ptag.name] = decoderField{ + ptag, + d.validatedTypeDecoder(field.Type, validator), + idx, field.Name, + } + + d.dateFormat = oldFormat + } + + return func(node gjson.Result, value reflect.Value, state *decoderState) (err error) { + if field := value.FieldByName("JSON"); field.IsValid() { + if raw := field.FieldByName("raw"); raw.IsValid() { + setUnexportedField(raw, node.Raw) + } + } + + for _, decoder := range anonymousDecoders { + // ignore errors + decoder.fn(node, value.FieldByIndex(decoder.idx), state) + } + + for _, inlineDecoder := range inlineDecoders { + var meta Field + dest := value.FieldByIndex(inlineDecoder.idx) + isValid := false + if dest.IsValid() && node.Type != gjson.Null { + inlineState := decoderState{exactness: state.exactness, strict: true} + err = inlineDecoder.fn(node, dest, &inlineState) + if err == nil { + isValid = true + } + } + + if node.Type == gjson.Null { + meta = Field{ + raw: node.Raw, + status: null, + } + } else if !isValid { + // If an 
inline decoder fails, unset the field and move on. + if dest.IsValid() { + dest.SetZero() + } + continue + } else if isValid { + meta = Field{ + raw: node.Raw, + status: valid, + } + } + setMetadataSubField(value, inlineDecoder.idx, inlineDecoder.goname, meta) + } + + typedExtraType := reflect.Type(nil) + typedExtraFields := reflect.Value{} + if extraDecoder != nil { + typedExtraType = value.FieldByIndex(extraDecoder.idx).Type() + typedExtraFields = reflect.MakeMap(typedExtraType) + } + untypedExtraFields := map[string]Field{} + + for fieldName, itemNode := range node.Map() { + df, explicit := decoderFields[fieldName] + var ( + dest reflect.Value + fn decoderFunc + meta Field + ) + if explicit { + fn = df.fn + dest = value.FieldByIndex(df.idx) + } + if !explicit && extraDecoder != nil { + dest = reflect.New(typedExtraType.Elem()).Elem() + fn = extraDecoder.fn + } + + isValid := false + if dest.IsValid() && itemNode.Type != gjson.Null { + err = fn(itemNode, dest, state) + if err == nil { + isValid = true + } + } + + // Handle null [param.Opt] + if itemNode.Type == gjson.Null && dest.IsValid() && dest.Type().Implements(reflect.TypeOf((*param.Optional)(nil)).Elem()) { + dest.Addr().Interface().(json.Unmarshaler).UnmarshalJSON([]byte(itemNode.Raw)) + continue + } + + if itemNode.Type == gjson.Null { + meta = Field{ + raw: itemNode.Raw, + status: null, + } + } else if !isValid { + meta = Field{ + raw: itemNode.Raw, + status: invalid, + } + } else if isValid { + meta = Field{ + raw: itemNode.Raw, + status: valid, + } + } + + if explicit { + setMetadataSubField(value, df.idx, df.goname, meta) + } + if !explicit { + untypedExtraFields[fieldName] = meta + } + if !explicit && extraDecoder != nil { + typedExtraFields.SetMapIndex(reflect.ValueOf(fieldName), dest) + } + } + + if extraDecoder != nil && typedExtraFields.Len() > 0 { + value.FieldByIndex(extraDecoder.idx).Set(typedExtraFields) + } + + // Set exactness to 'extras' if there are untyped, extra fields. 
+ if len(untypedExtraFields) > 0 && state.exactness > extras { + state.exactness = extras + } + + if len(untypedExtraFields) > 0 { + setMetadataExtraFields(value, []int{-1}, "ExtraFields", untypedExtraFields) + } + return nil + } +} + +func (d *decoderBuilder) newPrimitiveTypeDecoder(t reflect.Type) decoderFunc { + switch t.Kind() { + case reflect.String: + return func(n gjson.Result, v reflect.Value, state *decoderState) error { + v.SetString(n.String()) + if guardStrict(state, n.Type != gjson.String) { + return fmt.Errorf("apijson: failed to parse string strictly") + } + // Everything that is not an object can be loosely stringified. + if n.Type == gjson.JSON { + return fmt.Errorf("apijson: failed to parse string") + } + + state.validateString(v) + + if guardUnknown(state, v) { + return fmt.Errorf("apijson: failed string enum validation") + } + return nil + } + case reflect.Bool: + return func(n gjson.Result, v reflect.Value, state *decoderState) error { + v.SetBool(n.Bool()) + if guardStrict(state, n.Type != gjson.True && n.Type != gjson.False) { + return fmt.Errorf("apijson: failed to parse bool strictly") + } + // Numbers and strings that are either 'true' or 'false' can be loosely + // deserialized as bool. + if n.Type == gjson.String && (n.Raw != "true" && n.Raw != "false") || n.Type == gjson.JSON { + return fmt.Errorf("apijson: failed to parse bool") + } + + state.validateBool(v) + + if guardUnknown(state, v) { + return fmt.Errorf("apijson: failed bool enum validation") + } + return nil + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return func(n gjson.Result, v reflect.Value, state *decoderState) error { + v.SetInt(n.Int()) + if guardStrict(state, n.Type != gjson.Number || n.Num != float64(int(n.Num))) { + return fmt.Errorf("apijson: failed to parse int strictly") + } + // Numbers, booleans, and strings that maybe look like numbers can be + // loosely deserialized as numbers. 
+ if n.Type == gjson.JSON || (n.Type == gjson.String && !canParseAsNumber(n.Str)) { + return fmt.Errorf("apijson: failed to parse int") + } + + state.validateInt(v) + + if guardUnknown(state, v) { + return fmt.Errorf("apijson: failed int enum validation") + } + return nil + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return func(n gjson.Result, v reflect.Value, state *decoderState) error { + v.SetUint(n.Uint()) + if guardStrict(state, n.Type != gjson.Number || n.Num != float64(int(n.Num)) || n.Num < 0) { + return fmt.Errorf("apijson: failed to parse uint strictly") + } + // Numbers, booleans, and strings that maybe look like numbers can be + // loosely deserialized as uint. + if n.Type == gjson.JSON || (n.Type == gjson.String && !canParseAsNumber(n.Str)) { + return fmt.Errorf("apijson: failed to parse uint") + } + if guardUnknown(state, v) { + return fmt.Errorf("apijson: failed uint enum validation") + } + return nil + } + case reflect.Float32, reflect.Float64: + return func(n gjson.Result, v reflect.Value, state *decoderState) error { + v.SetFloat(n.Float()) + if guardStrict(state, n.Type != gjson.Number) { + return fmt.Errorf("apijson: failed to parse float strictly") + } + // Numbers, booleans, and strings that maybe look like numbers can be + // loosely deserialized as floats. 
+ if n.Type == gjson.JSON || (n.Type == gjson.String && !canParseAsNumber(n.Str)) { + return fmt.Errorf("apijson: failed to parse float") + } + if guardUnknown(state, v) { + return fmt.Errorf("apijson: failed float enum validation") + } + return nil + } + default: + return func(node gjson.Result, v reflect.Value, state *decoderState) error { + return fmt.Errorf("unknown type received at primitive decoder: %s", t.String()) + } + } +} + +func (d *decoderBuilder) newOptTypeDecoder(t reflect.Type) decoderFunc { + for t.Kind() == reflect.Pointer { + t = t.Elem() + } + valueField, _ := t.FieldByName("Value") + return func(n gjson.Result, v reflect.Value, state *decoderState) error { + state.validateOptKind(n, valueField.Type) + return v.Addr().Interface().(json.Unmarshaler).UnmarshalJSON([]byte(n.Raw)) + } +} + +func (d *decoderBuilder) newTimeTypeDecoder(t reflect.Type) decoderFunc { + format := d.dateFormat + return func(n gjson.Result, v reflect.Value, state *decoderState) error { + parsed, err := time.Parse(format, n.Str) + if err == nil { + v.Set(reflect.ValueOf(parsed).Convert(t)) + return nil + } + + if guardStrict(state, true) { + return err + } + + layouts := []string{ + "2006-01-02", + "2006-01-02T15:04:05Z07:00", + "2006-01-02T15:04:05Z0700", + "2006-01-02T15:04:05", + "2006-01-02 15:04:05Z07:00", + "2006-01-02 15:04:05Z0700", + "2006-01-02 15:04:05", + } + + for _, layout := range layouts { + parsed, err := time.Parse(layout, n.Str) + if err == nil { + v.Set(reflect.ValueOf(parsed).Convert(t)) + return nil + } + } + + return fmt.Errorf("unable to leniently parse date-time string: %s", n.Str) + } +} + +func setUnexportedField(field reflect.Value, value any) { + reflect.NewAt(field.Type(), unsafe.Pointer(field.UnsafeAddr())).Elem().Set(reflect.ValueOf(value)) +} + +func guardStrict(state *decoderState, cond bool) bool { + if !cond { + return false + } + + if state.strict { + return true + } + + state.exactness = loose + return false +} + +func 
canParseAsNumber(str string) bool { + _, err := strconv.ParseFloat(str, 64) + return err == nil +} + +var stringType = reflect.TypeOf(string("")) + +func guardUnknown(state *decoderState, v reflect.Value) bool { + if have, ok := v.Interface().(interface{ IsKnown() bool }); guardStrict(state, ok && !have.IsKnown()) { + return true + } + + constantString, ok := v.Interface().(interface{ Default() string }) + named := v.Type() != stringType + if guardStrict(state, ok && named && v.Equal(reflect.ValueOf(constantString.Default()))) { + return true + } + return false +} diff --git a/sdks/go/internal/apijson/decoderesp_test.go b/sdks/go/internal/apijson/decoderesp_test.go new file mode 100644 index 00000000..b45af01d --- /dev/null +++ b/sdks/go/internal/apijson/decoderesp_test.go @@ -0,0 +1,30 @@ +package apijson_test + +import ( + "encoding/json" + "github.com/kernel/hypeman-go/internal/apijson" + "github.com/kernel/hypeman-go/packages/respjson" + "testing" +) + +type StructWithNullExtraField struct { + Results []string `json:"results" api:"required"` + JSON struct { + Results respjson.Field + ExtraFields map[string]respjson.Field + raw string + } `json:"-"` +} + +func (r *StructWithNullExtraField) UnmarshalJSON(data []byte) error { + return apijson.UnmarshalRoot(data, r) +} + +func TestDecodeWithNullExtraField(t *testing.T) { + raw := `{"something_else":null}` + var dst *StructWithNullExtraField + err := json.Unmarshal([]byte(raw), &dst) + if err != nil { + t.Fatalf("error: %s", err.Error()) + } +} diff --git a/sdks/go/internal/apijson/encoder.go b/sdks/go/internal/apijson/encoder.go new file mode 100644 index 00000000..ab7a3c15 --- /dev/null +++ b/sdks/go/internal/apijson/encoder.go @@ -0,0 +1,392 @@ +package apijson + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/tidwall/sjson" +) + +var encoders sync.Map // map[encoderEntry]encoderFunc + +// If we want to set a literal key value into JSON 
// EscapeSJSONKey escapes a literal object key for use in an sjson path: sjson
// treats characters like '.', '|', and '*' as path syntax, so each such
// metacharacter (and backslash itself) is prefixed with a backslash.
var EscapeSJSONKey = strings.NewReplacer(
	`\`, `\\`,
	`|`, `\|`,
	`#`, `\#`,
	`@`, `\@`,
	`*`, `\*`,
	`.`, `\.`,
	`:`, `\:`,
	`?`, `\?`,
).Replace
+ f = e.newTypeEncoder(t) + wg.Done() + encoders.Store(entry, f) + return f +} + +func marshalerEncoder(v reflect.Value) ([]byte, error) { + return v.Interface().(json.Marshaler).MarshalJSON() +} + +func indirectMarshalerEncoder(v reflect.Value) ([]byte, error) { + return v.Addr().Interface().(json.Marshaler).MarshalJSON() +} + +func (e *encoder) newTypeEncoder(t reflect.Type) encoderFunc { + if t.ConvertibleTo(reflect.TypeOf(time.Time{})) { + return e.newTimeTypeEncoder() + } + if !e.root && t.Implements(reflect.TypeOf((*json.Marshaler)(nil)).Elem()) { + return marshalerEncoder + } + if !e.root && reflect.PointerTo(t).Implements(reflect.TypeOf((*json.Marshaler)(nil)).Elem()) { + return indirectMarshalerEncoder + } + e.root = false + switch t.Kind() { + case reflect.Pointer: + inner := t.Elem() + + innerEncoder := e.typeEncoder(inner) + return func(v reflect.Value) ([]byte, error) { + if !v.IsValid() || v.IsNil() { + return nil, nil + } + return innerEncoder(v.Elem()) + } + case reflect.Struct: + return e.newStructTypeEncoder(t) + case reflect.Array: + fallthrough + case reflect.Slice: + return e.newArrayTypeEncoder(t) + case reflect.Map: + return e.newMapEncoder(t) + case reflect.Interface: + return e.newInterfaceEncoder() + default: + return e.newPrimitiveTypeEncoder(t) + } +} + +func (e *encoder) newPrimitiveTypeEncoder(t reflect.Type) encoderFunc { + switch t.Kind() { + // Note that we could use `gjson` to encode these types but it would complicate our + // code more and this current code shouldn't cause any issues + case reflect.String: + return func(v reflect.Value) ([]byte, error) { + return json.Marshal(v.Interface()) + } + case reflect.Bool: + return func(v reflect.Value) ([]byte, error) { + if v.Bool() { + return []byte("true"), nil + } + return []byte("false"), nil + } + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: + return func(v reflect.Value) ([]byte, error) { + return []byte(strconv.FormatInt(v.Int(), 10)), nil + } + case 
reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return func(v reflect.Value) ([]byte, error) { + return []byte(strconv.FormatUint(v.Uint(), 10)), nil + } + case reflect.Float32: + return func(v reflect.Value) ([]byte, error) { + return []byte(strconv.FormatFloat(v.Float(), 'f', -1, 32)), nil + } + case reflect.Float64: + return func(v reflect.Value) ([]byte, error) { + return []byte(strconv.FormatFloat(v.Float(), 'f', -1, 64)), nil + } + default: + return func(v reflect.Value) ([]byte, error) { + return nil, fmt.Errorf("unknown type received at primitive encoder: %s", t.String()) + } + } +} + +func (e *encoder) newArrayTypeEncoder(t reflect.Type) encoderFunc { + itemEncoder := e.typeEncoder(t.Elem()) + + return func(value reflect.Value) ([]byte, error) { + json := []byte("[]") + for i := 0; i < value.Len(); i++ { + var value, err = itemEncoder(value.Index(i)) + if err != nil { + return nil, err + } + if value == nil { + // Assume that empty items should be inserted as `null` so that the output array + // will be the same length as the input array + value = []byte("null") + } + + json, err = sjson.SetRawBytes(json, "-1", value) + if err != nil { + return nil, err + } + } + + return json, nil + } +} + +func (e *encoder) newStructTypeEncoder(t reflect.Type) encoderFunc { + encoderFields := []encoderField{} + extraEncoder := (*encoderField)(nil) + + // This helper allows us to recursively collect field encoders into a flat + // array. The parameter `index` keeps track of the access patterns necessary + // to get to some field. + var collectEncoderFields func(r reflect.Type, index []int) + collectEncoderFields = func(r reflect.Type, index []int) { + for i := 0; i < r.NumField(); i++ { + idx := append(index, i) + field := t.FieldByIndex(idx) + if !field.IsExported() { + continue + } + // If this is an embedded struct, traverse one level deeper to extract + // the field and get their encoders as well. 
+ if field.Anonymous { + collectEncoderFields(field.Type, idx) + continue + } + // If json tag is not present, then we skip, which is intentionally + // different behavior from the stdlib. + ptag, ok := parseJSONStructTag(field) + if !ok { + continue + } + // We only want to support unexported field if they're tagged with + // `extras` because that field shouldn't be part of the public API. We + // also want to only keep the top level extras + if ptag.extras && len(index) == 0 { + extraEncoder = &encoderField{ptag, e.typeEncoder(field.Type.Elem()), idx} + continue + } + if ptag.name == "-" { + continue + } + + dateFormat, ok := parseFormatStructTag(field) + oldFormat := e.dateFormat + if ok { + switch dateFormat { + case "date-time": + e.dateFormat = time.RFC3339 + case "date": + e.dateFormat = "2006-01-02" + } + } + encoderFields = append(encoderFields, encoderField{ptag, e.typeEncoder(field.Type), idx}) + e.dateFormat = oldFormat + } + } + collectEncoderFields(t, []int{}) + + // Ensure deterministic output by sorting by lexicographic order + sort.Slice(encoderFields, func(i, j int) bool { + return encoderFields[i].tag.name < encoderFields[j].tag.name + }) + + return func(value reflect.Value) (json []byte, err error) { + json = []byte("{}") + + for _, ef := range encoderFields { + field := value.FieldByIndex(ef.idx) + encoded, err := ef.fn(field) + if err != nil { + return nil, err + } + if encoded == nil { + continue + } + json, err = sjson.SetRawBytes(json, EscapeSJSONKey(ef.tag.name), encoded) + if err != nil { + return nil, err + } + } + + if extraEncoder != nil { + json, err = e.encodeMapEntries(json, value.FieldByIndex(extraEncoder.idx)) + if err != nil { + return nil, err + } + } + return + } +} + +func (e *encoder) newFieldTypeEncoder(t reflect.Type) encoderFunc { + f, _ := t.FieldByName("Value") + enc := e.typeEncoder(f.Type) + + return func(value reflect.Value) (json []byte, err error) { + present := value.FieldByName("Present") + if !present.Bool() { + 
return nil, nil + } + null := value.FieldByName("Null") + if null.Bool() { + return []byte("null"), nil + } + raw := value.FieldByName("Raw") + if !raw.IsNil() { + return e.typeEncoder(raw.Type())(raw) + } + return enc(value.FieldByName("Value")) + } +} + +func (e *encoder) newTimeTypeEncoder() encoderFunc { + format := e.dateFormat + return func(value reflect.Value) (json []byte, err error) { + return []byte(`"` + value.Convert(reflect.TypeOf(time.Time{})).Interface().(time.Time).Format(format) + `"`), nil + } +} + +func (e encoder) newInterfaceEncoder() encoderFunc { + return func(value reflect.Value) ([]byte, error) { + value = value.Elem() + if !value.IsValid() { + return nil, nil + } + return e.typeEncoder(value.Type())(value) + } +} + +// Given a []byte of json (may either be an empty object or an object that already contains entries) +// encode all of the entries in the map to the json byte array. +func (e *encoder) encodeMapEntries(json []byte, v reflect.Value) ([]byte, error) { + type mapPair struct { + key []byte + value reflect.Value + } + + pairs := []mapPair{} + keyEncoder := e.typeEncoder(v.Type().Key()) + + iter := v.MapRange() + for iter.Next() { + var encodedKeyString string + if iter.Key().Type().Kind() == reflect.String { + encodedKeyString = iter.Key().String() + } else { + var err error + encodedKeyBytes, err := keyEncoder(iter.Key()) + if err != nil { + return nil, err + } + encodedKeyString = string(encodedKeyBytes) + } + encodedKey := []byte(encodedKeyString) + pairs = append(pairs, mapPair{key: encodedKey, value: iter.Value()}) + } + + // Ensure deterministic output + sort.Slice(pairs, func(i, j int) bool { + return bytes.Compare(pairs[i].key, pairs[j].key) < 0 + }) + + elementEncoder := e.typeEncoder(v.Type().Elem()) + for _, p := range pairs { + encodedValue, err := elementEncoder(p.value) + if err != nil { + return nil, err + } + if len(encodedValue) == 0 { + continue + } + json, err = sjson.SetRawBytes(json, 
EscapeSJSONKey(string(p.key)), encodedValue) + if err != nil { + return nil, err + } + } + + return json, nil +} + +func (e *encoder) newMapEncoder(_ reflect.Type) encoderFunc { + return func(value reflect.Value) ([]byte, error) { + json := []byte("{}") + var err error + json, err = e.encodeMapEntries(json, value) + if err != nil { + return nil, err + } + return json, nil + } +} diff --git a/sdks/go/internal/apijson/enum.go b/sdks/go/internal/apijson/enum.go new file mode 100644 index 00000000..5bef11c3 --- /dev/null +++ b/sdks/go/internal/apijson/enum.go @@ -0,0 +1,145 @@ +package apijson + +import ( + "fmt" + "reflect" + "slices" + "sync" + + "github.com/tidwall/gjson" +) + +/********************/ +/* Validating Enums */ +/********************/ + +type validationEntry struct { + field reflect.StructField + required bool + legalValues struct { + strings []string + // 1 represents true, 0 represents false, -1 represents either + bools int + ints []int64 + } +} + +type validatorFunc func(reflect.Value) exactness + +var validators sync.Map +var validationRegistry = map[reflect.Type][]validationEntry{} + +func RegisterFieldValidator[T any, V string | bool | int | float64](fieldName string, values ...V) { + var t T + parentType := reflect.TypeOf(t) + + if _, ok := validationRegistry[parentType]; !ok { + validationRegistry[parentType] = []validationEntry{} + } + + // The following checks run at initialization time, + // it is impossible for them to panic if any tests pass. 
+ if parentType.Kind() != reflect.Struct { + panic(fmt.Sprintf("apijson: cannot initialize validator for non-struct %s", parentType.String())) + } + + var field reflect.StructField + found := false + for i := 0; i < parentType.NumField(); i++ { + ptag, ok := parseJSONStructTag(parentType.Field(i)) + if ok && ptag.name == fieldName { + field = parentType.Field(i) + found = true + break + } + } + + if !found { + panic(fmt.Sprintf("apijson: cannot find field %s in struct %s", fieldName, parentType.String())) + } + + newEntry := validationEntry{field: field} + newEntry.legalValues.bools = -1 // default to either + + switch values := any(values).(type) { + case []string: + newEntry.legalValues.strings = values + case []int: + newEntry.legalValues.ints = make([]int64, len(values)) + for i, value := range values { + newEntry.legalValues.ints[i] = int64(value) + } + case []bool: + for i, value := range values { + var next int + if value { + next = 1 + } + if i > 0 && newEntry.legalValues.bools != next { + newEntry.legalValues.bools = -1 // accept either + break + } + newEntry.legalValues.bools = next + } + } + + // Store the information necessary to create a validator, so that we can use it + // lazily create the validator function when did. 
+ validationRegistry[parentType] = append(validationRegistry[parentType], newEntry) +} + +func (state *decoderState) validateString(v reflect.Value) { + if state.validator == nil { + return + } + if !slices.Contains(state.validator.legalValues.strings, v.String()) { + state.exactness = loose + } +} + +func (state *decoderState) validateInt(v reflect.Value) { + if state.validator == nil { + return + } + if !slices.Contains(state.validator.legalValues.ints, v.Int()) { + state.exactness = loose + } +} + +func (state *decoderState) validateBool(v reflect.Value) { + if state.validator == nil { + return + } + b := v.Bool() + if state.validator.legalValues.bools == 1 && b == false { + state.exactness = loose + } else if state.validator.legalValues.bools == 0 && b == true { + state.exactness = loose + } +} + +func (state *decoderState) validateOptKind(node gjson.Result, t reflect.Type) { + switch node.Type { + case gjson.JSON: + state.exactness = loose + case gjson.Null: + return + case gjson.False, gjson.True: + if t.Kind() != reflect.Bool { + state.exactness = loose + } + case gjson.Number: + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64: + return + default: + state.exactness = loose + } + case gjson.String: + if t.Kind() != reflect.String { + state.exactness = loose + } + } +} diff --git a/sdks/go/internal/apijson/enum_test.go b/sdks/go/internal/apijson/enum_test.go new file mode 100644 index 00000000..a2aeed44 --- /dev/null +++ b/sdks/go/internal/apijson/enum_test.go @@ -0,0 +1,87 @@ +package apijson + +import ( + "reflect" + "testing" +) + +type EnumStruct struct { + NormalString string `json:"normal_string"` + StringEnum string `json:"string_enum"` + NamedEnum NamedEnumType `json:"named_enum"` + + IntEnum int `json:"int_enum"` + BoolEnum bool `json:"bool_enum"` + + WeirdBoolEnum bool 
`json:"weird_bool_enum"` +} + +func (o *EnumStruct) UnmarshalJSON(data []byte) error { + return UnmarshalRoot(data, o) +} + +func init() { + RegisterFieldValidator[EnumStruct]("string_enum", "one", "two", "three") + RegisterFieldValidator[EnumStruct]("int_enum", 200, 404) + RegisterFieldValidator[EnumStruct]("bool_enum", false) + RegisterFieldValidator[EnumStruct]("weird_bool_enum", true, false) +} + +type NamedEnumType string + +const ( + NamedEnumOne NamedEnumType = "one" + NamedEnumTwo NamedEnumType = "two" + NamedEnumThree NamedEnumType = "three" +) + +func (e NamedEnumType) IsKnown() bool { + return e == NamedEnumOne || e == NamedEnumTwo || e == NamedEnumThree +} + +func TestEnumStructStringValidator(t *testing.T) { + cases := map[string]struct { + exactness + EnumStruct + }{ + `{"string_enum":"one"}`: {exact, EnumStruct{StringEnum: "one"}}, + `{"string_enum":"two"}`: {exact, EnumStruct{StringEnum: "two"}}, + `{"string_enum":"three"}`: {exact, EnumStruct{StringEnum: "three"}}, + `{"string_enum":"none"}`: {loose, EnumStruct{StringEnum: "none"}}, + `{"int_enum":200}`: {exact, EnumStruct{IntEnum: 200}}, + `{"int_enum":404}`: {exact, EnumStruct{IntEnum: 404}}, + `{"int_enum":500}`: {loose, EnumStruct{IntEnum: 500}}, + `{"bool_enum":false}`: {exact, EnumStruct{BoolEnum: false}}, + `{"bool_enum":true}`: {loose, EnumStruct{BoolEnum: true}}, + `{"weird_bool_enum":true}`: {exact, EnumStruct{WeirdBoolEnum: true}}, + `{"weird_bool_enum":false}`: {exact, EnumStruct{WeirdBoolEnum: false}}, + + `{"named_enum":"one"}`: {exact, EnumStruct{NamedEnum: NamedEnumOne}}, + `{"named_enum":"none"}`: {loose, EnumStruct{NamedEnum: "none"}}, + + `{"string_enum":"one","named_enum":"one"}`: {exact, EnumStruct{NamedEnum: "one", StringEnum: "one"}}, + `{"string_enum":"four","named_enum":"one"}`: { + loose, + EnumStruct{NamedEnum: "one", StringEnum: "four"}, + }, + `{"string_enum":"one","named_enum":"four"}`: { + loose, EnumStruct{NamedEnum: "four", StringEnum: "one"}, + }, + 
`{"wrong_key":"one"}`: {extras, EnumStruct{StringEnum: ""}}, + } + + for raw, expected := range cases { + var dst EnumStruct + + dec := decoderBuilder{root: true} + exactness, _ := dec.unmarshalWithExactness([]byte(raw), &dst) + + if !reflect.DeepEqual(dst, expected.EnumStruct) { + t.Fatalf("failed equality check %#v", dst) + } + + if exactness != expected.exactness { + t.Fatalf("exactness got %d expected %d %s", exactness, expected.exactness, raw) + } + } +} diff --git a/sdks/go/internal/apijson/field.go b/sdks/go/internal/apijson/field.go new file mode 100644 index 00000000..854d6dd7 --- /dev/null +++ b/sdks/go/internal/apijson/field.go @@ -0,0 +1,23 @@ +package apijson + +type status uint8 + +const ( + missing status = iota + null + invalid + valid +) + +type Field struct { + raw string + status status +} + +// Returns true if the field is explicitly `null` _or_ if it is not present at all (ie, missing). +// To check if the field's key is present in the JSON with an explicit null value, +// you must check `f.IsNull() && !f.IsMissing()`. 
+func (j Field) IsNull() bool { return j.status <= null } +func (j Field) IsMissing() bool { return j.status == missing } +func (j Field) IsInvalid() bool { return j.status == invalid } +func (j Field) Raw() string { return j.raw } diff --git a/sdks/go/internal/apijson/json_test.go b/sdks/go/internal/apijson/json_test.go new file mode 100644 index 00000000..fac9fcce --- /dev/null +++ b/sdks/go/internal/apijson/json_test.go @@ -0,0 +1,616 @@ +package apijson + +import ( + "reflect" + "strings" + "testing" + "time" + + "github.com/tidwall/gjson" +) + +func P[T any](v T) *T { return &v } + +type Primitives struct { + A bool `json:"a"` + B int `json:"b"` + C uint `json:"c"` + D float64 `json:"d"` + E float32 `json:"e"` + F []int `json:"f"` +} + +type PrimitivePointers struct { + A *bool `json:"a"` + B *int `json:"b"` + C *uint `json:"c"` + D *float64 `json:"d"` + E *float32 `json:"e"` + F *[]int `json:"f"` +} + +type Slices struct { + Slice []Primitives `json:"slices"` +} + +type DateTime struct { + Date time.Time `json:"date" format:"date"` + DateTime time.Time `json:"date-time" format:"date-time"` +} + +type AdditionalProperties struct { + A bool `json:"a"` + ExtraFields map[string]any `json:"-" api:"extrafields"` +} + +type TypedAdditionalProperties struct { + A bool `json:"a"` + ExtraFields map[string]int `json:"-" api:"extrafields"` +} + +type EmbeddedStruct struct { + A bool `json:"a"` + B string `json:"b"` + + JSON EmbeddedStructJSON +} + +type EmbeddedStructJSON struct { + A Field + B Field + ExtraFields map[string]Field + raw string +} + +type EmbeddedStructs struct { + EmbeddedStruct + A *int `json:"a"` + ExtraFields map[string]any `json:"-" api:"extrafields"` + + JSON EmbeddedStructsJSON +} + +type EmbeddedStructsJSON struct { + A Field + ExtraFields map[string]Field + raw string +} + +type Recursive struct { + Name string `json:"name"` + Child *Recursive `json:"child"` +} + +type JSONFieldStruct struct { + A bool `json:"a"` + B int64 `json:"b"` + C string 
`json:"c"` + D string `json:"d"` + ExtraFields map[string]int64 `json:"" api:"extrafields"` + JSON JSONFieldStructJSON `json:",metadata"` +} + +type JSONFieldStructJSON struct { + A Field + B Field + C Field + D Field + ExtraFields map[string]Field + raw string +} + +type UnknownStruct struct { + Unknown any `json:"unknown"` +} + +type UnionStruct struct { + Union Union `json:"union" format:"date"` +} + +type Union interface { + union() +} + +type Inline struct { + InlineField Primitives `json:",inline"` + JSON InlineJSON `json:",metadata"` +} + +type InlineArray struct { + InlineField []string `json:",inline"` + JSON InlineJSON `json:",metadata"` +} + +type InlineJSON struct { + InlineField Field + raw string +} + +type UnionInteger int64 + +func (UnionInteger) union() {} + +type UnionStructA struct { + Type string `json:"type"` + A string `json:"a"` + B string `json:"b"` +} + +func (UnionStructA) union() {} + +type UnionStructB struct { + Type string `json:"type"` + A string `json:"a"` +} + +func (UnionStructB) union() {} + +type UnionTime time.Time + +func (UnionTime) union() {} + +func init() { + RegisterUnion[Union]("type", + UnionVariant{ + TypeFilter: gjson.String, + Type: reflect.TypeOf(UnionTime{}), + }, + UnionVariant{ + TypeFilter: gjson.Number, + Type: reflect.TypeOf(UnionInteger(0)), + }, + UnionVariant{ + TypeFilter: gjson.JSON, + DiscriminatorValue: "typeA", + Type: reflect.TypeOf(UnionStructA{}), + }, + UnionVariant{ + TypeFilter: gjson.JSON, + DiscriminatorValue: "typeB", + Type: reflect.TypeOf(UnionStructB{}), + }, + ) +} + +type ComplexUnionStruct struct { + Union ComplexUnion `json:"union"` +} + +type ComplexUnion interface { + complexUnion() +} + +type ComplexUnionA struct { + Boo string `json:"boo"` + Foo bool `json:"foo"` +} + +func (ComplexUnionA) complexUnion() {} + +type ComplexUnionB struct { + Boo bool `json:"boo"` + Foo string `json:"foo"` +} + +func (ComplexUnionB) complexUnion() {} + +type ComplexUnionC struct { + Boo int64 
`json:"boo"` +} + +func (ComplexUnionC) complexUnion() {} + +type ComplexUnionTypeA struct { + Baz int64 `json:"baz"` + Type TypeA `json:"type"` +} + +func (ComplexUnionTypeA) complexUnion() {} + +type TypeA string + +func (t TypeA) IsKnown() bool { + return t == "a" +} + +type ComplexUnionTypeB struct { + Baz int64 `json:"baz"` + Type TypeB `json:"type"` +} + +type TypeB string + +func (t TypeB) IsKnown() bool { + return t == "b" +} + +type UnmarshalStruct struct { + Foo string `json:"foo"` + prop bool `json:"-"` +} + +func (r *UnmarshalStruct) UnmarshalJSON(json []byte) error { + r.prop = true + return UnmarshalRoot(json, r) +} + +func (ComplexUnionTypeB) complexUnion() {} + +func init() { + RegisterUnion[ComplexUnion]("", + UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(ComplexUnionA{}), + }, + UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(ComplexUnionB{}), + }, + UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(ComplexUnionC{}), + }, + UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(ComplexUnionTypeA{}), + }, + UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(ComplexUnionTypeB{}), + }, + ) +} + +type MarshallingUnionStruct struct { + Union MarshallingUnion +} + +func (r *MarshallingUnionStruct) UnmarshalJSON(data []byte) (err error) { + *r = MarshallingUnionStruct{} + err = UnmarshalRoot(data, &r.Union) + return +} + +func (r MarshallingUnionStruct) MarshalJSON() (data []byte, err error) { + return MarshalRoot(r.Union) +} + +type MarshallingUnion interface { + marshallingUnion() +} + +type MarshallingUnionA struct { + Boo string `json:"boo"` +} + +func (MarshallingUnionA) marshallingUnion() {} + +func (r *MarshallingUnionA) UnmarshalJSON(data []byte) (err error) { + return UnmarshalRoot(data, r) +} + +type MarshallingUnionB struct { + Foo string `json:"foo"` +} + +func (MarshallingUnionB) marshallingUnion() {} + +func (r *MarshallingUnionB) UnmarshalJSON(data []byte) (err error) { + 
return UnmarshalRoot(data, r) +} + +func init() { + RegisterUnion[MarshallingUnion]( + "", + UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(MarshallingUnionA{}), + }, + UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(MarshallingUnionB{}), + }, + ) +} + +var tests = map[string]struct { + buf string + val any +}{ + "true": {"true", true}, + "false": {"false", false}, + "int": {"1", 1}, + "int_bigger": {"12324", 12324}, + "int_string_coerce": {`"65"`, 65}, + "int_boolean_coerce": {"true", 1}, + "int64": {"1", int64(1)}, + "int64_huge": {"123456789123456789", int64(123456789123456789)}, + "uint": {"1", uint(1)}, + "uint_bigger": {"12324", uint(12324)}, + "uint_coerce": {`"65"`, uint(65)}, + "float_1.54": {"1.54", float32(1.54)}, + "float_1.89": {"1.89", float64(1.89)}, + "string": {`"str"`, "str"}, + "string_int_coerce": {`12`, "12"}, + "array_string": {`["foo","bar"]`, []string{"foo", "bar"}}, + "array_int": {`[1,2]`, []int{1, 2}}, + "array_int_coerce": {`["1",2]`, []int{1, 2}}, + + "ptr_true": {"true", P(true)}, + "ptr_false": {"false", P(false)}, + "ptr_int": {"1", P(1)}, + "ptr_int_bigger": {"12324", P(12324)}, + "ptr_int_string_coerce": {`"65"`, P(65)}, + "ptr_int_boolean_coerce": {"true", P(1)}, + "ptr_int64": {"1", P(int64(1))}, + "ptr_int64_huge": {"123456789123456789", P(int64(123456789123456789))}, + "ptr_uint": {"1", P(uint(1))}, + "ptr_uint_bigger": {"12324", P(uint(12324))}, + "ptr_uint_coerce": {`"65"`, P(uint(65))}, + "ptr_float_1.54": {"1.54", P(float32(1.54))}, + "ptr_float_1.89": {"1.89", P(float64(1.89))}, + + "date_time": {`"2007-03-01T13:00:00Z"`, time.Date(2007, time.March, 1, 13, 0, 0, 0, time.UTC)}, + "date_time_nano_coerce": {`"2007-03-01T13:03:05.123456789Z"`, time.Date(2007, time.March, 1, 13, 3, 5, 123456789, time.UTC)}, + + "date_time_missing_t_coerce": {`"2007-03-01 13:03:05Z"`, time.Date(2007, time.March, 1, 13, 3, 5, 0, time.UTC)}, + "date_time_missing_timezone_coerce": {`"2007-03-01T13:03:05"`, 
time.Date(2007, time.March, 1, 13, 3, 5, 0, time.UTC)}, + // note: using -1200 to minimize probability of conflicting with the local timezone of the test runner + // see https://en.wikipedia.org/wiki/UTC%E2%88%9212:00 + "date_time_missing_timezone_colon_coerce": {`"2007-03-01T13:03:05-1200"`, time.Date(2007, time.March, 1, 13, 3, 5, 0, time.FixedZone("", -12*60*60))}, + "date_time_nano_missing_t_coerce": {`"2007-03-01 13:03:05.123456789Z"`, time.Date(2007, time.March, 1, 13, 3, 5, 123456789, time.UTC)}, + + "map_string": {`{"foo":"bar"}`, map[string]string{"foo": "bar"}}, + "map_string_with_sjson_path_chars": {`{":a.b.c*:d*-1e.f":"bar"}`, map[string]string{":a.b.c*:d*-1e.f": "bar"}}, + "map_interface": {`{"a":1,"b":"str","c":false}`, map[string]any{"a": float64(1), "b": "str", "c": false}}, + + "primitive_struct": { + `{"a":false,"b":237628372683,"c":654,"d":9999.43,"e":43.76,"f":[1,2,3,4]}`, + Primitives{A: false, B: 237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}, + }, + + "slices": { + `{"slices":[{"a":false,"b":237628372683,"c":654,"d":9999.43,"e":43.76,"f":[1,2,3,4]}]}`, + Slices{ + Slice: []Primitives{{A: false, B: 237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}}, + }, + }, + + "primitive_pointer_struct": { + `{"a":false,"b":237628372683,"c":654,"d":9999.43,"e":43.76,"f":[1,2,3,4,5]}`, + PrimitivePointers{ + A: P(false), + B: P(237628372683), + C: P(uint(654)), + D: P(9999.43), + E: P(float32(43.76)), + F: &[]int{1, 2, 3, 4, 5}, + }, + }, + + "datetime_struct": { + `{"date":"2006-01-02","date-time":"2006-01-02T15:04:05Z"}`, + DateTime{ + Date: time.Date(2006, time.January, 2, 0, 0, 0, 0, time.UTC), + DateTime: time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC), + }, + }, + + "additional_properties": { + `{"a":true,"bar":"value","foo":true}`, + AdditionalProperties{ + A: true, + ExtraFields: map[string]any{ + "bar": "value", + "foo": true, + }, + }, + }, + + "embedded_struct": { + `{"a":1,"b":"bar"}`, + 
EmbeddedStructs{ + EmbeddedStruct: EmbeddedStruct{ + A: true, + B: "bar", + JSON: EmbeddedStructJSON{ + A: Field{raw: `1`, status: valid}, + B: Field{raw: `"bar"`, status: valid}, + raw: `{"a":1,"b":"bar"}`, + }, + }, + A: P(1), + ExtraFields: map[string]any{"b": "bar"}, + JSON: EmbeddedStructsJSON{ + A: Field{raw: `1`, status: valid}, + ExtraFields: map[string]Field{ + "b": {raw: `"bar"`, status: valid}, + }, + raw: `{"a":1,"b":"bar"}`, + }, + }, + }, + + "recursive_struct": { + `{"child":{"name":"Alex"},"name":"Robert"}`, + Recursive{Name: "Robert", Child: &Recursive{Name: "Alex"}}, + }, + + "metadata_coerce": { + `{"a":"12","b":"12","c":null,"extra_typed":12,"extra_untyped":{"foo":"bar"}}`, + JSONFieldStruct{ + A: false, + B: 12, + C: "", + JSON: JSONFieldStructJSON{ + raw: `{"a":"12","b":"12","c":null,"extra_typed":12,"extra_untyped":{"foo":"bar"}}`, + A: Field{raw: `"12"`, status: invalid}, + B: Field{raw: `"12"`, status: valid}, + C: Field{raw: "null", status: null}, + D: Field{raw: "", status: missing}, + ExtraFields: map[string]Field{ + "extra_typed": { + raw: "12", + status: valid, + }, + "extra_untyped": { + raw: `{"foo":"bar"}`, + status: invalid, + }, + }, + }, + ExtraFields: map[string]int64{ + "extra_typed": 12, + "extra_untyped": 0, + }, + }, + }, + + "unknown_struct_number": { + `{"unknown":12}`, + UnknownStruct{ + Unknown: 12., + }, + }, + + "unknown_struct_map": { + `{"unknown":{"foo":"bar"}}`, + UnknownStruct{ + Unknown: map[string]any{ + "foo": "bar", + }, + }, + }, + + "union_integer": { + `{"union":12}`, + UnionStruct{ + Union: UnionInteger(12), + }, + }, + + "union_struct_discriminated_a": { + `{"union":{"a":"foo","b":"bar","type":"typeA"}}`, + UnionStruct{ + Union: UnionStructA{ + Type: "typeA", + A: "foo", + B: "bar", + }, + }, + }, + + "union_struct_discriminated_b": { + `{"union":{"a":"foo","type":"typeB"}}`, + UnionStruct{ + Union: UnionStructB{ + Type: "typeB", + A: "foo", + }, + }, + }, + + "union_struct_time": { + 
`{"union":"2010-05-23"}`, + UnionStruct{ + Union: UnionTime(time.Date(2010, 05, 23, 0, 0, 0, 0, time.UTC)), + }, + }, + + "complex_union_a": { + `{"union":{"boo":"12","foo":true}}`, + ComplexUnionStruct{Union: ComplexUnionA{Boo: "12", Foo: true}}, + }, + + "complex_union_b": { + `{"union":{"boo":true,"foo":"12"}}`, + ComplexUnionStruct{Union: ComplexUnionB{Boo: true, Foo: "12"}}, + }, + + "complex_union_c": { + `{"union":{"boo":12}}`, + ComplexUnionStruct{Union: ComplexUnionC{Boo: 12}}, + }, + + "complex_union_type_a": { + `{"union":{"baz":12,"type":"a"}}`, + ComplexUnionStruct{Union: ComplexUnionTypeA{Baz: 12, Type: TypeA("a")}}, + }, + + "complex_union_type_b": { + `{"union":{"baz":12,"type":"b"}}`, + ComplexUnionStruct{Union: ComplexUnionTypeB{Baz: 12, Type: TypeB("b")}}, + }, + + "marshalling_union_a": { + `{"boo":"hello"}`, + MarshallingUnionStruct{Union: MarshallingUnionA{Boo: "hello"}}, + }, + "marshalling_union_b": { + `{"foo":"hi"}`, + MarshallingUnionStruct{Union: MarshallingUnionB{Foo: "hi"}}, + }, + + "unmarshal": { + `{"foo":"hello"}`, + &UnmarshalStruct{Foo: "hello", prop: true}, + }, + + "array_of_unmarshal": { + `[{"foo":"hello"}]`, + []UnmarshalStruct{{Foo: "hello", prop: true}}, + }, + + "inline_coerce": { + `{"a":false,"b":237628372683,"c":654,"d":9999.43,"e":43.76,"f":[1,2,3,4]}`, + Inline{ + InlineField: Primitives{A: false, B: 237628372683, C: 0x28e, D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}, + JSON: InlineJSON{ + InlineField: Field{raw: "{\"a\":false,\"b\":237628372683,\"c\":654,\"d\":9999.43,\"e\":43.76,\"f\":[1,2,3,4]}", status: 3}, + raw: "{\"a\":false,\"b\":237628372683,\"c\":654,\"d\":9999.43,\"e\":43.76,\"f\":[1,2,3,4]}", + }, + }, + }, + + "inline_array_coerce": { + `["Hello","foo","bar"]`, + InlineArray{ + InlineField: []string{"Hello", "foo", "bar"}, + JSON: InlineJSON{ + InlineField: Field{raw: `["Hello","foo","bar"]`, status: 3}, + raw: `["Hello","foo","bar"]`, + }, + }, + }, +} + +func TestDecode(t *testing.T) { + for name, test 
:= range tests { + t.Run(name, func(t *testing.T) { + result := reflect.New(reflect.TypeOf(test.val)) + if err := Unmarshal([]byte(test.buf), result.Interface()); err != nil { + t.Fatalf("deserialization of %v failed with error %v", result, err) + } + if !reflect.DeepEqual(result.Elem().Interface(), test.val) { + t.Fatalf("expected '%s' to deserialize to \n%#v\nbut got\n%#v", test.buf, test.val, result.Elem().Interface()) + } + }) + } +} + +func TestEncode(t *testing.T) { + for name, test := range tests { + if strings.HasSuffix(name, "_coerce") { + continue + } + t.Run(name, func(t *testing.T) { + raw, err := Marshal(test.val) + if err != nil { + t.Fatalf("serialization of %v failed with error %v", test.val, err) + } + if string(raw) != test.buf { + t.Fatalf("expected %+#v to serialize to %s but got %s", test.val, test.buf, string(raw)) + } + }) + } +} diff --git a/sdks/go/internal/apijson/port.go b/sdks/go/internal/apijson/port.go new file mode 100644 index 00000000..b40013c1 --- /dev/null +++ b/sdks/go/internal/apijson/port.go @@ -0,0 +1,120 @@ +package apijson + +import ( + "fmt" + "reflect" +) + +// Port copies over values from one struct to another struct. 
+func Port(from any, to any) error { + toVal := reflect.ValueOf(to) + fromVal := reflect.ValueOf(from) + + if toVal.Kind() != reflect.Ptr || toVal.IsNil() { + return fmt.Errorf("destination must be a non-nil pointer") + } + + for toVal.Kind() == reflect.Ptr { + toVal = toVal.Elem() + } + toType := toVal.Type() + + for fromVal.Kind() == reflect.Ptr { + fromVal = fromVal.Elem() + } + fromType := fromVal.Type() + + if toType.Kind() != reflect.Struct { + return fmt.Errorf("destination must be a non-nil pointer to a struct (%v %v)", toType, toType.Kind()) + } + + values := map[string]reflect.Value{} + fields := map[string]reflect.Value{} + + fromJSON := fromVal.FieldByName("JSON") + toJSON := toVal.FieldByName("JSON") + + // Iterate through the fields of v and load all the "normal" fields in the struct to the map of + // string to reflect.Value, as well as their raw .JSON.Foo counterpart indicated by j. + var getFields func(t reflect.Type, v reflect.Value) + getFields = func(t reflect.Type, v reflect.Value) { + j := v.FieldByName("JSON") + + // Recurse into anonymous fields first, since the fields on the object should win over the fields in the + // embedded object. + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.Anonymous { + getFields(field.Type, v.Field(i)) + continue + } + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + ptag, ok := parseJSONStructTag(field) + if !ok || ptag.name == "-" || ptag.name == "" { + continue + } + values[ptag.name] = v.Field(i) + if j.IsValid() { + fields[ptag.name] = j.FieldByName(field.Name) + } + } + } + getFields(fromType, fromVal) + + // Use the values from the previous step to populate the 'to' struct. 
+ for i := 0; i < toType.NumField(); i++ { + field := toType.Field(i) + ptag, ok := parseJSONStructTag(field) + if !ok { + continue + } + if ptag.name == "-" { + continue + } + if value, ok := values[ptag.name]; ok { + delete(values, ptag.name) + if field.Type.Kind() == reflect.Interface { + toVal.Field(i).Set(value) + } else { + switch value.Kind() { + case reflect.String: + toVal.Field(i).SetString(value.String()) + case reflect.Bool: + toVal.Field(i).SetBool(value.Bool()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + toVal.Field(i).SetInt(value.Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + toVal.Field(i).SetUint(value.Uint()) + case reflect.Float32, reflect.Float64: + toVal.Field(i).SetFloat(value.Float()) + default: + toVal.Field(i).Set(value) + } + } + } + + if fromJSONField, ok := fields[ptag.name]; ok { + if toJSONField := toJSON.FieldByName(field.Name); toJSONField.IsValid() { + toJSONField.Set(fromJSONField) + } + } + } + + // Finally, copy over the .JSON.raw and .JSON.ExtraFields + if toJSON.IsValid() { + if raw := toJSON.FieldByName("raw"); raw.IsValid() { + setUnexportedField(raw, fromJSON.Interface().(interface{ RawJSON() string }).RawJSON()) + } + + if toExtraFields := toJSON.FieldByName("ExtraFields"); toExtraFields.IsValid() { + if fromExtraFields := fromJSON.FieldByName("ExtraFields"); fromExtraFields.IsValid() { + setUnexportedField(toExtraFields, fromExtraFields.Interface()) + } + } + } + + return nil +} diff --git a/sdks/go/internal/apijson/port_test.go b/sdks/go/internal/apijson/port_test.go new file mode 100644 index 00000000..bb01f1ad --- /dev/null +++ b/sdks/go/internal/apijson/port_test.go @@ -0,0 +1,257 @@ +package apijson + +import ( + "reflect" + "testing" +) + +type Metadata struct { + CreatedAt string `json:"created_at"` +} + +// Card is the "combined" type of CardVisa and CardMastercard +type Card struct { + Processor CardProcessor `json:"processor"` + 
Data any `json:"data"` + IsFoo bool `json:"is_foo"` + IsBar bool `json:"is_bar"` + Metadata Metadata `json:"metadata"` + Value any `json:"value"` + + JSON cardJSON +} + +type cardJSON struct { + Processor Field + Data Field + IsFoo Field + IsBar Field + Metadata Field + Value Field + ExtraFields map[string]Field + raw string +} + +func (r cardJSON) RawJSON() string { return r.raw } + +type CardProcessor string + +// CardVisa +type CardVisa struct { + Processor CardVisaProcessor `json:"processor"` + Data CardVisaData `json:"data"` + IsFoo bool `json:"is_foo"` + Metadata Metadata `json:"metadata"` + Value string `json:"value"` + + JSON cardVisaJSON +} + +type cardVisaJSON struct { + Processor Field + Data Field + IsFoo Field + Metadata Field + Value Field + ExtraFields map[string]Field + raw string +} + +func (r cardVisaJSON) RawJSON() string { return r.raw } + +type CardVisaProcessor string + +type CardVisaData struct { + Foo string `json:"foo"` +} + +// CardMastercard +type CardMastercard struct { + Processor CardMastercardProcessor `json:"processor"` + Data CardMastercardData `json:"data"` + IsBar bool `json:"is_bar"` + Metadata Metadata `json:"metadata"` + Value bool `json:"value"` + + JSON cardMastercardJSON +} + +type cardMastercardJSON struct { + Processor Field + Data Field + IsBar Field + Metadata Field + Value Field + ExtraFields map[string]Field + raw string +} + +func (r cardMastercardJSON) RawJSON() string { return r.raw } + +type CardMastercardProcessor string + +type CardMastercardData struct { + Bar int64 `json:"bar"` +} + +type CommonFields struct { + Metadata Metadata `json:"metadata"` + Value string `json:"value"` + + JSON commonFieldsJSON +} + +type commonFieldsJSON struct { + Metadata Field + Value Field + ExtraFields map[string]Field + raw string +} + +type CardEmbedded struct { + CommonFields + Processor CardVisaProcessor `json:"processor"` + Data CardVisaData `json:"data"` + IsFoo bool `json:"is_foo"` + + JSON cardEmbeddedJSON +} + +type 
cardEmbeddedJSON struct { + Processor Field + Data Field + IsFoo Field + ExtraFields map[string]Field + raw string +} + +func (r cardEmbeddedJSON) RawJSON() string { return r.raw } + +var portTests = map[string]struct { + from any + to any +}{ + "visa to card": { + CardVisa{ + Processor: "visa", + IsFoo: true, + Data: CardVisaData{ + Foo: "foo", + }, + Metadata: Metadata{ + CreatedAt: "Mar 29 2024", + }, + Value: "value", + JSON: cardVisaJSON{ + raw: `{"processor":"visa","is_foo":true,"data":{"foo":"foo"}}`, + Processor: Field{raw: `"visa"`, status: valid}, + IsFoo: Field{raw: `true`, status: valid}, + Data: Field{raw: `{"foo":"foo"}`, status: valid}, + Value: Field{raw: `"value"`, status: valid}, + ExtraFields: map[string]Field{"extra": {raw: `"yo"`, status: valid}}, + }, + }, + Card{ + Processor: "visa", + IsFoo: true, + IsBar: false, + Data: CardVisaData{ + Foo: "foo", + }, + Metadata: Metadata{ + CreatedAt: "Mar 29 2024", + }, + Value: "value", + JSON: cardJSON{ + raw: `{"processor":"visa","is_foo":true,"data":{"foo":"foo"}}`, + Processor: Field{raw: `"visa"`, status: valid}, + IsFoo: Field{raw: `true`, status: valid}, + Data: Field{raw: `{"foo":"foo"}`, status: valid}, + Value: Field{raw: `"value"`, status: valid}, + ExtraFields: map[string]Field{"extra": {raw: `"yo"`, status: valid}}, + }, + }, + }, + "mastercard to card": { + CardMastercard{ + Processor: "mastercard", + IsBar: true, + Data: CardMastercardData{ + Bar: 13, + }, + Value: false, + }, + Card{ + Processor: "mastercard", + IsFoo: false, + IsBar: true, + Data: CardMastercardData{ + Bar: 13, + }, + Value: false, + }, + }, + "embedded to card": { + CardEmbedded{ + CommonFields: CommonFields{ + Metadata: Metadata{ + CreatedAt: "Mar 29 2024", + }, + Value: "embedded_value", + JSON: commonFieldsJSON{ + Metadata: Field{raw: `{"created_at":"Mar 29 2024"}`, status: valid}, + Value: Field{raw: `"embedded_value"`, status: valid}, + raw: `should not matter`, + }, + }, + Processor: "visa", + IsFoo: true, + 
Data: CardVisaData{ + Foo: "embedded_foo", + }, + JSON: cardEmbeddedJSON{ + raw: `{"processor":"visa","is_foo":true,"data":{"foo":"embedded_foo"},"metadata":{"created_at":"Mar 29 2024"},"value":"embedded_value"}`, + Processor: Field{raw: `"visa"`, status: valid}, + IsFoo: Field{raw: `true`, status: valid}, + Data: Field{raw: `{"foo":"embedded_foo"}`, status: valid}, + }, + }, + Card{ + Processor: "visa", + IsFoo: true, + IsBar: false, + Data: CardVisaData{ + Foo: "embedded_foo", + }, + Metadata: Metadata{ + CreatedAt: "Mar 29 2024", + }, + Value: "embedded_value", + JSON: cardJSON{ + raw: `{"processor":"visa","is_foo":true,"data":{"foo":"embedded_foo"},"metadata":{"created_at":"Mar 29 2024"},"value":"embedded_value"}`, + Processor: Field{raw: `"visa"`, status: 0x3}, + IsFoo: Field{raw: "true", status: 0x3}, + Data: Field{raw: `{"foo":"embedded_foo"}`, status: 0x3}, + Metadata: Field{raw: `{"created_at":"Mar 29 2024"}`, status: 0x3}, + Value: Field{raw: `"embedded_value"`, status: 0x3}, + }, + }, + }, +} + +func TestPort(t *testing.T) { + for name, test := range portTests { + t.Run(name, func(t *testing.T) { + toVal := reflect.New(reflect.TypeOf(test.to)) + + err := Port(test.from, toVal.Interface()) + if err != nil { + t.Fatalf("port of %v failed with error %v", test.from, err) + } + + if !reflect.DeepEqual(toVal.Elem().Interface(), test.to) { + t.Fatalf("expected:\n%+#v\n\nto port to:\n%+#v\n\nbut got:\n%+#v", test.from, test.to, toVal.Elem().Interface()) + } + }) + } +} diff --git a/sdks/go/internal/apijson/registry.go b/sdks/go/internal/apijson/registry.go new file mode 100644 index 00000000..2a249827 --- /dev/null +++ b/sdks/go/internal/apijson/registry.go @@ -0,0 +1,51 @@ +package apijson + +import ( + "reflect" + + "github.com/tidwall/gjson" +) + +type UnionVariant struct { + TypeFilter gjson.Type + DiscriminatorValue any + Type reflect.Type +} + +var unionRegistry = map[reflect.Type]unionEntry{} +var unionVariants = map[reflect.Type]any{} + +type unionEntry 
struct { + discriminatorKey string + variants []UnionVariant +} + +func Discriminator[T any](value any) UnionVariant { + var zero T + return UnionVariant{ + TypeFilter: gjson.JSON, + DiscriminatorValue: value, + Type: reflect.TypeOf(zero), + } +} + +func RegisterUnion[T any](discriminator string, variants ...UnionVariant) { + typ := reflect.TypeOf((*T)(nil)).Elem() + unionRegistry[typ] = unionEntry{ + discriminatorKey: discriminator, + variants: variants, + } + for _, variant := range variants { + unionVariants[variant.Type] = typ + } +} + +// Useful to wrap a union type to force it to use [apijson.UnmarshalJSON] since you cannot define an +// UnmarshalJSON function on the interface itself. +type UnionUnmarshaler[T any] struct { + Value T +} + +func (c *UnionUnmarshaler[T]) UnmarshalJSON(buf []byte) error { + return UnmarshalRoot(buf, &c.Value) +} diff --git a/sdks/go/internal/apijson/subfield.go b/sdks/go/internal/apijson/subfield.go new file mode 100644 index 00000000..55b02575 --- /dev/null +++ b/sdks/go/internal/apijson/subfield.go @@ -0,0 +1,67 @@ +package apijson + +import ( + "github.com/kernel/hypeman-go/packages/respjson" + "reflect" +) + +func getSubField(root reflect.Value, index []int, name string) reflect.Value { + strct := root.FieldByIndex(index[:len(index)-1]) + if !strct.IsValid() { + panic("couldn't find encapsulating struct for field " + name) + } + meta := strct.FieldByName("JSON") + if !meta.IsValid() { + return reflect.Value{} + } + field := meta.FieldByName(name) + if !field.IsValid() { + return reflect.Value{} + } + return field +} + +func setMetadataSubField(root reflect.Value, index []int, name string, meta Field) { + target := getSubField(root, index, name) + if !target.IsValid() { + return + } + + if target.Type() == reflect.TypeOf(meta) { + target.Set(reflect.ValueOf(meta)) + } else if respMeta := meta.toRespField(); target.Type() == reflect.TypeOf(respMeta) { + target.Set(reflect.ValueOf(respMeta)) + } +} + +func 
setMetadataExtraFields(root reflect.Value, index []int, name string, metaExtras map[string]Field) { + target := getSubField(root, index, name) + if !target.IsValid() { + return + } + + if target.Type() == reflect.TypeOf(metaExtras) { + target.Set(reflect.ValueOf(metaExtras)) + return + } + + newMap := make(map[string]respjson.Field, len(metaExtras)) + if target.Type() == reflect.TypeOf(newMap) { + for k, v := range metaExtras { + newMap[k] = v.toRespField() + } + target.Set(reflect.ValueOf(newMap)) + } +} + +func (f Field) toRespField() respjson.Field { + if f.IsMissing() { + return respjson.Field{} + } else if f.IsNull() { + return respjson.NewField("null") + } else if f.IsInvalid() { + return respjson.NewInvalidField(f.raw) + } else { + return respjson.NewField(f.raw) + } +} diff --git a/sdks/go/internal/apijson/tag.go b/sdks/go/internal/apijson/tag.go new file mode 100644 index 00000000..49731b88 --- /dev/null +++ b/sdks/go/internal/apijson/tag.go @@ -0,0 +1,67 @@ +package apijson + +import ( + "reflect" + "strings" +) + +const apiStructTag = "api" +const jsonStructTag = "json" +const formatStructTag = "format" + +type parsedStructTag struct { + name string + required bool + extras bool + metadata bool + inline bool +} + +func parseJSONStructTag(field reflect.StructField) (tag parsedStructTag, ok bool) { + raw, ok := field.Tag.Lookup(jsonStructTag) + if !ok { + return + } + parts := strings.Split(raw, ",") + if len(parts) == 0 { + return tag, false + } + tag.name = parts[0] + for _, part := range parts[1:] { + switch part { + case "required": + tag.required = true + case "extras": + tag.extras = true + case "metadata": + tag.metadata = true + case "inline": + tag.inline = true + } + } + + // the `api` struct tag is only used alongside `json` for custom behaviour + parseApiStructTag(field, &tag) + return +} + +func parseApiStructTag(field reflect.StructField, tag *parsedStructTag) { + raw, ok := field.Tag.Lookup(apiStructTag) + if !ok { + return + } + parts := 
strings.Split(raw, ",")
+	for _, part := range parts {
+		switch part {
+		case "extrafields":
+			tag.extras = true
+		case "required":
+			tag.required = true
+		}
+	}
+}
+
+func parseFormatStructTag(field reflect.StructField) (format string, ok bool) {
+	format, ok = field.Tag.Lookup(formatStructTag)
+	return
+}
diff --git a/sdks/go/internal/apijson/union.go b/sdks/go/internal/apijson/union.go
new file mode 100644
index 00000000..fa66e16d
--- /dev/null
+++ b/sdks/go/internal/apijson/union.go
@@ -0,0 +1,208 @@
+package apijson
+
+import (
+	"errors"
+	"github.com/kernel/hypeman-go/packages/param"
+	"reflect"
+
+	"github.com/tidwall/gjson"
+)
+
+var apiUnionType = reflect.TypeOf(param.APIUnion{})
+
+func isStructUnion(t reflect.Type) bool {
+	if t.Kind() != reflect.Struct {
+		return false
+	}
+	for i := 0; i < t.NumField(); i++ {
+		if t.Field(i).Type == apiUnionType && t.Field(i).Anonymous {
+			return true
+		}
+	}
+	return false
+}
+
+func RegisterDiscriminatedUnion[T any](key string, mappings map[string]reflect.Type) {
+	unionType := reflect.TypeOf((*T)(nil)).Elem() // NOTE: reflect.TypeOf(zero T) is nil when T is an interface; this form works for both, matching RegisterUnion
+	entry := unionEntry{
+		discriminatorKey: key,
+		variants:         []UnionVariant{},
+	}
+	for k, typ := range mappings {
+		entry.variants = append(entry.variants, UnionVariant{
+			DiscriminatorValue: k,
+			Type:               typ,
+		})
+	}
+	unionRegistry[unionType] = entry
+}
+
+func (d *decoderBuilder) newStructUnionDecoder(t reflect.Type) decoderFunc {
+	type variantDecoder struct {
+		decoder decoderFunc
+		field   reflect.StructField
+	}
+	decoders := []variantDecoder{}
+	for i := 0; i < t.NumField(); i++ {
+		field := t.Field(i)
+
+		if field.Anonymous && field.Type == apiUnionType {
+			continue
+		}
+
+		decoder := d.typeDecoder(field.Type)
+		decoders = append(decoders, variantDecoder{
+			decoder: decoder,
+			field:   field,
+		})
+	}
+
+	type discriminatedDecoder struct {
+		variantDecoder
+		discriminator any
+	}
+	discriminatedDecoders := []discriminatedDecoder{}
+	unionEntry, discriminated := unionRegistry[t]
+	for _, variant := range unionEntry.variants {
+		// For 
each union variant, find a matching decoder and save it + for _, decoder := range decoders { + if decoder.field.Type.Elem() == variant.Type { + discriminatedDecoders = append(discriminatedDecoders, discriminatedDecoder{ + decoder, + variant.DiscriminatorValue, + }) + break + } + } + } + + return func(n gjson.Result, v reflect.Value, state *decoderState) error { + if discriminated && n.Type == gjson.JSON && len(unionEntry.discriminatorKey) != 0 { + discriminator := n.Get(EscapeSJSONKey(unionEntry.discriminatorKey)).Value() + for _, decoder := range discriminatedDecoders { + if discriminator == decoder.discriminator { + inner := v.FieldByIndex(decoder.field.Index) + return decoder.decoder(n, inner, state) + } + } + return errors.New("apijson: was not able to find discriminated union variant") + } + + // Set bestExactness to worse than loose + bestExactness := loose - 1 + bestVariant := -1 + for i, decoder := range decoders { + // Pointers are used to discern JSON object variants from value variants + if n.Type != gjson.JSON && decoder.field.Type.Kind() == reflect.Ptr { + continue + } + + sub := decoderState{strict: state.strict, exactness: exact} + inner := v.FieldByIndex(decoder.field.Index) + err := decoder.decoder(n, inner, &sub) + if err != nil { + continue + } + if sub.exactness == exact { + bestExactness = exact + bestVariant = i + break + } + if sub.exactness > bestExactness { + bestExactness = sub.exactness + bestVariant = i + } + } + + if bestExactness < loose { + return errors.New("apijson: was not able to coerce type as union") + } + + if guardStrict(state, bestExactness != exact) { + return errors.New("apijson: was not able to coerce type as union strictly") + } + + for i := 0; i < len(decoders); i++ { + if i == bestVariant { + continue + } + v.FieldByIndex(decoders[i].field.Index).SetZero() + } + + return nil + } +} + +// newUnionDecoder returns a decoderFunc that deserializes into a union using an +// algorithm roughly similar to Pydantic's [smart 
algorithm]. +// +// Conceptually this is equivalent to choosing the best schema based on how 'exact' +// the deserialization is for each of the schemas. +// +// If there is a tie in the level of exactness, then the tie is broken +// left-to-right. +// +// [smart algorithm]: https://docs.pydantic.dev/latest/concepts/unions/#smart-mode +func (d *decoderBuilder) newUnionDecoder(t reflect.Type) decoderFunc { + unionEntry, ok := unionRegistry[t] + if !ok { + panic("apijson: couldn't find union of type " + t.String() + " in union registry") + } + decoders := []decoderFunc{} + for _, variant := range unionEntry.variants { + decoder := d.typeDecoder(variant.Type) + decoders = append(decoders, decoder) + } + return func(n gjson.Result, v reflect.Value, state *decoderState) error { + // If there is a discriminator match, circumvent the exactness logic entirely + for idx, variant := range unionEntry.variants { + decoder := decoders[idx] + if variant.TypeFilter != n.Type { + continue + } + + if len(unionEntry.discriminatorKey) != 0 { + discriminatorValue := n.Get(EscapeSJSONKey(unionEntry.discriminatorKey)).Value() + if discriminatorValue == variant.DiscriminatorValue { + inner := reflect.New(variant.Type).Elem() + err := decoder(n, inner, state) + v.Set(inner) + return err + } + } + } + + // Set bestExactness to worse than loose + bestExactness := loose - 1 + for idx, variant := range unionEntry.variants { + decoder := decoders[idx] + if variant.TypeFilter != n.Type { + continue + } + sub := decoderState{strict: state.strict, exactness: exact} + inner := reflect.New(variant.Type).Elem() + err := decoder(n, inner, &sub) + if err != nil { + continue + } + if sub.exactness == exact { + v.Set(inner) + return nil + } + if sub.exactness > bestExactness { + v.Set(inner) + bestExactness = sub.exactness + } + } + + if bestExactness < loose { + return errors.New("apijson: was not able to coerce type as union") + } + + if guardStrict(state, bestExactness != exact) { + return 
errors.New("apijson: was not able to coerce type as union strictly") + } + + return nil + } +} diff --git a/sdks/go/internal/apiquery/encoder.go b/sdks/go/internal/apiquery/encoder.go new file mode 100644 index 00000000..a98c29c1 --- /dev/null +++ b/sdks/go/internal/apiquery/encoder.go @@ -0,0 +1,415 @@ +package apiquery + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + "strings" + "sync" + "time" + + "github.com/kernel/hypeman-go/packages/param" +) + +var encoders sync.Map // map[reflect.Type]encoderFunc + +type encoder struct { + dateFormat string + root bool + settings QuerySettings +} + +type encoderFunc func(key string, value reflect.Value) ([]Pair, error) + +type encoderField struct { + tag parsedStructTag + fn encoderFunc + idx []int +} + +type encoderEntry struct { + reflect.Type + dateFormat string + root bool + settings QuerySettings +} + +type Pair struct { + key string + value string +} + +func (e *encoder) typeEncoder(t reflect.Type) encoderFunc { + entry := encoderEntry{ + Type: t, + dateFormat: e.dateFormat, + root: e.root, + settings: e.settings, + } + + if fi, ok := encoders.Load(entry); ok { + return fi.(encoderFunc) + } + + // To deal with recursive types, populate the map with an + // indirect func before we build it. This type waits on the + // real func (f) to be ready and then calls it. This indirect + // func is only used for recursive types. + var ( + wg sync.WaitGroup + f encoderFunc + ) + wg.Add(1) + fi, loaded := encoders.LoadOrStore(entry, encoderFunc(func(key string, v reflect.Value) ([]Pair, error) { + wg.Wait() + return f(key, v) + })) + if loaded { + return fi.(encoderFunc) + } + + // Compute the real encoder and replace the indirect func with it. 
+ f = e.newTypeEncoder(t) + wg.Done() + encoders.Store(entry, f) + return f +} + +func marshalerEncoder(key string, value reflect.Value) ([]Pair, error) { + s, err := value.Interface().(json.Marshaler).MarshalJSON() + if err != nil { + return nil, fmt.Errorf("apiquery: json fallback marshal error %s", err) + } + return []Pair{{key, string(s)}}, nil +} + +func (e *encoder) newTypeEncoder(t reflect.Type) encoderFunc { + if t.ConvertibleTo(reflect.TypeOf(time.Time{})) { + return e.newTimeTypeEncoder(t) + } + + if t.Implements(reflect.TypeOf((*param.Optional)(nil)).Elem()) { + return e.newRichFieldTypeEncoder(t) + } + + if !e.root && t.Implements(reflect.TypeOf((*json.Marshaler)(nil)).Elem()) { + return marshalerEncoder + } + + e.root = false + switch t.Kind() { + case reflect.Pointer: + encoder := e.typeEncoder(t.Elem()) + return func(key string, value reflect.Value) (pairs []Pair, err error) { + if !value.IsValid() || value.IsNil() { + return + } + return encoder(key, value.Elem()) + } + case reflect.Struct: + return e.newStructTypeEncoder(t) + case reflect.Array: + fallthrough + case reflect.Slice: + return e.newArrayTypeEncoder(t) + case reflect.Map: + return e.newMapEncoder(t) + case reflect.Interface: + return e.newInterfaceEncoder() + default: + return e.newPrimitiveTypeEncoder(t) + } +} + +func (e *encoder) newStructTypeEncoder(t reflect.Type) encoderFunc { + if t.Implements(reflect.TypeOf((*param.Optional)(nil)).Elem()) { + return e.newRichFieldTypeEncoder(t) + } + + for i := 0; i < t.NumField(); i++ { + if t.Field(i).Type == paramUnionType && t.Field(i).Anonymous { + return e.newStructUnionTypeEncoder(t) + } + } + + encoderFields := []encoderField{} + + // This helper allows us to recursively collect field encoders into a flat + // array. The parameter `index` keeps track of the access patterns necessary + // to get to some field. 
+ var collectEncoderFields func(r reflect.Type, index []int) + collectEncoderFields = func(r reflect.Type, index []int) { + for i := 0; i < r.NumField(); i++ { + idx := append(index, i) + field := t.FieldByIndex(idx) + if !field.IsExported() { + continue + } + // If this is an embedded struct, traverse one level deeper to extract + // the field and get their encoders as well. + if field.Anonymous { + collectEncoderFields(field.Type, idx) + continue + } + // If query tag is not present, then we skip, which is intentionally + // different behavior from the stdlib. + ptag, ok := parseQueryStructTag(field) + if !ok { + continue + } + + if (ptag.name == "-" || ptag.name == "") && !ptag.inline { + continue + } + + dateFormat, ok := parseFormatStructTag(field) + oldFormat := e.dateFormat + if ok { + switch dateFormat { + case "date-time": + e.dateFormat = time.RFC3339 + case "date": + e.dateFormat = "2006-01-02" + } + } + var encoderFn encoderFunc + if ptag.omitzero { + typeEncoderFn := e.typeEncoder(field.Type) + encoderFn = func(key string, value reflect.Value) ([]Pair, error) { + if value.IsZero() { + return nil, nil + } + return typeEncoderFn(key, value) + } + } else { + encoderFn = e.typeEncoder(field.Type) + } + encoderFields = append(encoderFields, encoderField{ptag, encoderFn, idx}) + e.dateFormat = oldFormat + } + } + collectEncoderFields(t, []int{}) + + return func(key string, value reflect.Value) (pairs []Pair, err error) { + for _, ef := range encoderFields { + var subkey string = e.renderKeyPath(key, ef.tag.name) + if ef.tag.inline { + subkey = key + } + + field := value.FieldByIndex(ef.idx) + subpairs, suberr := ef.fn(subkey, field) + if suberr != nil { + err = suberr + } + pairs = append(pairs, subpairs...) 
+ } + return + } +} + +var paramUnionType = reflect.TypeOf((*param.APIUnion)(nil)).Elem() + +func (e *encoder) newStructUnionTypeEncoder(t reflect.Type) encoderFunc { + var fieldEncoders []encoderFunc + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.Type == paramUnionType && field.Anonymous { + fieldEncoders = append(fieldEncoders, nil) + continue + } + fieldEncoders = append(fieldEncoders, e.typeEncoder(field.Type)) + } + + return func(key string, value reflect.Value) (pairs []Pair, err error) { + for i := 0; i < t.NumField(); i++ { + if value.Field(i).Type() == paramUnionType { + continue + } + if !value.Field(i).IsZero() { + return fieldEncoders[i](key, value.Field(i)) + } + } + return nil, fmt.Errorf("apiquery: union %s has no field set", t.String()) + } +} + +func (e *encoder) newMapEncoder(t reflect.Type) encoderFunc { + keyEncoder := e.typeEncoder(t.Key()) + elementEncoder := e.typeEncoder(t.Elem()) + return func(key string, value reflect.Value) (pairs []Pair, err error) { + iter := value.MapRange() + for iter.Next() { + encodedKey, err := keyEncoder("", iter.Key()) + if err != nil { + return nil, err + } + if len(encodedKey) != 1 { + return nil, fmt.Errorf("apiquery: unexpected number of parts for encoded map key, map may contain non-primitive") + } + subkey := encodedKey[0].value + keyPath := e.renderKeyPath(key, subkey) + subpairs, suberr := elementEncoder(keyPath, iter.Value()) + if suberr != nil { + err = suberr + } + pairs = append(pairs, subpairs...) 
+ } + return + } +} + +func (e *encoder) renderKeyPath(key string, subkey string) string { + if len(key) == 0 { + return subkey + } + if e.settings.NestedFormat == NestedQueryFormatDots { + return fmt.Sprintf("%s.%s", key, subkey) + } + return fmt.Sprintf("%s[%s]", key, subkey) +} + +func (e *encoder) newArrayTypeEncoder(t reflect.Type) encoderFunc { + switch e.settings.ArrayFormat { + case ArrayQueryFormatComma: + innerEncoder := e.typeEncoder(t.Elem()) + return func(key string, v reflect.Value) ([]Pair, error) { + elements := []string{} + for i := 0; i < v.Len(); i++ { + innerPairs, err := innerEncoder("", v.Index(i)) + if err != nil { + return nil, err + } + for _, pair := range innerPairs { + elements = append(elements, pair.value) + } + } + if len(elements) == 0 { + return []Pair{}, nil + } + return []Pair{{key, strings.Join(elements, ",")}}, nil + } + case ArrayQueryFormatRepeat: + innerEncoder := e.typeEncoder(t.Elem()) + return func(key string, value reflect.Value) (pairs []Pair, err error) { + for i := 0; i < value.Len(); i++ { + subpairs, suberr := innerEncoder(key, value.Index(i)) + if suberr != nil { + err = suberr + } + pairs = append(pairs, subpairs...) + } + return + } + case ArrayQueryFormatIndices: + panic("The array indices format is not supported yet") + case ArrayQueryFormatBrackets: + innerEncoder := e.typeEncoder(t.Elem()) + return func(key string, value reflect.Value) (pairs []Pair, err error) { + pairs = []Pair{} + for i := 0; i < value.Len(); i++ { + subpairs, suberr := innerEncoder(key+"[]", value.Index(i)) + if suberr != nil { + err = suberr + } + pairs = append(pairs, subpairs...) 
+ } + return + } + default: + panic(fmt.Sprintf("Unknown ArrayFormat value: %d", e.settings.ArrayFormat)) + } +} + +func (e *encoder) newPrimitiveTypeEncoder(t reflect.Type) encoderFunc { + switch t.Kind() { + case reflect.Pointer: + inner := t.Elem() + + innerEncoder := e.newPrimitiveTypeEncoder(inner) + return func(key string, v reflect.Value) ([]Pair, error) { + if !v.IsValid() || v.IsNil() { + return nil, nil + } + return innerEncoder(key, v.Elem()) + } + case reflect.String: + return func(key string, v reflect.Value) ([]Pair, error) { + return []Pair{{key, v.String()}}, nil + } + case reflect.Bool: + return func(key string, v reflect.Value) ([]Pair, error) { + if v.Bool() { + return []Pair{{key, "true"}}, nil + } + return []Pair{{key, "false"}}, nil + } + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: + return func(key string, v reflect.Value) ([]Pair, error) { + return []Pair{{key, strconv.FormatInt(v.Int(), 10)}}, nil + } + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return func(key string, v reflect.Value) ([]Pair, error) { + return []Pair{{key, strconv.FormatUint(v.Uint(), 10)}}, nil + } + case reflect.Float32, reflect.Float64: + return func(key string, v reflect.Value) ([]Pair, error) { + return []Pair{{key, strconv.FormatFloat(v.Float(), 'f', -1, 64)}}, nil + } + case reflect.Complex64, reflect.Complex128: + bitSize := 64 + if t.Kind() == reflect.Complex128 { + bitSize = 128 + } + return func(key string, v reflect.Value) ([]Pair, error) { + return []Pair{{key, strconv.FormatComplex(v.Complex(), 'f', -1, bitSize)}}, nil + } + default: + return func(key string, v reflect.Value) ([]Pair, error) { + return nil, nil + } + } +} + +func (e *encoder) newFieldTypeEncoder(t reflect.Type) encoderFunc { + f, _ := t.FieldByName("Value") + enc := e.typeEncoder(f.Type) + + return func(key string, value reflect.Value) ([]Pair, error) { + present := value.FieldByName("Present") + if !present.Bool() { + return nil, nil + } + null 
:= value.FieldByName("Null") + if null.Bool() { + return nil, fmt.Errorf("apiquery: field cannot be null") + } + raw := value.FieldByName("Raw") + if !raw.IsNil() { + return e.typeEncoder(raw.Type())(key, raw) + } + return enc(key, value.FieldByName("Value")) + } +} + +func (e *encoder) newTimeTypeEncoder(_ reflect.Type) encoderFunc { + format := e.dateFormat + return func(key string, value reflect.Value) ([]Pair, error) { + return []Pair{{ + key, + value.Convert(reflect.TypeOf(time.Time{})).Interface().(time.Time).Format(format), + }}, nil + } +} + +func (e encoder) newInterfaceEncoder() encoderFunc { + return func(key string, value reflect.Value) ([]Pair, error) { + value = value.Elem() + if !value.IsValid() { + return nil, nil + } + return e.typeEncoder(value.Type())(key, value) + } + +} diff --git a/sdks/go/internal/apiquery/query.go b/sdks/go/internal/apiquery/query.go new file mode 100644 index 00000000..0f379fa3 --- /dev/null +++ b/sdks/go/internal/apiquery/query.go @@ -0,0 +1,55 @@ +package apiquery + +import ( + "net/url" + "reflect" + "time" +) + +func MarshalWithSettings(value any, settings QuerySettings) (url.Values, error) { + e := encoder{time.RFC3339, true, settings} + kv := url.Values{} + val := reflect.ValueOf(value) + if !val.IsValid() { + return nil, nil + } + typ := val.Type() + + pairs, err := e.typeEncoder(typ)("", val) + if err != nil { + return nil, err + } + for _, pair := range pairs { + kv.Add(pair.key, pair.value) + } + return kv, nil +} + +func Marshal(value any) (url.Values, error) { + return MarshalWithSettings(value, QuerySettings{}) +} + +type Queryer interface { + URLQuery() (url.Values, error) +} + +type QuerySettings struct { + NestedFormat NestedQueryFormat + ArrayFormat ArrayQueryFormat +} + +type NestedQueryFormat int + +const ( + NestedQueryFormatBrackets NestedQueryFormat = iota + NestedQueryFormatDots +) + +type ArrayQueryFormat int + +const ( + ArrayQueryFormatComma ArrayQueryFormat = iota + ArrayQueryFormatRepeat + 
ArrayQueryFormatIndices + ArrayQueryFormatBrackets +) diff --git a/sdks/go/internal/apiquery/query_test.go b/sdks/go/internal/apiquery/query_test.go new file mode 100644 index 00000000..d3d628e9 --- /dev/null +++ b/sdks/go/internal/apiquery/query_test.go @@ -0,0 +1,435 @@ +package apiquery + +import ( + "github.com/kernel/hypeman-go/packages/param" + "net/url" + "testing" + "time" +) + +func P[T any](v T) *T { return &v } + +type Primitives struct { + A bool `query:"a"` + B int `query:"b"` + C uint `query:"c"` + D float64 `query:"d"` + E float32 `query:"e"` + F []int `query:"f"` +} + +type PrimitivePointers struct { + A *bool `query:"a"` + B *int `query:"b"` + C *uint `query:"c"` + D *float64 `query:"d"` + E *float32 `query:"e"` + F *[]int `query:"f"` +} + +type Slices struct { + Slice []Primitives `query:"slices"` + Mixed []any `query:"mixed"` +} + +type DateTime struct { + Date time.Time `query:"date" format:"date"` + DateTime time.Time `query:"date-time" format:"date-time"` +} + +type AdditionalProperties struct { + A bool `query:"a"` + Extras map[string]any `query:"-,inline"` +} + +type Recursive struct { + Name string `query:"name"` + Child *Recursive `query:"child"` +} + +type UnknownStruct struct { + Unknown any `query:"unknown"` +} + +type UnionStruct struct { + Union Union `query:"union" format:"date"` +} + +type Union interface { + union() +} + +type UnionInteger int64 + +func (UnionInteger) union() {} + +type UnionString string + +func (UnionString) union() {} + +type UnionStructA struct { + Type string `query:"type"` + A string `query:"a"` + B string `query:"b"` +} + +func (UnionStructA) union() {} + +type UnionStructB struct { + Type string `query:"type"` + A string `query:"a"` +} + +func (UnionStructB) union() {} + +type UnionTime time.Time + +func (UnionTime) union() {} + +type DeeplyNested struct { + A DeeplyNested1 `query:"a"` +} + +type DeeplyNested1 struct { + B DeeplyNested2 `query:"b"` +} + +type DeeplyNested2 struct { + C DeeplyNested3 
`query:"c"` +} + +type DeeplyNested3 struct { + D *string `query:"d"` +} + +type RichPrimitives struct { + A param.Opt[string] `query:"a"` +} + +type QueryOmitTest struct { + A param.Opt[string] `query:"a,omitzero"` + B string `query:"b,omitzero"` +} + +type NamedEnum string + +const NamedEnumFoo NamedEnum = "foo" + +type StructUnionWrapper struct { + Union StructUnion `query:"union"` +} + +type StructUnion struct { + OfInt param.Opt[int64] `query:",omitzero,inline"` + OfString param.Opt[string] `query:",omitzero,inline"` + OfEnum param.Opt[NamedEnum] `query:",omitzero,inline"` + OfA UnionStructA `query:",omitzero,inline"` + OfB UnionStructB `query:",omitzero,inline"` + param.APIUnion +} + +var tests = map[string]struct { + enc string + val any + settings QuerySettings +}{ + "primitives": { + "a=false&b=237628372683&c=654&d=9999.43&e=43.7599983215332&f=1,2,3,4", + Primitives{A: false, B: 237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}, + QuerySettings{}, + }, + + "slices_brackets": { + `mixed[]=1&mixed[]=2.3&mixed[]=hello&slices[][a]=false&slices[][a]=false&slices[][b]=237628372683&slices[][b]=237628372683&slices[][c]=654&slices[][c]=654&slices[][d]=9999.43&slices[][d]=9999.43&slices[][e]=43.7599983215332&slices[][e]=43.7599983215332&slices[][f][]=1&slices[][f][]=2&slices[][f][]=3&slices[][f][]=4&slices[][f][]=1&slices[][f][]=2&slices[][f][]=3&slices[][f][]=4`, + Slices{ + Slice: []Primitives{ + {A: false, B: 237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}, + {A: false, B: 237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}, + }, + Mixed: []any{1, 2.3, "hello"}, + }, + QuerySettings{ArrayFormat: ArrayQueryFormatBrackets}, + }, + + "slices_comma": { + `mixed=1,2.3,hello`, + Slices{ + Mixed: []any{1, 2.3, "hello"}, + }, + QuerySettings{ArrayFormat: ArrayQueryFormatComma}, + }, + + "slices_repeat": { + 
`mixed=1&mixed=2.3&mixed=hello&slices[a]=false&slices[a]=false&slices[b]=237628372683&slices[b]=237628372683&slices[c]=654&slices[c]=654&slices[d]=9999.43&slices[d]=9999.43&slices[e]=43.7599983215332&slices[e]=43.7599983215332&slices[f]=1&slices[f]=2&slices[f]=3&slices[f]=4&slices[f]=1&slices[f]=2&slices[f]=3&slices[f]=4`, + Slices{ + Slice: []Primitives{ + {A: false, B: 237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}, + {A: false, B: 237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}, + }, + Mixed: []any{1, 2.3, "hello"}, + }, + QuerySettings{ArrayFormat: ArrayQueryFormatRepeat}, + }, + + "primitive_pointer_struct": { + "a=false&b=237628372683&c=654&d=9999.43&e=43.7599983215332&f=1,2,3,4,5", + PrimitivePointers{ + A: P(false), + B: P(237628372683), + C: P(uint(654)), + D: P(9999.43), + E: P(float32(43.76)), + F: &[]int{1, 2, 3, 4, 5}, + }, + QuerySettings{}, + }, + + "datetime_struct": { + `date=2006-01-02&date-time=2006-01-02T15:04:05Z`, + DateTime{ + Date: time.Date(2006, time.January, 2, 0, 0, 0, 0, time.UTC), + DateTime: time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC), + }, + QuerySettings{}, + }, + + "additional_properties": { + `a=true&bar=value&foo=true`, + AdditionalProperties{ + A: true, + Extras: map[string]any{ + "bar": "value", + "foo": true, + }, + }, + QuerySettings{}, + }, + + "recursive_struct_brackets": { + `child[name]=Alex&name=Robert`, + Recursive{Name: "Robert", Child: &Recursive{Name: "Alex"}}, + QuerySettings{NestedFormat: NestedQueryFormatBrackets}, + }, + + "recursive_struct_dots": { + `child.name=Alex&name=Robert`, + Recursive{Name: "Robert", Child: &Recursive{Name: "Alex"}}, + QuerySettings{NestedFormat: NestedQueryFormatDots}, + }, + + "unknown_struct_number": { + `unknown=12`, + UnknownStruct{ + Unknown: 12., + }, + QuerySettings{}, + }, + + "unknown_struct_map_brackets": { + `unknown[foo]=bar`, + UnknownStruct{ + Unknown: map[string]any{ + "foo": "bar", + }, + }, + 
QuerySettings{NestedFormat: NestedQueryFormatBrackets}, + }, + + "unknown_struct_map_dots": { + `unknown.foo=bar`, + UnknownStruct{ + Unknown: map[string]any{ + "foo": "bar", + }, + }, + QuerySettings{NestedFormat: NestedQueryFormatDots}, + }, + + "struct_union_string": { + `union=hello`, + StructUnionWrapper{ + Union: StructUnion{OfString: param.NewOpt("hello")}, + }, + QuerySettings{}, + }, + + "union_string": { + `union=hello`, + UnionStruct{ + Union: UnionString("hello"), + }, + QuerySettings{}, + }, + + "struct_union_integer": { + `union=12`, + StructUnionWrapper{ + Union: StructUnion{OfInt: param.NewOpt[int64](12)}, + }, + QuerySettings{}, + }, + + "union_integer": { + `union=12`, + UnionStruct{ + Union: UnionInteger(12), + }, + QuerySettings{}, + }, + + "struct_union_enum": { + `union=foo`, + StructUnionWrapper{ + Union: StructUnion{OfEnum: param.NewOpt[NamedEnum](NamedEnumFoo)}, + }, + QuerySettings{}, + }, + + "struct_union_struct_discriminated_a": { + `union[a]=foo&union[b]=bar&union[type]=typeA`, + StructUnionWrapper{ + Union: StructUnion{OfA: UnionStructA{ + Type: "typeA", + A: "foo", + B: "bar", + }}, + }, + QuerySettings{}, + }, + + "union_struct_discriminated_a": { + `union[a]=foo&union[b]=bar&union[type]=typeA`, + UnionStruct{ + Union: UnionStructA{ + Type: "typeA", + A: "foo", + B: "bar", + }, + }, + QuerySettings{}, + }, + + "struct_union_struct_discriminated_b": { + `union[a]=foo&union[type]=typeB`, + StructUnionWrapper{ + Union: StructUnion{OfB: UnionStructB{ + Type: "typeB", + A: "foo", + }}, + }, + QuerySettings{}, + }, + + "union_struct_discriminated_b": { + `union[a]=foo&union[type]=typeB`, + UnionStruct{ + Union: UnionStructB{ + Type: "typeB", + A: "foo", + }, + }, + QuerySettings{}, + }, + + "union_struct_time": { + `union=2010-05-23`, + UnionStruct{ + Union: UnionTime(time.Date(2010, 05, 23, 0, 0, 0, 0, time.UTC)), + }, + QuerySettings{}, + }, + + "deeply_nested_brackets": { + `a[b][c][d]=hello`, + DeeplyNested{ + A: DeeplyNested1{ + B: 
DeeplyNested2{ + C: DeeplyNested3{ + D: P("hello"), + }, + }, + }, + }, + QuerySettings{NestedFormat: NestedQueryFormatBrackets}, + }, + + "deeply_nested_dots": { + `a.b.c.d=hello`, + DeeplyNested{ + A: DeeplyNested1{ + B: DeeplyNested2{ + C: DeeplyNested3{ + D: P("hello"), + }, + }, + }, + }, + QuerySettings{NestedFormat: NestedQueryFormatDots}, + }, + + "deeply_nested_brackets_empty": { + ``, + DeeplyNested{ + A: DeeplyNested1{ + B: DeeplyNested2{ + C: DeeplyNested3{ + D: nil, + }, + }, + }, + }, + QuerySettings{NestedFormat: NestedQueryFormatBrackets}, + }, + + "deeply_nested_dots_empty": { + ``, + DeeplyNested{ + A: DeeplyNested1{ + B: DeeplyNested2{ + C: DeeplyNested3{ + D: nil, + }, + }, + }, + }, + QuerySettings{NestedFormat: NestedQueryFormatDots}, + }, + + "rich_primitives": { + `a=hello`, + RichPrimitives{ + A: param.Opt[string]{Value: "hello"}, + }, + QuerySettings{}, + }, + + "rich_primitives_omit": { + ``, + QueryOmitTest{ + A: param.Opt[string]{}, + }, + QuerySettings{}, + }, + "query_omit": { + `a=hello`, + QueryOmitTest{ + A: param.Opt[string]{Value: "hello"}, + }, + QuerySettings{}, + }, +} + +func TestEncode(t *testing.T) { + for name, test := range tests { + t.Run(name, func(t *testing.T) { + values, err := MarshalWithSettings(test.val, test.settings) + if err != nil { + t.Fatalf("failed to marshal url %s", err) + } + str, _ := url.QueryUnescape(values.Encode()) + if str != test.enc { + t.Fatalf("expected %+#v to serialize to %s but got %s", test.val, test.enc, str) + } + }) + } +} diff --git a/sdks/go/internal/apiquery/richparam.go b/sdks/go/internal/apiquery/richparam.go new file mode 100644 index 00000000..ef67c230 --- /dev/null +++ b/sdks/go/internal/apiquery/richparam.go @@ -0,0 +1,19 @@ +package apiquery + +import ( + "github.com/kernel/hypeman-go/packages/param" + "reflect" +) + +func (e *encoder) newRichFieldTypeEncoder(t reflect.Type) encoderFunc { + f, _ := t.FieldByName("Value") + enc := e.typeEncoder(f.Type) + return func(key string, 
value reflect.Value) ([]Pair, error) { + if opt, ok := value.Interface().(param.Optional); ok && opt.Valid() { + return enc(key, value.FieldByIndex(f.Index)) + } else if ok && param.IsNull(opt) { + return []Pair{{key, "null"}}, nil + } + return nil, nil + } +} diff --git a/sdks/go/internal/apiquery/tag.go b/sdks/go/internal/apiquery/tag.go new file mode 100644 index 00000000..772c40e1 --- /dev/null +++ b/sdks/go/internal/apiquery/tag.go @@ -0,0 +1,44 @@ +package apiquery + +import ( + "reflect" + "strings" +) + +const queryStructTag = "query" +const formatStructTag = "format" + +type parsedStructTag struct { + name string + omitempty bool + omitzero bool + inline bool +} + +func parseQueryStructTag(field reflect.StructField) (tag parsedStructTag, ok bool) { + raw, ok := field.Tag.Lookup(queryStructTag) + if !ok { + return + } + parts := strings.Split(raw, ",") + if len(parts) == 0 { + return tag, false + } + tag.name = parts[0] + for _, part := range parts[1:] { + switch part { + case "omitzero": + tag.omitzero = true + case "omitempty": + tag.omitempty = true + case "inline": + tag.inline = true + } + } + return +} + +func parseFormatStructTag(field reflect.StructField) (format string, ok bool) { + format, ok = field.Tag.Lookup(formatStructTag) + return +} diff --git a/sdks/go/internal/encoding/json/decode.go b/sdks/go/internal/encoding/json/decode.go new file mode 100644 index 00000000..2c6b0958 --- /dev/null +++ b/sdks/go/internal/encoding/json/decode.go @@ -0,0 +1,1324 @@ +// Vendored from Go 1.24.0-pre-release +// To find alterations, check package shims, and comments beginning in SHIM(). +// +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. 
+ +package json + +import ( + "encoding" + "encoding/base64" + "fmt" + "github.com/kernel/hypeman-go/internal/encoding/json/shims" + "reflect" + "strconv" + "strings" + "unicode" + "unicode/utf16" + "unicode/utf8" + _ "unsafe" // for linkname +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. If v is nil or not a pointer, +// Unmarshal returns an [InvalidUnmarshalError]. +// +// Unmarshal uses the inverse of the encodings that +// [Marshal] uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a value implementing [Unmarshaler], +// Unmarshal calls that value's [Unmarshaler.UnmarshalJSON] method, including +// when the input is a JSON null. +// Otherwise, if the value implements [encoding.TextUnmarshaler] +// and the input is a JSON quoted string, Unmarshal calls +// [encoding.TextUnmarshaler.UnmarshalText] with the unquoted form of the string. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by [Marshal] (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. By +// default, object keys which don't have a corresponding struct field are +// ignored (see [Decoder.DisallowUnknownFields] for an alternative). 
+// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// - bool, for JSON booleans +// - float64, for JSON numbers +// - string, for JSON strings +// - []any, for JSON arrays +// - map[string]any, for JSON objects +// - nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. +// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. +// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. +// +// To unmarshal a JSON object into a map, Unmarshal first establishes a map to +// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal +// reuses the existing map, keeping existing entries. Unmarshal then stores +// key-value pairs from the JSON object into the map. The map's key type must +// either be any string type, an integer, or implement [encoding.TextUnmarshaler]. +// +// If the JSON-encoded data contain a syntax error, Unmarshal returns a [SyntaxError]. +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an [UnmarshalTypeError] describing the earliest such error. In any +// case, it's not guaranteed that all the remaining fields following +// the problematic one will be unmarshaled into the target object. +// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. 
Because null is often used in JSON to mean +// “not present,” unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. +// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +func Unmarshal(data []byte, v any) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + var d decodeState + err := checkValid(data, &d.scan) + if err != nil { + return err + } + + d.init(data) + return d.unmarshal(v) +} + +// Unmarshaler is the interface implemented by types +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +// +// By convention, to approximate the behavior of [Unmarshal] itself, +// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes + Struct string // name of the struct type containing the field + Field string // the full path from root node to the field, include embedded struct +} + +func (e *UnmarshalTypeError) Error() string { + if e.Struct != "" || e.Field != "" { + return "json: cannot unmarshal " + e.Value + " into Go struct field " + e.Struct + "." 
+ e.Field + " of type " + e.Type.String() + } + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// +// Deprecated: No longer used; kept for compatibility. +type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() +} + +// An InvalidUnmarshalError describes an invalid argument passed to [Unmarshal]. +// (The argument to [Unmarshal] must be a non-nil pointer.) +type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Pointer { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v any) error { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Pointer || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + d.scanWhile(scanSkipSpace) + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + err := d.value(rv) + if err != nil { + return d.addErrorContext(err) + } + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. 
+func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// An errorContext provides context for type errors during decoding. +type errorContext struct { + Struct reflect.Type + FieldStack []string +} + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // next read offset in data + opcode int // last read result + scan scanner + errorContext *errorContext + savedError error + useNumber bool + disallowUnknownFields bool +} + +// readIndex returns the position of the last byte read. +func (d *decodeState) readIndex() int { + return d.off - 1 +} + +// phasePanicMsg is used as a panic message when we end up with something that +// shouldn't happen. It can indicate a bug in the JSON decoder, or that +// something is editing the data slice while the decoder executes. +const phasePanicMsg = "JSON decoder out of sync - data changing underfoot?" + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + if d.errorContext != nil { + d.errorContext.Struct = nil + // Reuse the allocated space for the FieldStack slice. + d.errorContext.FieldStack = d.errorContext.FieldStack[:0] + } + return d +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. 
+func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = d.addErrorContext(err) + } +} + +// addErrorContext returns a new error enhanced with information from d.errorContext +func (d *decodeState) addErrorContext(err error) error { + if d.errorContext != nil && (d.errorContext.Struct != nil || len(d.errorContext.FieldStack) > 0) { + switch err := err.(type) { + case *UnmarshalTypeError: + err.Struct = d.errorContext.Struct.Name() + fieldStack := d.errorContext.FieldStack + if err.Field != "" { + fieldStack = append(fieldStack, err.Field) + } + err.Field = strings.Join(fieldStack, ".") + } + } + return err +} + +// skip scans to the end of what was started. +func (d *decodeState) skip() { + s, data, i := &d.scan, d.data, d.off + depth := len(s.parseState) + for { + op := s.step(s, data[i]) + i++ + if len(s.parseState) < depth { + d.off = i + d.opcode = op + return + } + } +} + +// scanNext processes the byte at d.data[d.off]. +func (d *decodeState) scanNext() { + if d.off < len(d.data) { + d.opcode = d.scan.step(&d.scan, d.data[d.off]) + d.off++ + } else { + d.opcode = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +func (d *decodeState) scanWhile(op int) { + s, data, i := &d.scan, d.data, d.off + for i < len(data) { + newOp := s.step(s, data[i]) + i++ + if newOp != op { + d.opcode = newOp + d.off = i + return + } + } + + d.off = len(data) + 1 // mark processed EOF with len+1 + d.opcode = d.scan.eof() +} + +// rescanLiteral is similar to scanWhile(scanContinue), but it specialises the +// common case where we're decoding a literal. The decoder scans the input +// twice, once for syntax errors and to check the length of the value, and the +// second to perform the decoding. +// +// Only in the second step do we use decodeState to tokenize literals, so we +// know there aren't any syntax errors. 
We can take advantage of that knowledge, +// and scan a literal's bytes much more quickly. +func (d *decodeState) rescanLiteral() { + data, i := d.data, d.off +Switch: + switch data[i-1] { + case '"': // string + for ; i < len(data); i++ { + switch data[i] { + case '\\': + i++ // escaped char + case '"': + i++ // tokenize the closing quote too + break Switch + } + } + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': // number + for ; i < len(data); i++ { + switch data[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + '.', 'e', 'E', '+', '-': + default: + break Switch + } + } + case 't': // true + i += len("rue") + case 'f': // false + i += len("alse") + case 'n': // null + i += len("ull") + } + if i < len(data) { + d.opcode = stateEndValue(&d.scan, data[i]) + } else { + d.opcode = scanEnd + } + d.off = i + 1 +} + +// value consumes a JSON value from d.data[d.off-1:], decoding into v, and +// reads the following byte ahead. If v is invalid, the value is discarded. +// The first byte of the value has been read already. +func (d *decodeState) value(v reflect.Value) error { + switch d.opcode { + default: + panic(phasePanicMsg) + + case scanBeginArray: + if v.IsValid() { + if err := d.array(v); err != nil { + return err + } + } else { + d.skip() + } + d.scanNext() + + case scanBeginObject: + if v.IsValid() { + if err := d.object(v); err != nil { + return err + } + } else { + d.skip() + } + d.scanNext() + + case scanBeginLiteral: + // All bytes inside literal return scanContinue op code. + start := d.readIndex() + d.rescanLiteral() + + if v.IsValid() { + if err := d.literalStore(d.data[start:d.readIndex()], v, false); err != nil { + return err + } + } + } + return nil +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. 
+func (d *decodeState) valueQuoted() any { + switch d.opcode { + default: + panic(phasePanicMsg) + + case scanBeginArray, scanBeginObject: + d.skip() + d.scanNext() + + case scanBeginLiteral: + v := d.literalInterface() + switch v.(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// If it encounters an Unmarshaler, indirect stops and returns that. +// If decodingNull is true, indirect stops at the first settable pointer so it +// can be set to nil. +func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // Issue #24153 indicates that it is generally not a guaranteed property + // that you may round-trip a reflect.Value by calling Value.Addr().Elem() + // and expect the value to still be settable for values derived from + // unexported embedded struct fields. + // + // The logic below effectively does this when it first addresses the value + // (to satisfy possible pointer methods) and continues to dereference + // subsequent pointers as necessary. + // + // After the first round-trip, we set v back to the original value to + // preserve the original RW flags contained in reflect.Value. + v0 := v + haveAddr := false + + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Pointer && v.Type().Name() != "" && v.CanAddr() { + haveAddr = true + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. 
+ if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Pointer && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Pointer) { + haveAddr = false + v = e + continue + } + } + + if v.Kind() != reflect.Pointer { + break + } + + if decodingNull && v.CanSet() { + break + } + + // Prevent infinite loop if v is an interface pointing to its own address: + // var v any + // v = &v + if v.Elem().Kind() == reflect.Interface && v.Elem().Elem().Equal(v) { + v = v.Elem() + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 && v.CanInterface() { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if !decodingNull { + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + } + + if haveAddr { + v = v0 // restore original value after round-trip Value.Addr().Elem() + haveAddr = false + } else { + v = v.Elem() + } + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into v. +// The first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) error { + // Check for unmarshaler. + u, ut, pv := indirect(v, false) + if u != nil { + start := d.readIndex() + d.skip() + return u.UnmarshalJSON(d.data[start:d.off]) + } + if ut != nil { + d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + } + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + ai := d.arrayInterface() + v.Set(reflect.ValueOf(ai)) + return nil + } + // Otherwise it's invalid. 
+ fallthrough + default: + d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + case reflect.Array, reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndArray { + break + } + + // Expand slice length, growing the slice if necessary. + if v.Kind() == reflect.Slice { + if i >= v.Cap() { + v.Grow(1) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + if err := d.value(v.Index(i)); err != nil { + return err + } + } else { + // Ran out of fixed array: skip. + if err := d.value(reflect.Value{}); err != nil { + return err + } + } + i++ + + // Next token must be , or ]. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndArray { + break + } + if d.opcode != scanArrayValue { + panic(phasePanicMsg) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + for ; i < v.Len(); i++ { + v.Index(i).SetZero() // zero remainder of array + } + } else { + v.SetLen(i) // truncate the slice + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } + return nil +} + +var nullLiteral = []byte("null") + +// SHIM(reflect): reflect.TypeFor[T]() reflect.T +var textUnmarshalerType = shims.TypeFor[encoding.TextUnmarshaler]() + +// object consumes an object from d.data[d.off-1:], decoding into v. +// The first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) error { + // Check for unmarshaler. + u, ut, pv := indirect(v, false) + if u != nil { + start := d.readIndex() + d.skip() + return u.UnmarshalJSON(d.data[start:d.off]) + } + if ut != nil { + d.saveError(&UnmarshalTypeError{Value: "object", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + } + v = pv + t := v.Type() + + // Decoding into nil interface? Switch to non-reflect code. 
+ if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + oi := d.objectInterface() + v.Set(reflect.ValueOf(oi)) + return nil + } + + var fields structFields + + // Check type of target: + // struct or + // map[T1]T2 where T1 is string, an integer type, + // or an encoding.TextUnmarshaler + switch v.Kind() { + case reflect.Map: + // Map key must either have string kind, have an integer kind, + // or be an encoding.TextUnmarshaler. + switch t.Key().Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + default: + if !reflect.PointerTo(t.Key()).Implements(textUnmarshalerType) { + d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)}) + d.skip() + return nil + } + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + fields = cachedTypeFields(t) + // ok + default: + d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)}) + d.skip() + return nil + } + + var mapElem reflect.Value + var origErrorContext errorContext + if d.errorContext != nil { + origErrorContext = *d.errorContext + } + + for { + // Read opening " of string key or closing }. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if d.opcode != scanBeginLiteral { + panic(phasePanicMsg) + } + + // Read key. + start := d.readIndex() + d.rescanLiteral() + item := d.data[start:d.readIndex()] + key, ok := unquoteBytes(item) + if !ok { + panic(phasePanicMsg) + } + + // Figure out field corresponding to key. 
+ var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := t.Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.SetZero() + } + subv = mapElem + } else { + f := fields.byExactName[string(key)] + if f == nil { + f = fields.byFoldedName[string(foldName(key))] + } + if f != nil { + subv = v + destring = f.quoted + if d.errorContext == nil { + d.errorContext = new(errorContext) + } + for i, ind := range f.index { + if subv.Kind() == reflect.Pointer { + if subv.IsNil() { + // If a struct embeds a pointer to an unexported type, + // it is not possible to set a newly allocated value + // since the field is unexported. + // + // See https://golang.org/issue/21357 + if !subv.CanSet() { + d.saveError(fmt.Errorf("json: cannot set embedded pointer to unexported struct: %v", subv.Type().Elem())) + // Invalidate subv to ensure d.value(subv) skips over + // the JSON value without assigning it to subv. + subv = reflect.Value{} + destring = false + break + } + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + if i < len(f.index)-1 { + d.errorContext.FieldStack = append( + d.errorContext.FieldStack, + subv.Type().Field(ind).Name, + ) + } + subv = subv.Field(ind) + } + d.errorContext.Struct = t + d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name) + } else if d.disallowUnknownFields { + d.saveError(fmt.Errorf("json: unknown field %q", key)) + } + } + + // Read : before value. 
+ if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode != scanObjectKey { + panic(phasePanicMsg) + } + d.scanWhile(scanSkipSpace) + + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + if err := d.literalStore(nullLiteral, subv, false); err != nil { + return err + } + case string: + if err := d.literalStore([]byte(qv), subv, true); err != nil { + return err + } + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + if err := d.value(subv); err != nil { + return err + } + } + + // Write value back to map; + // if using struct, subv points into struct already. + if v.Kind() == reflect.Map { + kt := t.Key() + var kv reflect.Value + if reflect.PointerTo(kt).Implements(textUnmarshalerType) { + kv = reflect.New(kt) + if err := d.literalStore(item, kv, true); err != nil { + return err + } + kv = kv.Elem() + } else { + switch kt.Kind() { + case reflect.String: + kv = reflect.New(kt).Elem() + kv.SetString(string(key)) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s := string(key) + n, err := strconv.ParseInt(s, 10, 64) + // SHIM(reflect): reflect.Type.OverflowInt(int64) bool + okt := shims.OverflowableType{Type: kt} + if err != nil || okt.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)}) + break + } + kv = reflect.New(kt).Elem() + kv.SetInt(n) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + s := string(key) + n, err := strconv.ParseUint(s, 10, 64) + // SHIM(reflect): reflect.Type.OverflowUint(uint64) bool + okt := shims.OverflowableType{Type: kt} + if err != nil || okt.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)}) + break + } + kv = reflect.New(kt).Elem() + kv.SetUint(n) + default: + panic("json: Unexpected key type") // 
should never occur + } + } + if kv.IsValid() { + v.SetMapIndex(kv, subv) + } + } + + // Next token must be , or }. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.errorContext != nil { + // Reset errorContext to its original state. + // Keep the same underlying array for FieldStack, to reuse the + // space and avoid unnecessary allocs. + d.errorContext.FieldStack = d.errorContext.FieldStack[:len(origErrorContext.FieldStack)] + d.errorContext.Struct = origErrorContext.Struct + } + if d.opcode == scanEndObject { + break + } + if d.opcode != scanObjectValue { + panic(phasePanicMsg) + } + } + return nil +} + +// convertNumber converts the number literal s to a float64 or a Number +// depending on the setting of d.useNumber. +func (d *decodeState) convertNumber(s string) (any, error) { + if d.useNumber { + return Number(s), nil + } + f, err := strconv.ParseFloat(s, 64) + if err != nil { + // SHIM(reflect): reflect.TypeFor[T]() reflect.Type + return nil, &UnmarshalTypeError{Value: "number " + s, Type: shims.TypeFor[float64](), Offset: int64(d.off)} + } + return f, nil +} + +// SHIM(reflect): TypeFor[T]() reflect.Type +var numberType = shims.TypeFor[Number]() + +// literalStore decodes a literal stored in item into v. +// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. this is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) error { + // Check for unmarshaler. + if len(item) == 0 { + // Empty string given. 
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return nil + } + isNull := item[0] == 'n' // null + u, ut, pv := indirect(v, isNull) + if u != nil { + return u.UnmarshalJSON(item) + } + if ut != nil { + if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return nil + } + val := "number" + switch item[0] { + case 'n': + val = "null" + case 't', 'f': + val = "bool" + } + d.saveError(&UnmarshalTypeError{Value: val, Type: v.Type(), Offset: int64(d.readIndex())}) + return nil + } + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + return ut.UnmarshalText(s) + } + + v = pv + + switch c := item[0]; c { + case 'n': // null + // The main parser checks that only true and false can reach here, + // but if this was a quoted string input, it could be anything. + if fromQuoted && string(item) != "null" { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + break + } + switch v.Kind() { + case reflect.Interface, reflect.Pointer, reflect.Map, reflect.Slice: + v.SetZero() + // otherwise, ignore null for primitives/string + } + case 't', 'f': // true, false + value := item[0] == 't' + // The main parser checks that only true and false can reach here, + // but if this was a quoted string input, it could be anything. 
+ if fromQuoted && string(item) != "true" && string(item) != "false" { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + break + } + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())}) + } + } + + case '"': // string + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b[:n]) + case reflect.String: + t := string(s) + if v.Type() == numberType && !isValidNumber(t) { + return fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item) + } + v.SetString(t) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + } + } + + default: // number + if c != '-' && (c < '0' || c > '9') { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q 
into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + // s must be a valid number, because it's + // already been tokenized. + v.SetString(string(item)) + break + } + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())}) + case reflect.Interface: + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(string(item), 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(string(item), 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(string(item), v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetFloat(n) + } + } + return nil +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns any. 
+func (d *decodeState) valueInterface() (val any) { + switch d.opcode { + default: + panic(phasePanicMsg) + case scanBeginArray: + val = d.arrayInterface() + d.scanNext() + case scanBeginObject: + val = d.objectInterface() + d.scanNext() + case scanBeginLiteral: + val = d.literalInterface() + } + return +} + +// arrayInterface is like array but returns []any. +func (d *decodeState) arrayInterface() []any { + var v = make([]any, 0) + for { + // Look ahead for ] - can only happen on first iteration. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndArray { + break + } + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndArray { + break + } + if d.opcode != scanArrayValue { + panic(phasePanicMsg) + } + } + return v +} + +// objectInterface is like object but returns map[string]any. +func (d *decodeState) objectInterface() map[string]any { + m := make(map[string]any) + for { + // Read opening " of string key or closing }. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if d.opcode != scanBeginLiteral { + panic(phasePanicMsg) + } + + // Read string key. + start := d.readIndex() + d.rescanLiteral() + item := d.data[start:d.readIndex()] + key, ok := unquote(item) + if !ok { + panic(phasePanicMsg) + } + + // Read : before value. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode != scanObjectKey { + panic(phasePanicMsg) + } + d.scanWhile(scanSkipSpace) + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndObject { + break + } + if d.opcode != scanObjectValue { + panic(phasePanicMsg) + } + } + return m +} + +// literalInterface consumes and returns a literal from d.data[d.off-1:] and +// it reads the following byte ahead. 
The first byte of the literal has been +// read already (that's how the caller knows it's a literal). +func (d *decodeState) literalInterface() any { + // All bytes inside literal return scanContinue op code. + start := d.readIndex() + d.rescanLiteral() + + item := d.data[start:d.readIndex()] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + panic(phasePanicMsg) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + panic(phasePanicMsg) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + var r rune + for _, c := range s[2:6] { + switch { + case '0' <= c && c <= '9': + c = c - '0' + case 'a' <= c && c <= 'f': + c = c - 'a' + 10 + case 'A' <= c && c <= 'F': + c = c - 'A' + 10 + default: + return -1 + } + r = r*16 + rune(c) + } + return r +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +// unquoteBytes should be an internal detail, +// but widely used packages access it using linkname. +// Notable members of the hall of shame include: +// - github.com/bytedance/sonic +// +// Do not remove or change the type signature. +// See go.dev/issue/67401. +// +//go:linkname unquoteBytes +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. 
+ r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. 
+ default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/sdks/go/internal/encoding/json/encode.go b/sdks/go/internal/encoding/json/encode.go new file mode 100644 index 00000000..bdd596e2 --- /dev/null +++ b/sdks/go/internal/encoding/json/encode.go @@ -0,0 +1,1395 @@ +// Vendored from Go 1.24.0-pre-release +// To find alterations, check package shims, and comments beginning in SHIM(). +// +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON as defined in +// RFC 7159. The mapping between JSON and Go values is described +// in the documentation for the Marshal and Unmarshal functions. +// +// See "JSON and Go" for an introduction to this package: +// https://golang.org/doc/articles/json_and_go.html +package json + +import ( + "bytes" + "cmp" + "encoding" + "encoding/base64" + "fmt" + "github.com/kernel/hypeman-go/internal/encoding/json/sentinel" + "github.com/kernel/hypeman-go/internal/encoding/json/shims" + "math" + "reflect" + "slices" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" + _ "unsafe" // for linkname +) + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements [Marshaler] +// and is not a nil pointer, Marshal calls [Marshaler.MarshalJSON] +// to produce JSON. If no [Marshaler.MarshalJSON] method is present but the +// value implements [encoding.TextMarshaler] instead, Marshal calls +// [encoding.TextMarshaler.MarshalText] and encodes the result as a JSON string. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// [Unmarshaler.UnmarshalJSON]. 
+// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and [Number] values encode as JSON numbers. +// NaN and +/-Inf values will return an [UnsupportedValueError]. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// So that the JSON will be safe to embed inside HTML