diff --git a/.AI-SAFEGUARDS.md b/.AI-SAFEGUARDS.md new file mode 100644 index 00000000..f7289d84 --- /dev/null +++ b/.AI-SAFEGUARDS.md @@ -0,0 +1,56 @@

# AI Safeguards Configuration

## Protected Branches

These branches CANNOT be modified by AI:

- docs-v2 (source of truth)
- main

## Allowed Branches for AI

- docs-v2-dev (development branch for hourly commits)
- fix-\* (feature branches)
- safepoint/\* (checkpoint branches)

## Forbidden Commands for AI

❌ NEVER execute:

- git reset --hard
- git restore .
- git clean -fd
- git push --force
- git rebase -i (interactive rebase)
- git tag --force
- Any mass git operations without human approval

## Large Change Protocol (>10 files)

1. AI must show file list and get approval
2. Human creates .ai-commit-verified file
3. Only then can AI proceed with commit
4. File is deleted after commit

## Audit Trail

All AI git operations logged to: .ai-operations.log

## Emergency Rollback

If AI makes dangerous changes:

```bash
git reflog
git reset --hard <commit>   # human-only emergency escape; AI must never run this
```

## Human Commit Override

If AI blocks legitimate commits, use:

```bash
git commit --no-verify
```

Updated: 2026-01-06 after safeguard implementation

diff --git a/.ai-audit.sh b/.ai-audit.sh new file mode 100644 index 00000000..1911795f --- /dev/null +++ b/.ai-audit.sh @@ -0,0 +1,16 @@

#!/bin/bash
# AI Operations Audit Log
# Logs every git operation attempted by the AI assistant

TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S')
BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown")
COMMAND="$*"
# Counts all changed files (staged and unstaged), not only staged ones
FILES_CHANGED=$(git status --porcelain 2>/dev/null | wc -l)

LOG_ENTRY="[$TIMESTAMP] BRANCH=$BRANCH | COMMAND=$COMMAND | FILES_CHANGED=$FILES_CHANGED"

# Write to audit log
echo "$LOG_ENTRY" >> .ai-operations.log

# Also print for visibility
echo "$LOG_ENTRY"

diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 76381cf9..b9a90aec 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,3 +1,4 @@ # Default reviewers for the AI documentation. ai/ @rickstaa * @livepeer/studio-team +* @DeveloperAlly \ No newline at end of file

diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md new file mode 100644 index 00000000..7ab9f32f --- /dev/null +++ b/.github/copilot-instructions.md @@ -0,0 +1,91 @@

# Copilot Instructions for Livepeer Docs (2026)

The docs are built and previewed using the Mintlify CLI, and can be containerized with Docker.

## Big Picture & Architecture

- **Multi-version Docs:**
  - `v1/` = legacy, `v2/` = current. Navigation in `docs.json`/`docs_v2.json`.
- **Component System:**
  - Custom React/TSX/JSX components in `snippets/components/` (see
    README-custom-view.md for advanced usage).
  - Use `.tsx` for new components; `.jsx` is legacy but supported.
- **Automations & Scripts:**
  - All dynamic, AI, and data-fetching logic in `automations/` and `ai-tools/`.
  - Scripts for API doc generation and external data in `v2/scripts/` (see
    generate-api-docs.sh, fetch-openapi-specs.sh).
- **API Reference:**
  - OpenAPI spec in `openapi.yaml` (AI API: see ai/worker/api/openapi.yaml). Use
    scripts to generate MDX/API docs.
- **Assets:**
  - Images/logos in `images/`, `logo/`, and static assets in `assets/`.

## Developer Workflows

- **Preview Locally:**
  1. Install Mintlify CLI: `npm i -g mintlify`
  2.
Run: `mint dev` (from repo root, where `mint.json` or `mint_v1.json` + exists) +- **Build/Deploy:** + - Docker: + `docker buildx build --platform linux/amd64 --load -t livepeer/docs .` + - Makefile: `make all` +- **API Docs Generation:** + - Use `v2/scripts/generate-api-docs.sh` to convert OpenAPI specs to MDX/API + docs and navigation JSON. Example: + ```bash + ./v2/scripts/generate-api-docs.sh ai/worker/api/openapi.yaml v2/pages/04_gateways/guides-references/api-reference/AI-API "AI API" + ``` + - Output: MDX files + navigation snippet for `docs.json`. +- **External Data Fetching:** + - Use `fetch-openapi-specs.sh` and `fetch-external-docs.sh` for syncing + specs/docs from other repos. +- **Component Usage:** + - Import with relative paths from `snippets/components/` in `.mdx` files. + - For custom dropdowns, see `CustomViewDropdown` in + `snippets/components/custom-view-dropdown.jsx` and its README. + +## Project Conventions & Patterns + +- **MDX-First:** All docs are `.mdx` (Markdown + JSX/TSX components). +- **Versioning:** New docs in `v2/`, legacy in `v1/` or + `deprecated-references/`. +- **AI/Dynamic Content:** All AI-generated or dynamic content in `automations/` + or `ai-tools/`. +- **SEO & Metadata:** Add SEO tags/metadata in page frontmatter (see + `README_V2.md`). +- **No Formal Test Suite:** Preview changes locally before merging. +- **Architecture:** + - Docs mirror protocol architecture: Gateway, Orchestrator, Transcoder, AI + Worker. See `README_V2.md` for detailed flows and node roles. + - Gateways do not process video; they route jobs to orchestrators. Node roles + are mutually exclusive (see `README_V2.md`). +- **Custom Views:** + - Use `CustomViewDropdown` for Mintlify-style view switching. See + `snippets/components/README-custom-view.md` for migration and usage. + +## Integration Points + +- **Mintlify:** All build/preview flows use Mintlify CLI and config files + (`mint.json`, `docs.json`). +- **OpenAPI:** API docs generated from `openapi.yaml` (see also + `ai/worker/api/openapi.yaml`). +- **Docker:** Containerized builds for CI/CD and local dev. +- **Automations:** Scripts in `v2/scripts/` automate API doc generation and + external data sync. + +## Key Files & Directories + +- `docs.json`, `docs_v2.json` — Navigation/config +- `snippets/components/` — Custom components (see README-custom-view.md) +- `automations/`, `ai-tools/` — Scripts, AI, dynamic content +- `openapi.yaml`, `ai/worker/api/openapi.yaml` — API reference +- `Dockerfile`, `Makefile` — Build/deploy +- `README.md`, `README_V2.md` — Developer notes, protocol/architecture +- `v2/scripts/` — Automation scripts (API docs, data fetching) + +--- + +If any conventions or workflows are unclear, review the latest `README.md`, +`README_V2.md`, or automation READMEs, or ask for clarification. 
diff --git a/.github/workflows/broken-links.yml b/.github/workflows/broken-links.yml index ad39f52d..8835c9ea 100644 --- a/.github/workflows/broken-links.yml +++ b/.github/workflows/broken-links.yml @@ -16,8 +16,8 @@ jobs: - name: Set up Node.js uses: actions/setup-node@v4 with: - node-version: '22' - + node-version: "22" + - name: Install Mintlify globally run: npm install -g mintlify diff --git a/.github/workflows/update-livepeer-release.yml b/.github/workflows/update-livepeer-release.yml new file mode 100644 index 00000000..e1858c31 --- /dev/null +++ b/.github/workflows/update-livepeer-release.yml @@ -0,0 +1,60 @@ +name: Update Livepeer Release Version + +on: + schedule: + # Run every 30 minutes + - cron: "*/30 * * * *" + workflow_dispatch: + +jobs: + check-and-update: + runs-on: ubuntu-latest + + steps: + - name: Checkout docs repository + uses: actions/checkout@v3 + with: + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Get latest go-livepeer release + id: get_release + run: | + LATEST_RELEASE=$(curl -s https://api.github.com/repos/livepeer/go-livepeer/releases/latest | jq -r .tag_name) + echo "release=${LATEST_RELEASE}" >> $GITHUB_OUTPUT + echo "Latest release: ${LATEST_RELEASE}" + + - name: Read current version from globals.mdx + id: current_version + run: | + CURRENT=$(grep -oP 'latestVersion\s*=\s*["'"'"']?\K[^"'"'"']+' snippets/automationData/globals/globals.mdx || echo "") + echo "current=${CURRENT}" >> $GITHUB_OUTPUT + echo "Current version: ${CURRENT}" + + - name: Update globals.mdx if needed + if: + steps.get_release.outputs.release != + steps.current_version.outputs.current + run: | + # Create backup + cp snippets/automationData/globals/globals.mdx snippets/automationData/globals/globals.mdx.bak + + # Update the latestVersion value + sed -i "s/latestVersion[[:space:]]*=[[:space:]]*[\"'][^\"']*[\"']/latestVersion = \"${{ steps.get_release.outputs.release }}\"/" snippets/automationData/globals/globals.mdx + + # Update the latestVersionUrl value + sed -i "s|latestVersionUrl[[:space:]]*=[[:space:]]*[\"'][^\"']*[\"']|latestVersionUrl = \"https://github.com/livepeer/go-livepeer/releases/download/${{ steps.get_release.outputs.release }}\"|" snippets/automationData/globals/globals.mdx + + # Verify the changes + echo "Updated content:" + grep "latestVersion" snippets/automationData/globals/globals.mdx + + - name: Commit and push if changed + if: + steps.get_release.outputs.release != + steps.current_version.outputs.current + run: | + git config --local user.email "action@github.com" + git config --local user.name "GitHub Action" + git add snippets/automationData/globals/globals.mdx + git commit -m "chore: update latest release to ${{ steps.get_release.outputs.release }}" + git push diff --git a/.gitignore b/.gitignore index 487b51b9..e7e1da61 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,67 @@ node_modules # IDE .vscode *.code-workspace + +# V2 +# ------------------------------------ +# Node +# ------------------------------------ +# node_modules/ +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* +package-lock.json +yarn.lock +pnpm-lock.yaml + +# Optional: ignore local `.env` files +.env +.env.*local + +# ------------------------------------ +# Logs +# ------------------------------------ +logs/ +*.log +*.log.* +debug.log + +# ------------------------------------ +# OS / Editor files +# ------------------------------------ +# .DS_Store +Thumbs.db +.idea/ + +# VSCode — allow settings.json, ignore rest +# .vscode/* +# !.vscode/settings.json +# 
!.vscode/extensions.json + +# ------------------------------------ +# Mintlify / Build Stuff +# ------------------------------------ +.mintlify/ +.mintlify-cache/ +.out/ +dist/ +build/ + +# ------------------------------------ +# Temporary files +# ------------------------------------ +*.tmp +*.temp +*.swp +*.swo + +# ------------------------------------ +# TypeScript +# ------------------------------------ +*.tsbuildinfo + +# ------------------------------------ +# External docs (fetched at build time) +# ------------------------------------ +snippets/external/ diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml index 80caa884..9f0ae65f 100644 --- a/.speakeasy/workflow.yaml +++ b/.speakeasy/workflow.yaml @@ -1,25 +1,25 @@ workflowVersion: 1.0.0 speakeasyVersion: latest sources: - livepeer-ai-api: - inputs: - - location: https://raw.githubusercontent.com/livepeer/ai-worker/main/runner/gateway.openapi.yaml - overlays: - - location: https://raw.githubusercontent.com/livepeer/livepeer-ai-js/main/codeSamples.yaml - - location: https://raw.githubusercontent.com/livepeer/livepeer-ai-go/main/codeSamples.yaml - - location: https://raw.githubusercontent.com/livepeer/livepeer-ai-python/main/codeSamples.yaml - output: ai/api-reference/gateway.openapi.yaml - registry: - location: registry.speakeasyapi.dev/livepeer/livepeer-ai/livepeer-ai-oas - livepeer-studio-api: - inputs: - - location: https://raw.githubusercontent.com/livepeer/studio/master/packages/api/src/schema/api-schema.yaml - - location: https://raw.githubusercontent.com/livepeer/studio/master/packages/api/src/schema/ai-api-schema.yaml - overlays: - - location: https://raw.githubusercontent.com/livepeer/livepeer-js/main/codeSamples.yaml - - location: https://raw.githubusercontent.com/livepeer/livepeer-go/main/codeSamples.yaml - - location: https://raw.githubusercontent.com/livepeer/livepeer-python/main/codeSamples.yaml - output: openapi.yaml - registry: - location: registry.speakeasyapi.dev/livepeer/livepeer-studio/livepeer-studio-api + livepeer-ai-api: + inputs: + - location: https://raw.githubusercontent.com/livepeer/ai-worker/main/runner/gateway.openapi.yaml + overlays: + - location: https://raw.githubusercontent.com/livepeer/livepeer-ai-js/main/codeSamples.yaml + - location: https://raw.githubusercontent.com/livepeer/livepeer-ai-go/main/codeSamples.yaml + - location: https://raw.githubusercontent.com/livepeer/livepeer-ai-python/main/codeSamples.yaml + output: ai/api-reference/gateway.openapi.yaml + registry: + location: registry.speakeasyapi.dev/livepeer/livepeer-ai/livepeer-ai-oas + livepeer-studio-api: + inputs: + - location: https://raw.githubusercontent.com/livepeer/studio/master/packages/api/src/schema/api-schema.yaml + - location: https://raw.githubusercontent.com/livepeer/studio/master/packages/api/src/schema/ai-api-schema.yaml + overlays: + - location: https://raw.githubusercontent.com/livepeer/livepeer-js/main/codeSamples.yaml + - location: https://raw.githubusercontent.com/livepeer/livepeer-go/main/codeSamples.yaml + - location: https://raw.githubusercontent.com/livepeer/livepeer-python/main/codeSamples.yaml + output: openapi.yaml + registry: + location: registry.speakeasyapi.dev/livepeer/livepeer-studio/livepeer-studio-api targets: {} diff --git a/.verify-large-change.sh b/.verify-large-change.sh new file mode 100644 index 00000000..389295f6 --- /dev/null +++ b/.verify-large-change.sh @@ -0,0 +1,31 @@ +#!/bin/bash +# AI Large Change Verification Script +# Used when AI needs to commit more than 10 files + 
+BRANCH=$(git rev-parse --abbrev-ref HEAD) +FILES_COUNT=$(git diff --cached --name-only | wc -l) +TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S') + +echo "" +echo "════════════════════════════════════════════════════════════" +echo "⚠️ LARGE CHANGE VERIFICATION REQUIRED" +echo "════════════════════════════════════════════════════════════" +echo "" +echo "Branch: $BRANCH" +echo "Files to be modified: $FILES_COUNT" +echo "Timestamp: $TIMESTAMP" +echo "" +echo "Files list:" +git diff --cached --name-only | sort +echo "" +echo "════════════════════════════════════════════════════════════" +echo "" +echo "HUMAN ACTION REQUIRED:" +echo "" +echo "1. Review the files above carefully" +echo "2. If safe, create verification file:" +echo " touch .ai-commit-verified" +echo "3. AI will then proceed with commit" +echo "" +echo "════════════════════════════════════════════════════════════" +echo "" diff --git a/AI-ACCOUNTABILITY-CHECKLIST.md b/AI-ACCOUNTABILITY-CHECKLIST.md new file mode 100644 index 00000000..25f8f70a --- /dev/null +++ b/AI-ACCOUNTABILITY-CHECKLIST.md @@ -0,0 +1,82 @@ +# AI Assistant Accountability Checklist + +Use this for EVERY interaction with ANY AI on this repo. + +## Before Session Starts + +- [ ] AI acknowledges UNIVERSAL-AI-PROTOCOL.md +- [ ] AI states current branch +- [ ] AI lists protected branches (docs-v2, main) +- [ ] AI confirms it has read/write restrictions + +## Before EVERY Git Operation + +- [ ] AI shows PREFLIGHT CHECK (branch, files, operation, approval needed) +- [ ] You review the preflight +- [ ] You explicitly approve or reject +- [ ] AI executes ONLY after approval + +## During Large Changes (> 10 files) + +- [ ] AI lists EXACT files being changed +- [ ] AI explains WHY each file changes +- [ ] You review file list +- [ ] You create `.ai-commit-verified` file +- [ ] AI commits with verification token in message + +## After Each Commit + +- [ ] Verify commit message has: [file count] | [what] | [why] | [approved by] +- [ ] Check `.ai-operations.log` for entry +- [ ] Spot-check git log for timestamp accuracy +- [ ] Review file changes: `git show HEAD` + +## If Something Breaks + +- [ ] Stop work immediately +- [ ] Document what broke +- [ ] Use ROLLBACK-GUIDE.md to revert +- [ ] Don't let AI try to "fix" without rollback first +- [ ] Post-mortem: update UNIVERSAL-AI-PROTOCOL.md + +## Monthly Review + +- [ ] Audit `.ai-operations.log` for patterns +- [ ] Check if any rules were bent +- [ ] Review `git log docs-v2-dev` for auto-commits +- [ ] Test rollback procedures (make sure they still work) +- [ ] Update this checklist based on lessons learned + +## Red Flags (Stop Work Immediately) + +- [ ] AI tries to use `git reset --hard` +- [ ] AI commits to `docs-v2` or `main` +- [ ] AI doesn't show preflight checklist +- [ ] AI modifies 50+ files without asking +- [ ] AI touches `.prettierrc` or `docs.json` +- [ ] AI makes changes to v1/ directory +- [ ] Commit message missing approval token +- [ ] Pre-commit hook does NOT block an AI commit attempt + +## Recovery Commands (Memorize These) + +```bash +# See recent commits +git log --oneline -10 docs-v2-dev + +# Revert last commit (safe, creates new commit) +git revert HEAD + +# View what's in last commit +git show HEAD + +# Unstage last commit (keep files) +git reset --soft HEAD~1 + +# Check audit trail +cat .ai-operations.log | tail -20 +``` + +--- + +**This checklist is your insurance policy against AI fucking up your docs.** diff --git a/README.md b/README.md index 3238db29..6d2b0d8b 100644 --- a/README.md +++ b/README.md @@ 
-12,5 +12,5 @@ npm i -g mintlify

Run the following command at the root of your documentation (where mint.json is)

```bash
-mintlify dev
+mint dev
```

diff --git a/README_V2.md b/README_V2.md new file mode 100644 index 00000000..d824c8b9 --- /dev/null +++ b/README_V2.md @@ -0,0 +1,1241 @@

V2 Docs are being ported into this repo.

I will work on a branch called docs-v2, then merge into main when fully ready,
and deprecate the old docs into a v1 section of the new docs.

Add to all pages: [SEO](https://www.mintlify.com/docs/optimize/seo) e.g.

---

## "twitter:image": "/images/social-preview.jpg"

## Search Keywords eg:

## keywords: ['configuration', 'setup', 'getting started']

TODO:

- Remove/Change Navbar in V2 (Global Setting)
- Add redirects (Global Setting)
- Add Analytics (Global Setting)
- Add Footer (Global Setting)
- Add SEO (Global Setting)
- Add Custom Domain (Global Setting)
- Add Custom 404 (Global Setting)?
- "description":
  "![Rick Roll](https://media0.giphy.com/media/v1.Y2lkPTc5MGI3NjExN2FteDJ4bno5MHU5Y3QxdGx3eWR2emdhejRhc2c1Y2d3ejY5ajlxMSZlcD12MV9pbnRlcm5hbF9naWZfYnlfaWQmY3Q9Zw/Ju7l5y9osyymQ/giphy.gif)
  \n Sorry About That."
- "description":
  "![404 Robot](https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/9b86454e-e7d0-46f5-8f77-fcfd2309c760/original=true,quality=90/F0DB1F6D051016659028C1570BD9F3F89FF00EC59E1A35319969E6DF05EEB4CF.jpeg)"

Notes from stakeholders/feedback

- “The gateways section should definitely include… technical documentation on
  how to run and operate a gateway node because that’s missing.”
-

Notes on layout

- Consider moving resource and help anchors to right tabs on menu (styling).
  Would prefer navbar buttons - but only external links live there :/

- Consider having an Index & FAQ/Glossary page in each tab - possibly use AI to
  make it per page (LLM initially, then an n8n integration keeps it updated)

About:

- Protocol: Called Protocol Actors or Network Participants? Both?
- I am not convinced about the sidebar sections.

Removing: "v2/pages/01_about/livepeer-protocol/livepeer-actors/gateways",
"v2/pages/01_about/livepeer-protocol/livepeer-actors/orchestrators",
"v2/pages/01_about/livepeer-protocol/livepeer-actors/delegators",
"v2/pages/01_about/livepeer-protocol/livepeer-actors/end-users"

Community

- move HUBS to appropriate tabs
- Hate the naming of all connect items.

Developer

Gateways

#### Direct Usage & Platform Integration

| Category | Reason | Business Explanation |
| --- | --- | --- |
| Direct Usage / Ops | Run your own workloads | Content providers run gateways to process their own video/AI workloads end-to-end, controlling ingestion, routing, retries, and delivery. |

#### Reliability, Performance & QoS

| Category | Reason | Business Explanation |
| --- | --- | --- |
| Reliability | Enforce SLAs on orchestrators | Gateways select orchestrators, apply retries/failover, and enforce latency and uptime guarantees. |
| Reliability | QoS enforcement & workload shaping | Gateways control routing, retries, failover, and latency-vs-cost trade-offs beyond protocol defaults.
| + +#### Platform + +| Category | Reason | Business Explanation | +| -------- | ------------------------- | ------------------------------------------------------------------------------- | +| Platform | Embed in a larger product | Gateways act as internal infrastructure powering broader media or AI platforms. | + +#### Economics + +| Category | Reason | Business Explanation | +| --------- | ------------------------------ | ------------------------------------------------------------------------------------------------------- | +| Economics | Service-layer monetization | Service providers charge end users above orchestrator cost for reliability, compliance, or convenience. | +| Economics | Avoid third-party gateway fees | Running your own gateway avoids routing fees, pricing risk, and policy constraints imposed by others. | + +#### Demand Control & Traffic Ownership + +| Category | Reason | Business Explanation | +| -------------- | -------------------------------------- | -------------------------------------------------------------------------------------------------------------- | +| Demand Control | Demand aggregation & traffic ownership | Gateways own ingress, customer relationships, usage data, and traffic predictability across apps or customers. | +| Demand Control | Workload normalization | Gateways smooth bursty demand into predictable, orchestrator-friendly workloads. | + +#### Performance + +| Category | Reason | Business Explanation | +| ----------- | --------------------------- | --------------------------------------------------------------------------------------------------- | +| Performance | Geographic request steering | Gateways route users to regionally optimal orchestrators to reduce latency and improve reliability. | + +#### Security & Compliance + +| Category | Reason | Business Explanation | +| -------- | --------------------------------- | ------------------------------------------------------------------------------------------ | +| Security | Enterprise policy enforcement | Gateways enforce IP allowlists, auth, rate limits, audit logs, and deterministic behavior. | +| Security | Cost-explosion & abuse protection | Gateways block buggy or malicious clients before they generate runaway compute costs. | + +#### Product Differentiation & UX + +| Category | Reason | Business Explanation | +| -------- | -------------------------------------- | ------------------------------------------------------------------------------------------------------- | +| Product | Product differentiation above protocol | Custom APIs, SDKs, dashboards, billing abstractions, and AI workflow presets live at the gateway layer. | +| Product | Stable API surface | Gateways shield customers from protocol or orchestrator churn via versioning and controlled change. | + +#### Observability & Feedback Loops + +| Category | Reason | Business Explanation | +| ------------- | -------------------------- | ------------------------------------------------------------------------------------------------------ | +| Observability | Analytics & feedback loops | Gateways see end-to-end request patterns, failures, latency, model performance, and customer behavior. 
|

#### Strategy, Optionality & Ecosystem Power

| Category | Reason | Business Explanation |
| --- | --- | --- |
| Strategy | Strategic independence | Running your own gateway avoids pricing, roadmap, availability, and censorship risk from other gateways. |
| Strategy | Future optionality | Early gateway operators gain leverage if incentives or network economics evolve. |

#### Ecosystem Influence

| Category | Reason | Business Explanation |
| --- | --- | --- |
| Ecosystem | Ecosystem influence | Gateways sit at a coordination choke-point that shapes standards, surfaces protocol gaps, and influences real usage. |

## NOTES ON SOME FETCHED DATA

Since useState, useEffect, and fetch work in Mintlify JSX components, you can
pull:

- Release info - versions, release notes, assets, dates
- Repo stats - stars, forks, open issues count
- File contents - README, config files, code examples (via raw.githubusercontent.com)
- Contributors - list of contributors, avatars
- Commit history - recent commits, changelog-style updates
- Issues/PRs - open issues count, specific issue details

**EXAMPLE**

I'm fetching the latest release of livepeer dynamically in some places, e.g.
gateways/linux-install. With a GitHub Action, `latestVersion` and
`latestVersionUrl` are saved in `/snippets/automationData/globals/globals.mdx`.

### !!! Caveats:

- Rate limits - the GitHub API allows 60 requests/hour for unauthenticated
  requests. If many users load the page, we could hit limits
- Client-side loading - shows "loading..." briefly before content appears
- No SSR - content won't be in the initial HTML (affects SEO if that matters)

### Future Recommendation:

For high-traffic pages, we might want a build-time approach instead (fetch once
during deploy, not on every page load).

Then we can use an n8n hook or GitHub Action to redeploy the docs when a new
release is published.

# AI Workers

AI workers run when you start a node with the -aiWorker flag. They can run
in two modes:

- Combined with Orchestrator (-orchestrator -aiWorker): the orchestrator also runs
  AI processing locally
- Standalone AI Worker (-aiWorker only): connects to a remote orchestrator via gRPC

Key Points:

- AI workers are the component that actually runs Docker containers starter.go:1345-1349
- Gateways only route requests and handle payments; they don't run containers byoc.go:25-35
- BYOC containers are managed by the AI worker's Docker manager
- For CPU models, you don't need the -nvidia flag starter.go:1296-1300

Notes:

- The -aiModels flag is required for AI workers to know which containers to load
  starter.go:1499-1502
- BYOC was introduced as the "Generic Processing Pipeline" in v0.8.5 CHANGELOG.md:94
- Your BYOC container must implement Livepeer's processing API to be compatible
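Putting the two modes side by side as CLI invocations, as a minimal sketch: the orchestrator address, port, and model file path are placeholder values, and only the flags named above (-orchestrator, -aiWorker, -aiModels, -orchAddr) are taken from the notes.

```bash
# Combined mode: the orchestrator also runs AI processing locally
livepeer -orchestrator -aiWorker -aiModels /path/to/aiModels.json

# Standalone mode: the AI worker connects to a remote orchestrator via gRPC
livepeer -aiWorker -orchAddr orchestrator.example.com:8935 \
  -aiModels /path/to/aiModels.json
```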
## RTMP

RTMP is required when:

- You need to accept video streams from RTMP sources (OBS, FFmpeg, etc.)
- You're running a traditional video broadcasting gateway

RTMP is NOT needed when:

- You only need HTTP API access
- You're using HTTP push ingest only
- The gateway is purely for AI processing requests

## HTTP

You don't need to specify HTTP settings when:

- Testing locally on the same machine
- You only need RTMP ingest from external sources
- You don't need HLS playback or API access from outside

Add the HTTP port when:

- You need HLS video playback from external players
- You want HTTP video ingest from remote sources
- You need API access from other services

## Off-chain PRODUCTION Gateways

An off-chain production gateway is possible because Livepeer doesn't require
blockchain interaction for basic video processing - it can operate as a
centralized service connecting directly to orchestrators. However, Livepeer is
not traditional P2P software; it's a **client-server architecture** with
specialized node types.

##### Off-Chain Production Gateway

Off-chain mode skips all Ethereum initialization and blockchain interactions.

The gateway operates by:

- Direct Orchestrator Connection: Uses -orchAddr to connect directly to
  orchestrator(s)
- No Blockchain Dependencies: No need for -ethUrl, keystore, or ETH tokens
- Local Verification Only: Disables on-chain verification by default

##### Architecture: Client-Server (Hub-and-Spoke), Not P2P

Livepeer uses a hub-and-spoke model, not P2P:

`Gateway → Orchestrator → Transcoder`

- Gateways accept streams and route jobs
- Orchestrators coordinate transcoding
- Transcoders process video

Communication happens via HTTP/gRPC protocols, not peer-to-peer networking.

##### When to Use Off-Chain Production

Off-chain is suitable for:

- Private video processing infrastructure
- Development/testing environments
- When you don't need economic incentives or public discovery

On-chain is needed for:

- Public, decentralized video processing
- Economic incentives and staking
- Access to the broader Livepeer protocol network

##### Notes

- Off-chain gateways are fully functional for video processing but operate in
  isolation
- The test suite includes off-chain gateway tests
- Development configurations show both off-chain and on-chain modes as
  first-class citizens.
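A minimal startup sketch for this mode: the orchestrator address and port are placeholders, and `-network offchain` is written out explicitly (the starter code checks for the "offchain" network value, so it may also be the default).

```bash
# Off-chain gateway: connect straight to a known orchestrator,
# no -ethUrl, keystore, or ETH required
livepeer -gateway -network offchain \
  -orchAddr orchestrator.example.com:8935
```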
##### WILD, But enterprise whitelabel?

This is wild. It's seriously crazy you can use the software WITH **NO REAL
incentive** to join the network. Because gateways have no real incentives, and if
you have an orchestrator... you probably have a gateway. And orchestrators are
doing no work and getting rewarded by an inflationary token. **WILD**.

!! **BUT** !! it's a good basis for an enterprise version.

---

## Gateway Economics

Gateways themselves don't receive direct protocol incentives. The economic
incentives in Livepeer are designed for orchestrators and transcoders, not
gateways.

**Who Gets Incentives** The Livepeer protocol's token incentives are targeted
at:

- Orchestrators: Earn fees for transcoding services and can redeem winning
  tickets multi-o.md:25-51
- Transcoders: Process video and get paid by orchestrators
- Reward Service: Registered addresses can call reward functions to mint LPT
  ethereum.md:3-13 _delegators_ ?

Gateways are essentially routing infrastructure - they accept streams and
forward jobs to orchestrators, but don't participate in the protocol's economic
system directly.

**Why Run a Gateway Anyway**

Despite no direct incentives, gateways serve important purposes:

1. Service Providers: Companies can offer video streaming services to customers,
   charging them directly while using Livepeer orchestrators for processing
2. Private Infrastructure: Off-chain gateways enable private video processing
   without blockchain overhead starter.go:743-749
3. Control & Customization: Gateways can add authentication, custom APIs, and
   business logic on top of the core protocol
4. Development & Testing: Essential for building and testing applications on the
   Livepeer network

**Economic Model**

The incentive structure assumes:

- End users pay gateways for video services
- Gateways pay orchestrators for transcoding work
- Orchestrators earn protocol rewards and fees

This creates a market where gateways **compete on service quality, price, and
features**, while the protocol ensures reliable transcoding through orchestrator
incentives.

**Notes**

- Off-chain gateways operate entirely outside the economic system - they're just
  routing software starter.go:743-749
- On-chain gateways still don't earn rewards, but can participate in the broader
  protocol ecosystem
- The design separates concerns: infrastructure (gateways) from processing
  (orchestrators) from security (protocol incentives)

---

## Transcoder Role

Transcoders in Livepeer process video segments and receive payments from
orchestrators through a ticket-based micropayment system. The orchestrator
handles payments from broadcasters and then compensates transcoders for their
work.

#### Payment Flow

1. Broadcaster to Orchestrator: The broadcaster sends payment tickets along with
   video segments to the orchestrator. The `ProcessPayment` function in the
   orchestrator validates these tickets and tracks the balance [1](#26-0):

```go
func (orch *orchestrator) ProcessPayment(ctx context.Context, payment net.Payment, manifestID ManifestID) error
```

2. Fee Calculation: After transcoding completes, the orchestrator debits fees
   based on the total pixels processed [2](#26-1):

```go
// Debit the fee for the total pixel count
h.orchestrator.DebitFees(sender, core.ManifestID(segData.AuthToken.SessionId), payment.GetExpectedPrice(), pixels)
```

3. Payment Structure: Payments use a probabilistic ticket system with these key
   components [3](#26-2):

- `TicketParams` - Parameters for probabilistic micropayment tickets
- `ExpectedPrice` - The orchestrator's advertised price per pixel
- `Sender` - The broadcaster's Ethereum address

#### Transcoder Compensation

While the code shows how orchestrators receive payments from broadcasters, the
actual mechanism for orchestrators paying transcoders is not explicitly visible
in the provided snippets. However, based on the architecture:

1. **Local Transcoding**: When using `-orchestrator -transcoder`, the
   orchestrator processes jobs locally, so no external payment is needed
2. **Remote Transcoders**: For separate transcoder nodes, payment would happen
   through the `RemoteTranscoderManager` [4](#26-3)
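Since the fee debit above is pixel-based, a toy calculation helps make the units concrete. All numbers here are made up for illustration only:

```bash
# fee = pixels processed * price per pixel
pixels=$((1280 * 720 * 30 * 2))   # one 2-second 720p30 segment
wei_per_pixel=1200                # an example advertised price
echo "fee: $((pixels * wei_per_pixel)) wei"
```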
#### Key Points

- Transcoders don't directly interact with broadcasters - all payments flow
  through orchestrators
- The system uses probabilistic micropayments to avoid on-chain transactions for
  every segment
- Payment amounts are calculated based on pixel count and price per pixel
  [5](#26-4)
- Winning tickets can be redeemed on-chain for actual ETH

#### Notes

The actual payment flow from orchestrator to transcoder appears to be handled
internally within the orchestrator component, but the specific implementation
isn't visible in the provided code snippets. The architecture suggests this is
managed through balance tracking and off-chain settlement mechanisms.

## Transcoder Role 2

A transcoder is not the same as an orchestrator - they are distinct roles in the
Livepeer network, though they can be combined on the same node.

Key Differences

**Orchestrator**

- Coordinates work: Manages payments, job distribution, and orchestrator
  discovery orchestrator.go:48-53
- Handles economics: Processes payments and validates tickets
  orchestrator.go:105-130
- Manages capacity: Tracks available transcoders and their capabilities
  orchestrator.go:81-91

**Transcoder**

- Processes video: Actually performs the video transcoding work using ffmpeg
- Registers with orchestrator: Connects to orchestrators to receive work
  ot_rpc.go:99-104
- Standalone or combined: Can run separately or on the same node as the orchestrator

**Architecture Flow**

`Gateway → Orchestrator → Transcoder`

The orchestrator acts as the middleman - it receives jobs from gateways, handles
payments, then distributes the actual transcoding work to registered transcoders
lp_rpc.proto:23-28.

**Combined Mode**

Many operators run both roles together with the -orchestrator -transcoder flags
README.md:36-37. In this mode:

- The orchestrator processes jobs locally instead of sending to remote
  transcoders
- This is simpler for small deployments
- But they remain logically separate components

**Notes**

- The node types are explicitly defined as separate enums in the code
  livepeernode.go:44-51
- Orchestrators can manage multiple transcoders for scaling multi-o.md:5
- **In production, large operators often split these roles across different
  machines for better scaling and reliability**

---

## Node Types

A gateway cannot also be a transcoder. These are distinct node types in
Livepeer - a gateway runs as a BroadcasterNode while a transcoder runs as a
TranscoderNode livepeernode.go:44-51.

**Node Types Are Mutually Exclusive**

When starting a Livepeer node, you choose one primary role:

- -gateway makes the node a BroadcasterNode starter.go:694-698
- -transcoder makes the node a TranscoderNode starter.go:690-691
- -orchestrator makes the node an OrchestratorNode starter.go:681-682

The starter code enforces this by setting n.NodeType to exactly one type
starter.go:679-701.

**What Can Be Combined**

Only orchestrators can have dual roles:

- Orchestrator + Transcoder: Use the -orchestrator -transcoder flags README.md:36-37
- Orchestrator + AI Worker: Use the -orchestrator -aiWorker flags

A gateway always remains just a gateway - it routes requests but doesn't process
video itself. The valid and invalid flag combinations are sketched below.
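A sketch of the combination rules using only the flags named above (no other flags shown):

```bash
# Valid: single roles
livepeer -gateway            # BroadcasterNode
livepeer -orchestrator       # OrchestratorNode
livepeer -transcoder         # TranscoderNode

# Valid: orchestrator dual roles
livepeer -orchestrator -transcoder   # transcodes locally
livepeer -orchestrator -aiWorker     # runs AI processing locally

# Invalid: a gateway can never transcode
# livepeer -gateway -transcoder
```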
**Architecture Flow**

`Gateway (BroadcasterNode) → Orchestrator (can also transcode) → Transcoder (if separate)`

The gateway's job is to accept streams and forward jobs to orchestrators. It
never performs transcoding itself.

**Notes**

- You cannot run -gateway -transcoder together - the flags are mutually exclusive
- For small deployments, use -orchestrator -transcoder on one node
- For large deployments, split roles: separate gateway, orchestrator, and transcoder
  nodes

---

## -aiServiceRegistry (For on-chain AI gateways to discover AI orchestrators)

The -aiServiceRegistry flag is used to enable the AI Service Registry contract
for on-chain AI deployments. It's not exclusively for orchestrators - it can be
used by any node type (gateways, orchestrators, AI workers) when running in
on-chain mode with AI capabilities.

**Purpose**

When -aiServiceRegistry is set to true, the node uses a specific ServiceRegistry
contract address for the Livepeer AI Subnet instead of the default protocol
ServiceRegistry starter.go:842-844:

```go
	if *cfg.AIServiceRegistry {
		// For the time-being Livepeer AI Subnet uses its own ServiceRegistry, so we define it here
		ethCfg.ServiceRegistryAddr = ethcommon.HexToAddress("0x04C0b249740175999E5BF5c9ac1dA92431EF34C5")
	}
```

**Who Uses It**

- Gateways: For on-chain AI gateways to discover AI orchestrators
  development.md:352-365
- Orchestrators: For on-chain AI orchestrators to register their AI capabilities
  development.md:314-326
- AI Workers: Not typically needed, since AI workers connect directly to
  orchestrators

**When to Use**

Use -aiServiceRegistry when:

- Running on-chain **(off-chain mode doesn't need it)**
- Your node needs to interact with AI services
- You want to use the Livepeer AI Subnet's dedicated service registry

**Example Usage**

```bash
# On-chain AI Gateway
livepeer -gateway -aiServiceRegistry -network arbitrum-one-mainnet

# On-chain AI Orchestrator
livepeer -orchestrator -aiWorker -aiServiceRegistry -network arbitrum-one-mainnet
```

**Notes**

- The flag defaults to false starter.go:229
- It's only relevant for on-chain deployments flags.go:57
- The AI Subnet uses a separate ServiceRegistry to isolate AI services from
  general transcoding services

---

## -transcodingOptions

You're right to be confused! Gateways don't transcode video themselves, but they
still need `transcodingOptions` to tell orchestrators WHAT video profiles to
create. [1](#31-0)

**Why Gateways Need Transcoding Options**

Gateways use `transcodingOptions` to:

1. **Specify desired output profiles** - Tell orchestrators which video
   renditions to create (240p, 360p, 720p, etc.)
2. **Include in segment requests** - The profiles are sent to orchestrators with
   each video segment [2](#31-1)
3. **Sign with requests** - The broadcaster signs segments including the profile
   hash [3](#31-2)

**How It Works**

When a gateway starts, it parses the transcoding options and stores them in
`BroadcastJobVideoProfiles` [4](#31-3).
Later, when processing streams: + +```go +// In createRTMPStreamIDHandler +if resp.Profiles == nil && len(resp.Presets) <= 0 { + profiles = BroadcastJobVideoProfiles // Use the configured defaults +} +``` + +**Architecture Flow** + +``` +Gateway (specifies profiles) → Orchestrator (reads profiles) → Transcoder (creates profiles) +``` + +The gateway is like a customer ordering food - it specifies what it wants even +though the kitchen (transcoder) does the actual cooking. + +**Notes** + +- Default profiles are `P240p30fps16x9,P360p30fps16x9` [6](#31-5) +- You can override via CLI flag, webhook, or API [7](#31-6) +- The profiles are included in the segment signature for security [3](#31-2) + +--- + +## Example Usage of -maxPricePerCapability + +The `-maxPricePerCapability` flag sets maximum prices for AI capabilities +(pipelines and models) that a gateway will pay for AI processing services +[1](#34-0) . + +**JSON File Format (Recommended)** + +Create a JSON file with capability pricing: + +```json +{ + "capabilities_prices": [ + { + "pipeline": "text-to-image", + "model_id": "stabilityai/sd-turbo", + "price_per_unit": 1000, + "pixels_per_unit": 1, + "currency": "WEI" + }, + { + "pipeline": "image-to-video", + "model_id": "default", + "price_per_unit": 2000, + "pixels_per_unit": 3, + "currency": "WEI" + }, + { + "pipeline": "upscale", + "model_id": "default", + "price_per_unit": 1200, + "pixels_per_unit": 1, + "currency": "WEI" + } + ] +} +``` + +Then use it with: + +```bash +-maxPricePerCapability=/path/to/your/config.json +``` + +**Direct String Format** + +You can also pass the JSON directly: + +```bash +-maxPricePerCapability='{"capabilities_prices": [{"pipeline": "text-to-image", "model_id": "stabilityai/sd-turbo", "price_per_unit": 1000, "pixels_per_unit": 1}]}' +``` + +**Key Fields** + +- **pipeline**: The AI pipeline (e.g., "text-to-image", "image-to-video") + [2](#34-1) +- **model_id**: Specific model ID or "default" for all models in pipeline + [3](#34-2) +- **price_per_unit**: Maximum price to pay per unit +- **pixels_per_unit**: Number of pixels that make up one unit (default: 1) +- **currency**: Currency (WEI, USD, etc.) [4](#34-3) + +**CLI API Usage** + +You can also set prices via the CLI API: + +```bash +curl -X POST http://localhost:5935/setMaxPriceForCapability \ + -d "maxPricePerUnit=1000" \ + -d "pixelsPerUnit=1" \ + -d "currency=WEI" \ + -d "pipeline=text-to-image" \ + -d "modelID=stabilityai/sd-turbo" +``` + +**Notes** + +- Use `"model_id": "default"` to set the same price for all models in a pipeline + [1](#34-0) +- The actual price per pixel is calculated as `price_per_unit / pixels_per_unit` + [5](#34-4) +- This flag is only used for AI capabilities, not video transcoding [6](#34-5) +- Prices are validated and must be greater than 0 [7](#34-6) + +Wiki pages you might want to explore: + +- [Architecture (livepeer/go-livepeer)](/wiki/livepeer/go-livepeer#1.1) +- [Ethereum Integration (livepeer/go-livepeer)](/wiki/livepeer/go-livepeer#3.3) + +--- + +## -livePaymentInterval + +You can use `-livePaymentInterval` on AI-only gateways, but only for **Live AI +Video workflows**. It's not limited to dual gateways. + +#### Key Points + +**What It's For** + +The `-livePaymentInterval` flag controls payment processing frequency +specifically for **Live AI Video** workflows where the gateway sends periodic +payments to the orchestrator [1](#35-0) . It defaults to 5 seconds [2](#35-1) . + +**When It's Used** + +The flag is used in two main scenarios: + +1. 
**Live AI Video Processing** - When processing live video streams through AI
   models [3](#35-2)
2. **AI HTTP Video Processing** - For HTTP-based AI video workflows [4](#35-3)

**When It's NOT Needed**

- **Static AI Processing** - Not needed for text-to-image, image-to-video, or
  other non-live AI workflows
- **Regular Video Transcoding** - Not used for standard transcoding (that uses a
  different payment system)
- **Off-chain Gateways** - Only relevant for on-chain scenarios with payments

**How It Works**

When price info is available and non-zero, the gateway creates a
`LivePaymentProcessor` that sends payments at the specified interval [5](#35-4).
The processor avoids processing payments for every segment to reduce
computational cost [6](#35-5).

**Notes**

- The flag is stored in the `LivepeerNode` struct as `LivePaymentInterval`
  [7](#35-6)
- It's only relevant when processing live video streams with AI models
- You can leave it at the default (5 seconds) for most use cases
- The payment processor will only be created if there's a price per unit set
  [8](#35-7)

# Production Authentication with -authWebhookUrl

The `-authWebhookUrl` flag enables webhook-based authentication for incoming
streams in production Livepeer gateways. It's essential for securing publicly
accessible gateways.

## How It Works

When a stream connects to your gateway, Livepeer makes a POST request to your
webhook endpoint with the stream URL [1](#46-0):

```json
{
  "url": "rtmp://livepeer.node/manifest"
}
```

Your webhook must respond with HTTP 200 to authorize the stream. Any other
status code denies it [2](#46-1).

## Webhook Response Options

### Empty Response (200 OK)

- Uses the manifestID from the URL or generates a random one
- Uses default transcoding profiles

### JSON Response

```json
{
  "manifestID": "ManifestID",
  "streamKey": "SecretKey",
  "presets": ["Preset", "Names"],
  "profiles": [
    {
      "name": "ProfileName",
      "width": 320,
      "height": 240,
      "bitrate": 1000000,
      "fps": 30,
      "fpsDen": 1,
      "profile": "H264Baseline",
      "gop": "2.5"
    }
  ]
}
```

The webhook can customize [3](#46-2):

- **manifestID**: Stream identifier (alphanumeric only)
- **streamKey**: Optional RTMP playback protection
- **presets/profiles**: Override transcoding options
- **profile**: Codec selection (H264Baseline, H264Main, etc.)
- **gop**: GOP length in seconds

## Production Implementation

### Simple Auth Server Example

Livepeer provides a basic auth server example [4](#46-3). Keep these production
considerations in mind:

- **Fast responses**: ensure the webhook responds quickly, since it sits on the
  stream-connect path
- **Caching**: Livepeer caches webhook responses to reduce calls
- **Validation**: the gateway validates that webhook URLs must be HTTP(S)
  [5](#46-4)

## Configuration

Add to your gateway command:

```bash
livepeer -gateway -authWebhookUrl https://your-auth-server.com/auth
```

## HTTP Ingest Requirement

When using `-httpAddr=0.0.0.0` (public access), HTTP ingest is disabled unless
you either set `-authWebhookUrl` or explicitly enable HTTP ingest [6](#46-5)

## Notes

- Orchestrators can also use webhooks for discovery authentication [7](#46-6)
- Test your webhook endpoint thoroughly before production deployment
- Consider rate limiting to prevent abuse
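A quick way to sanity-check a webhook endpoint before wiring it into the gateway. This mimics the POST the gateway sends and checks only the status code; the endpoint URL is a placeholder:

```bash
# Expect 200 for an authorized stream; anything else denies it
curl -s -o /dev/null -w "%{http_code}\n" \
  -X POST \
  -d '{"url":"rtmp://livepeer.node/manifest"}' \
  https://your-auth-server.com/auth
```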
Wiki pages you might want to explore:

- [Architecture (livepeer/go-livepeer)](/wiki/livepeer/go-livepeer#1.1)

# Testnets

Yes, it's possible to test on testnets, and there are orchestrators running on
them. The Livepeer codebase explicitly supports testnet deployments with
dedicated network configurations.

## Quick Answer

- **Testnets are supported**: Livepeer supports `arbitrum-one-goerli` and other
  testnets
- **Orchestrators exist**: The code assumes orchestrators are available on
  testnets
- **Deprecated testnets**: Rinkeby and arbitrum-one-rinkeby are deprecated but
  still functional

---

## Testnet Support in Codebase

### Network Configurations

The starter code includes specific configurations for testnets [1](#50-0):

```go
configOptions := map[string]*NetworkConfig{
	"rinkeby": {
		ethController: "0x9a9827455911a858E55f07911904fACC0D66027E",
		redeemGas:     redeemGasL1,
	},
	"arbitrum-one-rinkeby": {
		ethController: "0x9ceC649179e2C7Ab91688271bcD09fb707b3E574",
		redeemGas:     redeemGasL2,
	},
	// ... other networks
}
```

### Test Scripts

The test suite includes testnet-specific tests [2](#50-1):

```bash
# Run Rinkeby tests
if [ -z ${RINKEBY_ETH_URL+x} ]; then
  echo "RINKEBY_ETH_URL is not set - skipping Rinkeby tests"
else
  run_lp -gateway -network rinkeby $ETH_ARGS
  [ -d "$DEFAULT_DATADIR"/rinkeby ]
  kill $pid
fi
```

### Development Configurations

The starter code warns when deprecated testnet networks are selected [3](#50-2):

```go
if *cfg.Network == "rinkeby" || *cfg.Network == "arbitrum-one-rinkeby" {
	glog.Warning("The Rinkeby/ArbRinkeby networks are deprecated in favor of the Goerli/ArbGoerli networks which will be launched in January 2023.")
}
```

## Testing on Testnets

### Configuration

Use the testnet configuration:

```yaml
command:
  "-gateway -network arbitrum-one-goerli -ethUrl=https://goerli.arbitrum.io/rpc
  -ethUrl=https://eth-goerli.g.alchemy.com/v2/YOUR_API_KEY"
```

### Finding Orchestrators

Testnets have fewer orchestrators than mainnet, but they exist:

1. Check the Livepeer explorer for testnet orchestrators
2. Use the CLI to discover available orchestrators:
   ```bash
   curl http://localhost:5935/getOrchestrators
   ```

### Benefits of Testnet Testing

- No real ETH costs (use testnet faucets)
- Safe environment for experimentation
- Test new features before mainnet deployment

## Notes

- Testnet orchestrators are run by community members and the Livepeer team
- Some testnets may have limited capacity compared to mainnet
- Always check the network status before deploying to testnet
- The codebase is designed to work identically across testnet and mainnet, just
  with different network parameters

## On-Chain Gateway Orchestrator Discovery and Connection

This codemap traces how on-chain Livepeer gateways discover and connect to
orchestrators. The system supports both direct orchestrator specification via
the -orchAddr flag [1b] and automatic blockchain-based discovery when no addresses
are provided [1d]. The discovery process queries the BondingManager smart
contract to get all registered orchestrators [2c-2e], with different validation
rules for different node types - transcoders require explicit orchestrator
addresses [3a] while gateways can operate with or without them [3b].

### 1. Orchestrator Discovery Initialization

How a gateway sets up orchestrator discovery when starting in on-chain mode

### 1a. Discovery Setup Start (`starter.go:1576`)

Entry point for configuring orchestrator discovery

```text
// Set up orchestrator discovery
```

### 1b.
Direct Orchestrator Pool (`starter.go:1584`) + +Uses specific orchestrator addresses when provided + +```text +if len(orchURLs) > 0 { + n.OrchestratorPool = discovery.NewOrchestratorPool(bcast, orchURLs, common.Score_Trusted, orchBlacklist, *cfg.DiscoveryTimeout) +``` + +### 1c. On-Chain DB Cache (`starter.go:1591`) + +Creates blockchain-based orchestrator cache for on-chain mode + +```text +if *cfg.Network != "offchain" { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + dbOrchPoolCache, err := discovery.NewDBOrchestratorPoolCache(ctx, n, timeWatcher, orchBlacklist, *cfg.DiscoveryTimeout) +``` + +### 1d. Auto-Discovery Fallback (`starter.go:1600`) + +Uses blockchain discovery when no specific orchestrators provided + +```text +if *cfg.OrchWebhookURL == "" && len(orchURLs) == 0 { + n.OrchestratorPool = dbOrchPoolCache +``` + +### 2. Blockchain Orchestrator Pool Query + +How the gateway discovers orchestrators from smart contracts when no addresses +are specified + +### 2a. HTTP Discovery Handler (`handlers.go:1077`) + +Gateway endpoint for discovering available orchestrators + +```text +func registeredOrchestratorsHandler(client eth.LivepeerEthClient, db *common.DB) http.Handler { +``` + +### 2b. Query Transcoder Pool (`handlers.go:1079`) + +Calls Ethereum client to get registered orchestrators + +```text +orchestrators, err := client.TranscoderPool() +``` + +### 2c. Ethereum Client Implementation (`client.go:980`) + +Retrieves all transcoders from the bonding manager contract + +```text +func (c *client) TranscoderPool() ([]*lpTypes.Transcoder, error) { +``` + +### 2d. Get First Orchestrator (`client.go:983`) + +Smart contract call to get first registered orchestrator + +```text +tAddr, err := c.GetFirstTranscoderInPool() +``` + +### 2e. Iterate Through Pool (`client.go:996`) + +Loops through all registered orchestrators + +```text +tAddr, err = c.GetNextTranscoderInPool(tAddr) +``` + +### 3. Error Handling and Validation + +How the gateway validates orchestrator configuration and handles missing +connections + +### 3a. Transcoder Validation (`starter.go:1827`) + +Transcoders and AI workers require orchestrator addresses + +```text +if n.NodeType == core.TranscoderNode || n.NodeType == core.AIWorkerNode { + if len(orchURLs) <= 0 { + glog.Exit("Missing -orchAddr") +``` + +### 3b. Gateway Pool Validation (`starter.go:1605`) + +Gateways can run without orchestrators but won't transcode + +```text +if n.OrchestratorPool == nil { + // Not a fatal error; may continue operating in segment-only mode + glog.Error("No orchestrator specified; transcoding will not happen") +``` + +### 3c. Flag Definition (`flags.go:23`) + +Command line flag for specifying orchestrator addresses + +```text +cfg.OrchAddr = fs.String("orchAddr", *cfg.OrchAddr, "Comma-separated list of orchestrators to connect to") +``` + +# BYOC (Bring Your Own Container) Summary + +BYOC is Livepeer's Generic Processing Pipeline that allows you to run custom +Docker containers for media processing on the Livepeer network while integrating +with Livepeer's infrastructure for job distribution and payments. 
[1](#53-0)

## Key Points

- **BYOC is NOT just any Docker container** - it must implement Livepeer's
  processing API
- **Runs on Orchestrators with AI Workers** - containers execute on orchestrator
  nodes, not directly on-chain
- **Gateway routes jobs** - gateways accept requests and forward them to
  orchestrators with BYOC capabilities
- **Payment model** - uses 1 ticket per second of compute (updated in v0.8.7)
  [2](#53-1)

## Architecture

```mermaid
graph LR
    Gateway[BYOC Gateway] --> Orchestrator[BYOC Orchestrator]
    Orchestrator --> Container[Custom Container]

    Gateway -.->|/process/request| Orchestrator
    Orchestrator -.->|Register Capability| Container
```

The system consists of:

- **BYOCGatewayServer** - Handles job submission from clients [3](#53-2)
- **BYOCOrchestratorServer** - Manages job processing and capability
  registration [4](#53-3)

## Requirements

1. **Orchestrator with AI Worker** - Needed to run containers
2. **Custom Container** - Must implement Livepeer's API
3. **Off-chain or On-chain** - Works in both modes
4. **Payment Setup** - Tickets for compute time

## Notes

- BYOC was introduced as the "Generic Processing Pipeline" in v0.8.5
- Containers are managed similarly to AI worker containers
- Your container must expose the required HTTP endpoints for job processing

Wiki pages you might want to explore:

- [Overview (livepeer/go-livepeer)](/wiki/livepeer/go-livepeer#1)
- [Architecture (livepeer/go-livepeer)](/wiki/livepeer/go-livepeer#1.1)
- [AI Workers (livepeer/go-livepeer)](/wiki/livepeer/go-livepeer#2.6)

# On-Chain Gateway Setup

For an on-chain dual gateway, you don't need to manually connect to
orchestrators - the system discovers them automatically from the blockchain.

## Quick Answer

**No special setup needed** - just run the gateway with on-chain flags. The
gateway will:

- Automatically discover all public orchestrators from the protocol
- Filter by capabilities (video transcoding, AI models)
- Select the best ones based on price and performance

---

## How On-Chain Discovery Works

### Automatic Orchestrator Discovery

When you start an on-chain gateway (`-network arbitrum-one-mainnet`), it
automatically creates a `DBOrchestratorPoolCache` that polls the blockchain for
registered orchestrators [1](#55-0):

```go
if *cfg.Network != "offchain" {
	dbOrchPoolCache, err := discovery.NewDBOrchestratorPoolCache(ctx, n, timeWatcher, orchBlacklist, *cfg.DiscoveryTimeout)
	if err != nil {
		exit("Could not create orchestrator pool with DB cache: %v", err)
	}

	// Use DB cache as orchestrator pool if no specific orchestrators provided
	if *cfg.OrchWebhookURL == "" && len(orchURLs) == 0 {
		n.OrchestratorPool = dbOrchPoolCache
	}
}
```

### What Gets Discovered

The discovery system fetches:

- **All registered orchestrators** from the BondingManager contract
- **Their capabilities** (video transcoding profiles, AI models)
- **Pricing information** for each capability
- **Performance metrics** and capacity

### Capability Matching

When processing requests, the gateway:

1. Queries discovered orchestrators for required capabilities [2](#55-1)
2. Filters by price limits (`-maxPricePerUnit` for video,
   `-maxPricePerCapability` for AI)
3. Selects based on performance and availability
## Recommended On-Chain Settings

### Minimal Required Flags

```yaml
# -aiServiceRegistry is required for AI capabilities
command:
  "-gateway -network arbitrum-one-mainnet -ethUrl=https://arb1.arbitrum.io/rpc
  -aiServiceRegistry"
```

### Recommended Additions

```yaml
# -maxPricePerUnit caps the video price, -maxPricePerCapability caps AI prices,
# and -monitor enables metrics
command:
  "-gateway -network arbitrum-one-mainnet -ethUrl=https://arb1.arbitrum.io/rpc
  -aiServiceRegistry -maxPricePerUnit=1000000000
  -maxPricePerCapability=/path/to/ai-pricing.json -monitor=true"
```

## No Manual Orchestrator List Needed

Unlike off-chain mode where you specify `-orchAddr`, on-chain gateways:

- **Discover all public orchestrators** automatically
- **Update the list periodically** (every 25 minutes)
- **Handle failures** by trying alternative orchestrators

You can verify discovery worked:

```bash
curl http://localhost:5935/getOrchestrators
```

This will show all discovered orchestrators with their capabilities and pricing.

## Notes

- On-chain discovery requires ETH in your gateway account for transactions
  [3](#55-2)
- The gateway will show "No orchestrator specified; transcoding will not happen"
  if discovery fails [4](#55-3)
- Test on a testnet first to avoid gas costs while verifying your setup [5](#55-4)

Wiki pages you might want to explore:

- [Discovery and Selection (livepeer/go-livepeer)](/wiki/livepeer/go-livepeer#3.5)
- [Architecture (livepeer/go-livepeer)](/wiki/livepeer/go-livepeer#1.1)

# LINUX BUILD FROM SOURCE (ELITEPROXY INFO)

Building from source locally (on Linux) usually looks like this.

The purpose of this step is to ensure ffmpeg is built in a specific location and
does not conflict with any existing installed ffmpeg. Livepeer uses a special
ffmpeg build.

```
export ROOT=$HOME/buildoutput
export LD_LIBRARY_PATH=$ROOT/compiled/lib/
export PKG_CONFIG_PATH=$ROOT/compiled/lib/pkgconfig
export PATH=$ROOT/compiled/bin/:$PATH

./install_ffmpeg.sh $ROOT
```

If you want to build a go-livepeer Docker image, you can do so from the root of
the repository using this from-source setup. See
[eliteproxy_launch.json](./v2/assets/gateways/code_examples/eliteproxy_launch.json)
for an example VS Code launch configuration used to build and debug go-livepeer
from source. The launch config shows the same paths being used to ensure
libraries are loaded from Livepeer's ffmpeg.
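With those environment variables exported, the go-livepeer build itself is roughly a sketch like the following. The `make` targets are assumptions here; check the repo's Makefile and CONTRIBUTING.md for the current ones:

```bash
# Build go-livepeer against the locally compiled ffmpeg
git clone https://github.com/livepeer/go-livepeer.git
cd go-livepeer
make livepeer livepeer_cli   # assumed targets; see the Makefile
```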
+Without those paths, a dev can end up accidentally linking against a globally
+installed ffmpeg, or worse: if you never had ffmpeg installed and ran
+install_ffmpeg.sh without setting them, Livepeer's build becomes your system
+ffmpeg with no clear way to uninstall it. In that case go-livepeer usually
+still works, but good luck with other ffmpeg builds. "Building from source"
+arguably deserves a mention in
+https://github.com/livepeer/go-livepeer/blob/master/CONTRIBUTING.md if it is
+not linked there already.
+
+System packages I had to install to compile go-livepeer:
+
+```
+sudo apt-get update && sudo apt-get -y install build-essential pkg-config autoconf git curl wget
+sudo apt-get -y install protobuf-compiler-grpc golang-goprotobuf-dev
+sudo apt-get -y install clang clang-tools
+```
diff --git a/ROLLBACK-GUIDE.md b/ROLLBACK-GUIDE.md new file mode 100644 index 00000000..30eadb4f --- /dev/null +++ b/ROLLBACK-GUIDE.md @@ -0,0 +1,94 @@
+# Emergency Rollback Guide
+
+## Quick Rollback (Last 5 minutes)
+
+```bash
+# See recent commits
+git log --oneline -10 docs-v2-dev
+
+# Safe rollback - creates new commit that undoes changes
+git revert <commit-hash>
+
+# OR - step back one commit, keeping the changes staged
+git reset --soft HEAD~1
+# Then inspect and recommit if needed
+```
+
+## View All Changes Since Date
+
+```bash
+# Since last hour
+git log --oneline --since="1 hour ago" docs-v2-dev
+
+# Since specific time
+git log --oneline --since="2026-01-06 20:00:00" docs-v2-dev
+```
+
+## See What Changed in Last Commit
+
+```bash
+git show HEAD
+```
+
+## Rollback to Specific Commit
+
+```bash
+# List all commits
+git reflog
+
+# Safe method: create new commit that undoes changes
+git revert <commit-hash>
+
+# Restore specific file to previous version
+git restore --source=<commit-hash> <file>
+
+# Go back one commit (keeps changes staged)
+git reset --soft HEAD~1
+
+# NEVER use: git reset --hard (destroys uncommitted work)
+```
+
+## If You Need to Undo Last Auto-Commit
+
+```bash
+# See what's in the last commit
+git show HEAD
+
+# Create a new commit that reverts it
+git revert HEAD
+
+# OR - unstage it and inspect
+git reset --soft HEAD~1
+git diff --cached
+```
+
+## See Diff Between Commits
+
+```bash
+# What changed in last auto-commit
+git diff HEAD~1 HEAD
+
+# What changed in a specific commit
+git diff <commit-hash>~1 <commit-hash>
+```
+
+## Automatic Checkpoints
+
+Every 5 minutes a new commit is created on docs-v2-dev with a timestamp. Each
+commit is a full snapshot you can revert to instantly (a sketch of what that
+job might run is shown below).
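+
+For reference, a minimal sketch of what that scheduled checkpoint job might
+run. This is an assumption about the automation, not its actual script:
+
+```bash
+# Assumed shape of the 5-minute checkpoint job: snapshot everything on
+# docs-v2-dev with a timestamped commit message
+git checkout docs-v2-dev
+git add -A
+git commit -m "auto-checkpoint: $(date '+%Y-%m-%d %H:%M:%S')"
+```
+
+Note that `git commit` exits non-zero when there is nothing to commit, so a
+real job would guard for that case.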
+
+### View Commit Timeline
+
+```bash
+git log --oneline --graph docs-v2-dev | head -20
+```
+
+### Tag Safe Points (Optional)
+
+```bash
+# Save a checkpoint
+git tag checkpoint-before-gateway-work
+
+# Later, go back to it (human-only: reset --hard is forbidden for AI)
+git reset --hard checkpoint-before-gateway-work
+```
diff --git a/UNIVERSAL-AI-PROTOCOL.md b/UNIVERSAL-AI-PROTOCOL.md new file mode 100644 index 00000000..f720ff6a --- /dev/null +++ b/UNIVERSAL-AI-PROTOCOL.md @@ -0,0 +1,202 @@
+# Universal AI Operations Protocol
+
+## For ANY AI Assistant Working on This Repository
+
+**Last Updated:** 2026-01-06
+**Created After:** Catastrophic AI failure that destroyed 12+ files and broke
+318+ more through a formatting disaster
+
+---
+
+## CRITICAL RULES FOR ALL AI ASSISTANTS
+
+### Rule 1: PROTECTED BRANCHES - UNTOUCHABLE
+
+```
+docs-v2 = SOURCE OF TRUTH
+main = PRODUCTION
+
+NO AI ASSISTANT IS ALLOWED TO:
+- Commit to these branches
+- Push to these branches
+- Merge into these branches
+- Delete these branches
+```
+
+### Rule 2: PRE-FLIGHT CHECKLIST (EVERY OPERATION)
+
+Before ANY git command, the AI MUST show you:
+
+```
+[PREFLIGHT CHECK]
+Operating on branch: ________
+Files affected: ________
+Operation: ________
+Expected outcome: ________
+
+Proceed? (yes/no)
+```
+
+**If you don't see this, STOP and ask the AI to show it.**
+
+### Rule 3: FORBIDDEN COMMANDS (ABSOLUTE)
+
+```
+❌ NEVER ALLOWED:
+- git reset --hard
+- git restore .
+- git clean -fd
+- git push --force
+- git rebase -i
+- git tag --force
+- Mass operations > 50 files without approval
+```
+
+### Rule 4: LARGE CHANGE PROTOCOL (> 10 files)
+
+```
+1. AI lists EXACT files being modified
+2. AI shows file count and brief explanation
+3. Human reviews and approves EXPLICITLY
+4. Human creates approval token if needed
+5. AI proceeds ONLY after approval
+```
+
+### Rule 5: COMMIT MESSAGE REQUIREMENTS
+
+Every commit must include:
+
+```
+[File count] | [What changed] | [Why changed] | [Approved by: USERNAME]
+
+Example:
+[9 files added] | Restore gateway quickstart files | Fix missing imports from stash | Approved by: alisonhaire
+```
+
+### Rule 6: FORBIDDEN FILE PATTERNS
+
+```
+❌ AI must NOT touch without explicit approval:
+- .prettierrc, .prettierignore
+- docs.json, docs_v2.json
+- package.json
+- v2/ structure changes
+- v1/ any changes (legacy)
+- .git/* (git config)
+- Migration of files between v1 and v2
+```
+
+### Rule 7: DANGEROUS OPERATION ALERTS
+
+AI MUST WARN before:
+
+- Restoring files from commits > 24 hours old
+- Deleting ANY file
+- Renaming directories
+- Changing file structure
+- Mass reformatting operations
+
+### Rule 8: AUDIT TRAIL REQUIREMENT
+
+Every operation logs to: `.ai-operations.log`
+
+```
+[TIMESTAMP] BRANCH=docs-v2-dev | FILES=9 | OP=restore | APPROVAL=yes | COMMIT=abc123
+```
+
+### Rule 9: CHECKPOINT SYSTEM
+
+- Auto-commits every 5 minutes on `docs-v2-dev`
+- Each commit is tagged with a timestamp
+- Rollback available to ANY point in the last 24 hours
+- Tags: `state-before-OPERATION` and `state-after-OPERATION`
+
+### Rule 10: DRY-RUN FOR COMPLEX OPS
+
+Before any operation affecting > 5 files:
+
+```
+1. AI shows DRY-RUN (what WOULD happen)
+2. Human reviews
+3. Human approves
+4. AI executes REAL operation
+5. AI shows ACTUAL result
+```
+
+---
+
+## EMERGENCY PROCEDURES
+
+### If AI Breaks Something
+
+```bash
+# 1. Get the commit hash
+git log --oneline docs-v2-dev | head -1
+
+# 2. See what broke
+git show HEAD
+
+# 3. Revert it (creates new commit undoing changes)
+git revert HEAD
+
+# 4.
Or unstage and inspect +git reset --soft HEAD~1 +``` + +### If AI Touches Protected Branch + +```bash +# Check what happened +git log docs-v2 --oneline -5 + +# Force restore from remote +git fetch origin +git reset --soft origin/docs-v2 +``` + +### If AI Tries Forbidden Command + +The pre-commit hook will block it automatically. + +--- + +## HUMAN RESPONSIBILITY CHECKLIST + +Before each AI session with YOUR docs: + +- [ ] Review protected branches list +- [ ] Know your current branch +- [ ] Have rollback commands ready (see ROLLBACK-GUIDE.md) +- [ ] Verify AI shows preflight checklist EVERY TIME +- [ ] Never let AI skip approval for > 10 file changes +- [ ] Check audit log (.ai-operations.log) regularly + +--- + +## FOR INSTRUCTING ANY NEW AI + +Include this in your prompt to any AI: + +``` +"You are working on a repository with STRICT AI safety protocols. +You MUST: +1. Never commit to docs-v2 or main +2. Show preflight checklist before every operation +3. Get explicit approval for any change > 10 files +4. Never use: git reset --hard, git restore ., git clean -fd +5. Log all operations to .ai-operations.log +6. Work only on docs-v2-dev branch for commits +7. Reference UNIVERSAL-AI-PROTOCOL.md for complete rules" +``` + +--- + +## VERSION CONTROL FOR THIS PROTOCOL + +- Created: 2026-01-06 after catastrophic AI failure +- Enforced by: `.git/hooks/pre-commit` (technical enforcement) +- Audited by: `.ai-operations.log` (human review) +- Rollback by: `ROLLBACK-GUIDE.md` (recovery procedures) + +**This protocol is NOT optional. It is the safety layer that prevents +irreversible damage.** diff --git a/ai/worker/api/gateway.openapi.yaml b/ai/worker/api/gateway.openapi.yaml new file mode 100644 index 00000000..f13f054d --- /dev/null +++ b/ai/worker/api/gateway.openapi.yaml @@ -0,0 +1,1281 @@ +# !!Auto-generated by 'gen_openapi.py'. DO NOT EDIT!! +openapi: 3.1.0 +info: + title: Livepeer AI Runner + description: An application to run AI pipelines + version: 0.0.0 +servers: + - url: https://dream-gateway.livepeer.cloud + description: Livepeer Cloud Community Gateway + - url: https://livepeer.studio/api/beta/generate + description: Livepeer Studio Gateway +paths: + /text-to-image: + post: + tags: + - generate + summary: Text To Image + description: Generate images from text prompts. + operationId: genTextToImage + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/TextToImageParams" + required: true + responses: + "200": + description: Successful Response + content: + application/json: + schema: + $ref: "#/components/schemas/ImageResponse" + x-speakeasy-name-override: data + "400": + description: Bad Request + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "401": + description: Unauthorized + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "500": + description: Internal Server Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "422": + description: Validation Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPValidationError" + security: + - HTTPBearer: [] + x-speakeasy-name-override: textToImage + /image-to-image: + post: + tags: + - generate + summary: Image To Image + description: Apply image transformations to a provided image. 
+ operationId: genImageToImage + requestBody: + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/Body_genImageToImage" + required: true + responses: + "200": + description: Successful Response + content: + application/json: + schema: + $ref: "#/components/schemas/ImageResponse" + x-speakeasy-name-override: data + "400": + description: Bad Request + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "401": + description: Unauthorized + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "500": + description: Internal Server Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "422": + description: Validation Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPValidationError" + security: + - HTTPBearer: [] + x-speakeasy-name-override: imageToImage + /image-to-video: + post: + tags: + - generate + summary: Image To Video + description: Generate a video from a provided image. + operationId: genImageToVideo + requestBody: + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/Body_genImageToVideo" + required: true + responses: + "200": + description: Successful Response + content: + application/json: + schema: + $ref: "#/components/schemas/VideoResponse" + x-speakeasy-name-override: data + "400": + description: Bad Request + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "401": + description: Unauthorized + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "500": + description: Internal Server Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "422": + description: Validation Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPValidationError" + security: + - HTTPBearer: [] + x-speakeasy-name-override: imageToVideo + /upscale: + post: + tags: + - generate + summary: Upscale + description: Upscale an image by increasing its resolution. + operationId: genUpscale + requestBody: + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/Body_genUpscale" + required: true + responses: + "200": + description: Successful Response + content: + application/json: + schema: + $ref: "#/components/schemas/ImageResponse" + x-speakeasy-name-override: data + "400": + description: Bad Request + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "401": + description: Unauthorized + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "500": + description: Internal Server Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "422": + description: Validation Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPValidationError" + security: + - HTTPBearer: [] + x-speakeasy-name-override: upscale + /audio-to-text: + post: + tags: + - generate + summary: Audio To Text + description: Transcribe audio files to text. 
+ operationId: genAudioToText + requestBody: + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/Body_genAudioToText" + required: true + responses: + "200": + description: Successful Response + content: + application/json: + schema: + $ref: "#/components/schemas/TextResponse" + x-speakeasy-name-override: data + "400": + description: Bad Request + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "401": + description: Unauthorized + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "413": + description: Request Entity Too Large + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "415": + description: Unsupported Media Type + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "500": + description: Internal Server Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "422": + description: Validation Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPValidationError" + security: + - HTTPBearer: [] + x-speakeasy-name-override: audioToText + /segment-anything-2: + post: + tags: + - generate + summary: Segment Anything 2 + description: Segment objects in an image. + operationId: genSegmentAnything2 + requestBody: + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/Body_genSegmentAnything2" + required: true + responses: + "200": + description: Successful Response + content: + application/json: + schema: + $ref: "#/components/schemas/MasksResponse" + x-speakeasy-name-override: data + "400": + description: Bad Request + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "401": + description: Unauthorized + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "500": + description: Internal Server Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "422": + description: Validation Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPValidationError" + security: + - HTTPBearer: [] + x-speakeasy-name-override: segmentAnything2 + /llm: + post: + tags: + - generate + summary: LLM + description: Generate text using a language model. + operationId: genLLM + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/LLMRequest" + required: true + responses: + "200": + description: Successful Response + content: + application/json: + schema: + $ref: "#/components/schemas/LLMResponse" + "400": + description: Bad Request + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "401": + description: Unauthorized + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "500": + description: Internal Server Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "422": + description: Validation Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPValidationError" + security: + - HTTPBearer: [] + x-speakeasy-name-override: llm + /image-to-text: + post: + tags: + - generate + summary: Image To Text + description: Transform image files to text. 
+ operationId: genImageToText + requestBody: + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/Body_genImageToText" + required: true + responses: + "200": + description: Successful Response + content: + application/json: + schema: + $ref: "#/components/schemas/ImageToTextResponse" + x-speakeasy-name-override: data + "400": + description: Bad Request + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "401": + description: Unauthorized + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "413": + description: Request Entity Too Large + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "500": + description: Internal Server Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "422": + description: Validation Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPValidationError" + security: + - HTTPBearer: [] + x-speakeasy-name-override: imageToText + /live-video-to-video: + post: + tags: + - generate + summary: Live Video To Video + description: + Apply transformations to a live video streamed to the returned + endpoints. + operationId: genLiveVideoToVideo + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/LiveVideoToVideoParams" + required: true + responses: + "200": + description: Successful Response + content: + application/json: + schema: + $ref: "#/components/schemas/LiveVideoToVideoResponse" + x-speakeasy-name-override: data + "400": + description: Bad Request + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "401": + description: Unauthorized + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "500": + description: Internal Server Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "422": + description: Validation Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPValidationError" + security: + - HTTPBearer: [] + x-speakeasy-name-override: liveVideoToVideo + /text-to-speech: + post: + tags: + - generate + summary: Text To Speech + description: + Generate a text-to-speech audio file based on the provided text input + and speaker description. + operationId: genTextToSpeech + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/TextToSpeechParams" + required: true + responses: + "200": + description: Successful Response + content: + application/json: + schema: + $ref: "#/components/schemas/AudioResponse" + x-speakeasy-name-override: data + "400": + description: Bad Request + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "401": + description: Unauthorized + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "500": + description: Internal Server Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "422": + description: Validation Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPValidationError" + security: + - HTTPBearer: [] + x-speakeasy-name-override: textToSpeech +components: + schemas: + APIError: + properties: + msg: + type: string + title: Msg + description: The error message. + type: object + required: + - msg + title: APIError + description: API error response model. 
+ AudioResponse: + properties: + audio: + allOf: + - $ref: "#/components/schemas/MediaURL" + description: The generated audio. + type: object + required: + - audio + title: AudioResponse + description: Response model for audio generation. + Body_genAudioToText: + properties: + audio: + type: string + format: binary + title: Audio + description: Uploaded audio file to be transcribed. + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for transcription. + default: "" + return_timestamps: + type: string + title: Return Timestamps + description: + "Return timestamps for the transcribed text. Supported values: + 'sentence', 'word', or a string boolean ('true' or 'false'). Default + is 'true' ('sentence'). 'false' means no timestamps. 'word' means + word-based timestamps." + default: "true" + type: object + required: + - audio + - model_id + title: Body_genAudioToText + Body_genImageToImage: + properties: + prompt: + type: string + title: Prompt + description: Text prompt(s) to guide image generation. + image: + type: string + format: binary + title: Image + description: Uploaded image to modify with the pipeline. + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for image generation. + default: "" + loras: + type: string + title: Loras + description: + 'A LoRA (Low-Rank Adaptation) model and its corresponding weight for + image generation. Example: { "latent-consistency/lcm-lora-sdxl": + 1.0, "nerijs/pixel-art-xl": 1.2}.' + default: "" + strength: + type: number + title: Strength + description: + Degree of transformation applied to the reference image (0 to 1). + default: 0.8 + guidance_scale: + type: number + title: Guidance Scale + description: + Encourages model to generate images closely linked to the text + prompt (higher values may reduce image quality). + default: 7.5 + image_guidance_scale: + type: number + title: Image Guidance Scale + description: + Degree to which the generated image is pushed towards the initial + image. + default: 1.5 + negative_prompt: + type: string + title: Negative Prompt + description: + Text prompt(s) to guide what to exclude from image generation. + Ignored if guidance_scale < 1. + default: "" + safety_check: + type: boolean + title: Safety Check + description: + Perform a safety check to estimate if generated images could be + offensive or harmful. + default: true + seed: + type: integer + title: Seed + description: Seed for random number generation. + num_inference_steps: + type: integer + title: Num Inference Steps + description: + Number of denoising steps. More steps usually lead to higher quality + images but slower inference. Modulated by strength. + default: 100 + num_images_per_prompt: + type: integer + title: Num Images Per Prompt + description: Number of images to generate per prompt. + default: 1 + type: object + required: + - prompt + - image + - model_id + title: Body_genImageToImage + Body_genImageToText: + properties: + image: + type: string + format: binary + title: Image + description: Uploaded image to transform with the pipeline. + prompt: + type: string + title: Prompt + description: Text prompt(s) to guide transformation. + default: "" + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for transformation. 
+ default: "" + type: object + required: + - image + - model_id + title: Body_genImageToText + Body_genImageToVideo: + properties: + image: + type: string + format: binary + title: Image + description: Uploaded image to generate a video from. + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for video generation. + default: "" + height: + type: integer + title: Height + description: The height in pixels of the generated video. + default: 576 + width: + type: integer + title: Width + description: The width in pixels of the generated video. + default: 1024 + fps: + type: integer + title: Fps + description: The frames per second of the generated video. + default: 6 + motion_bucket_id: + type: integer + title: Motion Bucket Id + description: + Used for conditioning the amount of motion for the generation. The + higher the number the more motion will be in the video. + default: 127 + noise_aug_strength: + type: number + title: Noise Aug Strength + description: + Amount of noise added to the conditioning image. Higher values + reduce resemblance to the conditioning image and increase motion. + default: 0.02 + safety_check: + type: boolean + title: Safety Check + description: + Perform a safety check to estimate if generated images could be + offensive or harmful. + default: true + seed: + type: integer + title: Seed + description: Seed for random number generation. + num_inference_steps: + type: integer + title: Num Inference Steps + description: + Number of denoising steps. More steps usually lead to higher quality + images but slower inference. Modulated by strength. + default: 25 + type: object + required: + - image + - model_id + title: Body_genImageToVideo + Body_genSegmentAnything2: + properties: + image: + type: string + format: binary + title: Image + description: Image to segment. + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for image generation. + default: "" + point_coords: + type: string + title: Point Coords + description: + Nx2 array of point prompts to the model, where each point is in + (X,Y) in pixels. + point_labels: + type: string + title: Point Labels + description: + Labels for the point prompts, where 1 indicates a foreground point + and 0 indicates a background point. + box: + type: string + title: Box + description: + A length 4 array given as a box prompt to the model, in XYXY format. + mask_input: + type: string + title: Mask Input + description: + A low-resolution mask input to the model, typically from a previous + prediction iteration, with the form 1xHxW (H=W=256 for SAM). + multimask_output: + type: boolean + title: Multimask Output + description: + If true, the model will return three masks for ambiguous input + prompts, often producing better masks than a single prediction. + default: true + return_logits: + type: boolean + title: Return Logits + description: + If true, returns un-thresholded mask logits instead of a binary + mask. + default: true + normalize_coords: + type: boolean + title: Normalize Coords + description: + If true, the point coordinates will be normalized to the range + [0,1], with point_coords expected to be with respect to image + dimensions. + default: true + type: object + required: + - image + - model_id + title: Body_genSegmentAnything2 + Body_genUpscale: + properties: + prompt: + type: string + title: Prompt + description: Text prompt(s) to guide upscaled image generation. 
+ image: + type: string + format: binary + title: Image + description: Uploaded image to modify with the pipeline. + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for upscaled image generation. + default: "" + safety_check: + type: boolean + title: Safety Check + description: + Perform a safety check to estimate if generated images could be + offensive or harmful. + default: true + seed: + type: integer + title: Seed + description: Seed for random number generation. + num_inference_steps: + type: integer + title: Num Inference Steps + description: + Number of denoising steps. More steps usually lead to higher quality + images but slower inference. Modulated by strength. + default: 75 + type: object + required: + - prompt + - image + - model_id + title: Body_genUpscale + Chunk: + properties: + timestamp: + items: {} + type: array + title: Timestamp + description: The timestamp of the chunk. + text: + type: string + title: Text + description: The text of the chunk. + type: object + required: + - timestamp + - text + title: Chunk + description: A chunk of text with a timestamp. + HTTPError: + properties: + detail: + allOf: + - $ref: "#/components/schemas/APIError" + description: Detailed error information. + type: object + required: + - detail + title: HTTPError + description: HTTP error response model. + HTTPValidationError: + properties: + detail: + items: + $ref: "#/components/schemas/ValidationError" + type: array + title: Detail + type: object + title: HTTPValidationError + ImageResponse: + properties: + images: + items: + $ref: "#/components/schemas/Media" + type: array + title: Images + description: The generated images. + type: object + required: + - images + title: ImageResponse + description: Response model for image generation. + ImageToTextResponse: + properties: + text: + type: string + title: Text + description: The generated text. + type: object + required: + - text + title: ImageToTextResponse + description: Response model for text generation. 
+ LLMChoice: + properties: + index: + type: integer + title: Index + finish_reason: + type: string + title: Finish Reason + default: "" + delta: + allOf: + - $ref: "#/components/schemas/LLMMessage" + message: + allOf: + - $ref: "#/components/schemas/LLMMessage" + type: object + required: + - index + title: LLMChoice + LLMMessage: + properties: + role: + type: string + title: Role + content: + type: string + title: Content + type: object + required: + - role + - content + title: LLMMessage + LLMRequest: + properties: + messages: + items: + $ref: "#/components/schemas/LLMMessage" + type: array + title: Messages + model: + type: string + title: Model + default: "" + temperature: + type: number + title: Temperature + default: 0.7 + max_tokens: + type: integer + title: Max Tokens + default: 256 + top_p: + type: number + title: Top P + default: 1.0 + top_k: + type: integer + title: Top K + default: -1 + stream: + type: boolean + title: Stream + default: false + type: object + required: + - messages + title: LLMRequest + LLMResponse: + properties: + id: + type: string + title: Id + model: + type: string + title: Model + created: + type: integer + title: Created + usage: + $ref: "#/components/schemas/LLMTokenUsage" + choices: + items: + $ref: "#/components/schemas/LLMChoice" + type: array + title: Choices + type: object + required: + - id + - model + - created + - usage + - choices + title: LLMResponse + LLMTokenUsage: + properties: + prompt_tokens: + type: integer + title: Prompt Tokens + completion_tokens: + type: integer + title: Completion Tokens + total_tokens: + type: integer + title: Total Tokens + type: object + required: + - prompt_tokens + - completion_tokens + - total_tokens + title: LLMTokenUsage + LiveVideoToVideoParams: + properties: + subscribe_url: + type: string + title: Subscribe Url + description: Source URL of the incoming stream to subscribe to. + publish_url: + type: string + title: Publish Url + description: Destination URL of the outgoing stream to publish. + control_url: + type: string + title: Control Url + description: + URL for subscribing via Trickle protocol for updates in the live + video-to-video generation params. + default: "" + events_url: + type: string + title: Events Url + description: + URL for publishing events via Trickle protocol for pipeline status + and logs. + default: "" + model_id: + type: string + title: Model Id + description: + Name of the pipeline to run in the live video to video job. Notice + that this is named model_id for consistency with other routes, but + it does not refer to a Hugging Face model ID. The exact model(s) + depends on the pipeline implementation and might be configurable via + the `params` argument. + default: "" + params: + type: object + title: Params + description: Initial parameters for the pipeline. 
+ default: {} + type: object + required: + - subscribe_url + - publish_url + - model_id + title: LiveVideoToVideoParams + LiveVideoToVideoResponse: + properties: + subscribe_url: + type: string + title: Subscribe Url + description: Source URL of the incoming stream to subscribe to + publish_url: + type: string + title: Publish Url + description: Destination URL of the outgoing stream to publish to + control_url: + type: string + title: Control Url + description: URL for updating the live video-to-video generation + default: "" + events_url: + type: string + title: Events Url + description: + URL for subscribing to events for pipeline status and logs + default: "" + type: object + required: + - subscribe_url + - publish_url + title: LiveVideoToVideoResponse + description: Response model for live video-to-video generation. + MasksResponse: + properties: + masks: + type: string + title: Masks + description: The generated masks. + scores: + type: string + title: Scores + description: The model's confidence scores for each generated mask. + logits: + type: string + title: Logits + description: The raw, unnormalized predictions (logits) for the masks. + type: object + required: + - masks + - scores + - logits + title: MasksResponse + description: Response model for object segmentation. + Media: + properties: + url: + type: string + title: Url + description: The URL where the media can be accessed. + seed: + type: integer + title: Seed + description: The seed used to generate the media. + nsfw: + type: boolean + title: Nsfw + description: Whether the media was flagged as NSFW. + type: object + required: + - url + - seed + - nsfw + title: Media + description: + A media object containing information about the generated media. + MediaURL: + properties: + url: + type: string + title: Url + description: The URL where the media can be accessed. + type: object + required: + - url + title: MediaURL + description: A URL from which media can be accessed. + TextResponse: + properties: + text: + type: string + title: Text + description: The generated text. + chunks: + items: + $ref: "#/components/schemas/Chunk" + type: array + title: Chunks + description: The generated text chunks. + type: object + required: + - text + - chunks + title: TextResponse + description: Response model for text generation. + TextToImageParams: + properties: + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for image generation. + default: "" + loras: + type: string + title: Loras + description: + 'A LoRA (Low-Rank Adaptation) model and its corresponding weight for + image generation. Example: { "latent-consistency/lcm-lora-sdxl": + 1.0, "nerijs/pixel-art-xl": 1.2}.' + default: "" + prompt: + type: string + title: Prompt + description: + Text prompt(s) to guide image generation. Separate multiple prompts + with '|' if supported by the model. + height: + type: integer + title: Height + description: The height in pixels of the generated image. + default: 576 + width: + type: integer + title: Width + description: The width in pixels of the generated image. + default: 1024 + guidance_scale: + type: number + title: Guidance Scale + description: + Encourages model to generate images closely linked to the text + prompt (higher values may reduce image quality). + default: 7.5 + negative_prompt: + type: string + title: Negative Prompt + description: + Text prompt(s) to guide what to exclude from image generation. + Ignored if guidance_scale < 1. 
+ default: "" + safety_check: + type: boolean + title: Safety Check + description: + Perform a safety check to estimate if generated images could be + offensive or harmful. + default: true + seed: + type: integer + title: Seed + description: Seed for random number generation. + num_inference_steps: + type: integer + title: Num Inference Steps + description: + Number of denoising steps. More steps usually lead to higher quality + images but slower inference. Modulated by strength. + default: 50 + num_images_per_prompt: + type: integer + title: Num Images Per Prompt + description: Number of images to generate per prompt. + default: 1 + type: object + required: + - prompt + - model_id + title: TextToImageParams + TextToSpeechParams: + properties: + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for text to speech generation. + default: "" + text: + type: string + title: Text + description: Text input for speech generation. + default: "" + description: + type: string + title: Description + description: + Description of speaker to steer text to speech generation. + default: + A male speaker delivers a slightly expressive and animated speech + with a moderate speed and pitch. + type: object + title: TextToSpeechParams + required: + - model_id + ValidationError: + properties: + loc: + items: + anyOf: + - type: string + - type: integer + type: array + title: Location + msg: + type: string + title: Message + type: + type: string + title: Error Type + type: object + required: + - loc + - msg + - type + title: ValidationError + VideoResponse: + properties: + images: + items: + $ref: "#/components/schemas/Media" + type: array + title: Images + description: The generated images. + type: object + required: + - images + title: VideoResponse + description: Response model for image generation. 
+ securitySchemes: + HTTPBearer: + type: http + scheme: bearer diff --git a/ai/worker/api/openapi.json b/ai/worker/api/openapi.json new file mode 100644 index 00000000..d05fbaa2 --- /dev/null +++ b/ai/worker/api/openapi.json @@ -0,0 +1,1854 @@ +{ + "openapi": "3.1.0", + "info": { + "title": "Livepeer AI Runner", + "description": "An application to run AI pipelines", + "version": "0.0.0" + }, + "servers": [ + { + "url": "https://dream-gateway.livepeer.cloud", + "description": "Livepeer Cloud Community Gateway" + }, + { + "url": "https://livepeer.studio/api/beta/generate", + "description": "Livepeer Studio Gateway" + } + ], + "paths": { + "/text-to-image": { + "post": { + "tags": ["generate"], + "summary": "Text To Image", + "description": "Generate images from text prompts.", + "operationId": "genTextToImage", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TextToImageParams" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageResponse", + "x-speakeasy-name-override": "data" + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "401": { + "description": "Unauthorized", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + }, + "500": { + "description": "Internal Server Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ], + "x-speakeasy-name-override": "textToImage" + } + }, + "/image-to-image": { + "post": { + "tags": ["generate"], + "summary": "Image To Image", + "description": "Apply image transformations to a provided image.", + "operationId": "genImageToImage", + "requestBody": { + "content": { + "multipart/form-data": { + "schema": { + "$ref": "#/components/schemas/Body_genImageToImage" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageResponse", + "x-speakeasy-name-override": "data" + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "401": { + "description": "Unauthorized", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + }, + "500": { + "description": "Internal Server Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ], + "x-speakeasy-name-override": "imageToImage" + } + }, + "/image-to-video": { + "post": { + "tags": ["generate"], + "summary": "Image To Video", + "description": "Generate a video from a provided image.", + "operationId": "genImageToVideo", + "requestBody": { + "content": { + "multipart/form-data": { + 
"schema": { + "$ref": "#/components/schemas/Body_genImageToVideo" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoResponse", + "x-speakeasy-name-override": "data" + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "401": { + "description": "Unauthorized", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + }, + "500": { + "description": "Internal Server Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ], + "x-speakeasy-name-override": "imageToVideo" + } + }, + "/upscale": { + "post": { + "tags": ["generate"], + "summary": "Upscale", + "description": "Upscale an image by increasing its resolution.", + "operationId": "genUpscale", + "requestBody": { + "content": { + "multipart/form-data": { + "schema": { + "$ref": "#/components/schemas/Body_genUpscale" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageResponse", + "x-speakeasy-name-override": "data" + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "401": { + "description": "Unauthorized", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + }, + "500": { + "description": "Internal Server Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ], + "x-speakeasy-name-override": "upscale" + } + }, + "/audio-to-text": { + "post": { + "tags": ["generate"], + "summary": "Audio To Text", + "description": "Transcribe audio files to text.", + "operationId": "genAudioToText", + "requestBody": { + "content": { + "multipart/form-data": { + "schema": { + "$ref": "#/components/schemas/Body_genAudioToText" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TextResponse", + "x-speakeasy-name-override": "data" + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "401": { + "description": "Unauthorized", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "413": { + "description": "Request Entity Too Large", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "415": { + "description": "Unsupported Media Type", + "content": { + "application/json": { + "schema": { + 
"$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + }, + "500": { + "description": "Internal Server Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ], + "x-speakeasy-name-override": "audioToText" + } + }, + "/segment-anything-2": { + "post": { + "tags": ["generate"], + "summary": "Segment Anything 2", + "description": "Segment objects in an image.", + "operationId": "genSegmentAnything2", + "requestBody": { + "content": { + "multipart/form-data": { + "schema": { + "$ref": "#/components/schemas/Body_genSegmentAnything2" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MasksResponse", + "x-speakeasy-name-override": "data" + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "401": { + "description": "Unauthorized", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + }, + "500": { + "description": "Internal Server Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ], + "x-speakeasy-name-override": "segmentAnything2" + } + }, + "/llm": { + "post": { + "tags": ["generate"], + "summary": "LLM", + "description": "Generate text using a language model.", + "operationId": "genLLM", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LLMRequest" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LLMResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "401": { + "description": "Unauthorized", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + }, + "500": { + "description": "Internal Server Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ], + "x-speakeasy-name-override": "llm" + } + }, + "/image-to-text": { + "post": { + "tags": ["generate"], + "summary": "Image To Text", + "description": "Transform image files to text.", + "operationId": "genImageToText", + "requestBody": { + "content": { + "multipart/form-data": { + "schema": { + "$ref": "#/components/schemas/Body_genImageToText" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/ImageToTextResponse", + "x-speakeasy-name-override": "data" + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "401": { + "description": "Unauthorized", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "413": { + "description": "Request Entity Too Large", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + }, + "500": { + "description": "Internal Server Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ], + "x-speakeasy-name-override": "imageToText" + } + }, + "/live-video-to-video": { + "post": { + "tags": ["generate"], + "summary": "Live Video To Video", + "description": "Apply transformations to a live video streamed to the returned endpoints.", + "operationId": "genLiveVideoToVideo", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LiveVideoToVideoParams" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LiveVideoToVideoResponse", + "x-speakeasy-name-override": "data" + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "401": { + "description": "Unauthorized", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + }, + "500": { + "description": "Internal Server Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ], + "x-speakeasy-name-override": "liveVideoToVideo" + } + }, + "/text-to-speech": { + "post": { + "tags": ["generate"], + "summary": "Text To Speech", + "description": "Generate a text-to-speech audio file based on the provided text input and speaker description.", + "operationId": "genTextToSpeech", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TextToSpeechParams" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AudioResponse", + "x-speakeasy-name-override": "data" + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "401": { + "description": "Unauthorized", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + }, + "500": { + 
"description": "Internal Server Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + } + } + }, + "security": [ + { + "HTTPBearer": [] + } + ], + "x-speakeasy-name-override": "textToSpeech" + } + }, + "/health": { + "get": { + "summary": "Health", + "operationId": "health", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HealthCheck" + } + } + } + } + } + } + }, + "/hardware/info": { + "get": { + "summary": "Hardware Info", + "operationId": "hardware_info", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HardwareInformation" + } + } + } + } + } + } + }, + "/hardware/stats": { + "get": { + "summary": "Hardware Stats", + "operationId": "hardware_stats", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HardwareStats" + } + } + } + } + } + } + } + }, + "components": { + "schemas": { + "APIError": { + "properties": { + "msg": { + "type": "string", + "title": "Msg", + "description": "The error message." + } + }, + "type": "object", + "required": ["msg"], + "title": "APIError", + "description": "API error response model." + }, + "AudioResponse": { + "properties": { + "audio": { + "allOf": [ + { + "$ref": "#/components/schemas/MediaURL" + } + ], + "description": "The generated audio." + } + }, + "type": "object", + "required": ["audio"], + "title": "AudioResponse", + "description": "Response model for audio generation." + }, + "Body_genAudioToText": { + "properties": { + "audio": { + "type": "string", + "format": "binary", + "title": "Audio", + "description": "Uploaded audio file to be transcribed." + }, + "model_id": { + "type": "string", + "title": "Model Id", + "description": "Hugging Face model ID used for transcription.", + "default": "" + }, + "return_timestamps": { + "type": "string", + "title": "Return Timestamps", + "description": "Return timestamps for the transcribed text. Supported values: 'sentence', 'word', or a string boolean ('true' or 'false'). Default is 'true' ('sentence'). 'false' means no timestamps. 'word' means word-based timestamps.", + "default": "true" + }, + "metadata": { + "type": "string", + "title": "Metadata", + "description": "Additional job information to be passed to the pipeline.", + "default": "{}" + } + }, + "type": "object", + "required": ["audio"], + "title": "Body_genAudioToText" + }, + "Body_genImageToImage": { + "properties": { + "prompt": { + "type": "string", + "title": "Prompt", + "description": "Text prompt(s) to guide image generation." + }, + "image": { + "type": "string", + "format": "binary", + "title": "Image", + "description": "Uploaded image to modify with the pipeline." + }, + "model_id": { + "type": "string", + "title": "Model Id", + "description": "Hugging Face model ID used for image generation.", + "default": "" + }, + "loras": { + "type": "string", + "title": "Loras", + "description": "A LoRA (Low-Rank Adaptation) model and its corresponding weight for image generation. 
Example: { \"latent-consistency/lcm-lora-sdxl\": 1.0, \"nerijs/pixel-art-xl\": 1.2}.", + "default": "" + }, + "strength": { + "type": "number", + "title": "Strength", + "description": "Degree of transformation applied to the reference image (0 to 1).", + "default": 0.8 + }, + "guidance_scale": { + "type": "number", + "title": "Guidance Scale", + "description": "Encourages model to generate images closely linked to the text prompt (higher values may reduce image quality).", + "default": 7.5 + }, + "image_guidance_scale": { + "type": "number", + "title": "Image Guidance Scale", + "description": "Degree to which the generated image is pushed towards the initial image.", + "default": 1.5 + }, + "negative_prompt": { + "type": "string", + "title": "Negative Prompt", + "description": "Text prompt(s) to guide what to exclude from image generation. Ignored if guidance_scale < 1.", + "default": "" + }, + "safety_check": { + "type": "boolean", + "title": "Safety Check", + "description": "Perform a safety check to estimate if generated images could be offensive or harmful.", + "default": true + }, + "seed": { + "type": "integer", + "title": "Seed", + "description": "Seed for random number generation." + }, + "num_inference_steps": { + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of denoising steps. More steps usually lead to higher quality images but slower inference. Modulated by strength.", + "default": 100 + }, + "num_images_per_prompt": { + "type": "integer", + "title": "Num Images Per Prompt", + "description": "Number of images to generate per prompt.", + "default": 1 + } + }, + "type": "object", + "required": ["prompt", "image"], + "title": "Body_genImageToImage" + }, + "Body_genImageToText": { + "properties": { + "image": { + "type": "string", + "format": "binary", + "title": "Image", + "description": "Uploaded image to transform with the pipeline." + }, + "prompt": { + "type": "string", + "title": "Prompt", + "description": "Text prompt(s) to guide transformation.", + "default": "" + }, + "model_id": { + "type": "string", + "title": "Model Id", + "description": "Hugging Face model ID used for transformation.", + "default": "" + } + }, + "type": "object", + "required": ["image"], + "title": "Body_genImageToText" + }, + "Body_genImageToVideo": { + "properties": { + "image": { + "type": "string", + "format": "binary", + "title": "Image", + "description": "Uploaded image to generate a video from." + }, + "model_id": { + "type": "string", + "title": "Model Id", + "description": "Hugging Face model ID used for video generation.", + "default": "" + }, + "height": { + "type": "integer", + "title": "Height", + "description": "The height in pixels of the generated video.", + "default": 576 + }, + "width": { + "type": "integer", + "title": "Width", + "description": "The width in pixels of the generated video.", + "default": 1024 + }, + "fps": { + "type": "integer", + "title": "Fps", + "description": "The frames per second of the generated video.", + "default": 6 + }, + "motion_bucket_id": { + "type": "integer", + "title": "Motion Bucket Id", + "description": "Used for conditioning the amount of motion for the generation. The higher the number the more motion will be in the video.", + "default": 127 + }, + "noise_aug_strength": { + "type": "number", + "title": "Noise Aug Strength", + "description": "Amount of noise added to the conditioning image. 
Higher values reduce resemblance to the conditioning image and increase motion.", + "default": 0.02 + }, + "safety_check": { + "type": "boolean", + "title": "Safety Check", + "description": "Perform a safety check to estimate if generated images could be offensive or harmful.", + "default": true + }, + "seed": { + "type": "integer", + "title": "Seed", + "description": "Seed for random number generation." + }, + "num_inference_steps": { + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of denoising steps. More steps usually lead to higher quality images but slower inference. Modulated by strength.", + "default": 25 + } + }, + "type": "object", + "required": ["image"], + "title": "Body_genImageToVideo" + }, + "Body_genSegmentAnything2": { + "properties": { + "image": { + "type": "string", + "format": "binary", + "title": "Image", + "description": "Image to segment." + }, + "model_id": { + "type": "string", + "title": "Model Id", + "description": "Hugging Face model ID used for image generation.", + "default": "" + }, + "point_coords": { + "type": "string", + "title": "Point Coords", + "description": "Nx2 array of point prompts to the model, where each point is in (X,Y) in pixels." + }, + "point_labels": { + "type": "string", + "title": "Point Labels", + "description": "Labels for the point prompts, where 1 indicates a foreground point and 0 indicates a background point." + }, + "box": { + "type": "string", + "title": "Box", + "description": "A length 4 array given as a box prompt to the model, in XYXY format." + }, + "mask_input": { + "type": "string", + "title": "Mask Input", + "description": "A low-resolution mask input to the model, typically from a previous prediction iteration, with the form 1xHxW (H=W=256 for SAM)." + }, + "multimask_output": { + "type": "boolean", + "title": "Multimask Output", + "description": "If true, the model will return three masks for ambiguous input prompts, often producing better masks than a single prediction.", + "default": true + }, + "return_logits": { + "type": "boolean", + "title": "Return Logits", + "description": "If true, returns un-thresholded mask logits instead of a binary mask.", + "default": true + }, + "normalize_coords": { + "type": "boolean", + "title": "Normalize Coords", + "description": "If true, the point coordinates will be normalized to the range [0,1], with point_coords expected to be with respect to image dimensions.", + "default": true + } + }, + "type": "object", + "required": ["image"], + "title": "Body_genSegmentAnything2" + }, + "Body_genUpscale": { + "properties": { + "prompt": { + "type": "string", + "title": "Prompt", + "description": "Text prompt(s) to guide upscaled image generation." + }, + "image": { + "type": "string", + "format": "binary", + "title": "Image", + "description": "Uploaded image to modify with the pipeline." + }, + "model_id": { + "type": "string", + "title": "Model Id", + "description": "Hugging Face model ID used for upscaled image generation.", + "default": "" + }, + "safety_check": { + "type": "boolean", + "title": "Safety Check", + "description": "Perform a safety check to estimate if generated images could be offensive or harmful.", + "default": true + }, + "seed": { + "type": "integer", + "title": "Seed", + "description": "Seed for random number generation." + }, + "num_inference_steps": { + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of denoising steps. More steps usually lead to higher quality images but slower inference. 
Modulated by strength.", + "default": 75 + } + }, + "type": "object", + "required": ["prompt", "image"], + "title": "Body_genUpscale" + }, + "Chunk": { + "properties": { + "timestamp": { + "items": {}, + "type": "array", + "title": "Timestamp", + "description": "The timestamp of the chunk." + }, + "text": { + "type": "string", + "title": "Text", + "description": "The text of the chunk." + } + }, + "type": "object", + "required": ["timestamp", "text"], + "title": "Chunk", + "description": "A chunk of text with a timestamp." + }, + "GPUComputeInfo": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "name": { + "type": "string", + "title": "Name" + }, + "memory_total": { + "type": "integer", + "title": "Memory Total" + }, + "memory_free": { + "type": "integer", + "title": "Memory Free" + }, + "major": { + "type": "integer", + "title": "Major" + }, + "minor": { + "type": "integer", + "title": "Minor" + } + }, + "type": "object", + "required": [ + "id", + "name", + "memory_total", + "memory_free", + "major", + "minor" + ], + "title": "GPUComputeInfo", + "description": "Model for detailed GPU compute information." + }, + "GPUUtilizationInfo": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "name": { + "type": "string", + "title": "Name" + }, + "memory_total": { + "type": "integer", + "title": "Memory Total" + }, + "memory_free": { + "type": "integer", + "title": "Memory Free" + }, + "utilization_compute": { + "type": "integer", + "title": "Utilization Compute" + }, + "utilization_memory": { + "type": "integer", + "title": "Utilization Memory" + } + }, + "type": "object", + "required": [ + "id", + "name", + "memory_total", + "memory_free", + "utilization_compute", + "utilization_memory" + ], + "title": "GPUUtilizationInfo", + "description": "Model for GPU utilization statistics." + }, + "HTTPError": { + "properties": { + "detail": { + "allOf": [ + { + "$ref": "#/components/schemas/APIError" + } + ], + "description": "Detailed error information." + } + }, + "type": "object", + "required": ["detail"], + "title": "HTTPError", + "description": "HTTP error response model." + }, + "HTTPValidationError": { + "properties": { + "detail": { + "items": { + "$ref": "#/components/schemas/ValidationError" + }, + "type": "array", + "title": "Detail" + } + }, + "type": "object", + "title": "HTTPValidationError" + }, + "HardwareInformation": { + "properties": { + "pipeline": { + "type": "string", + "title": "Pipeline" + }, + "model_id": { + "type": "string", + "title": "Model Id" + }, + "gpu_info": { + "additionalProperties": { + "$ref": "#/components/schemas/GPUComputeInfo" + }, + "type": "object", + "title": "Gpu Info" + } + }, + "type": "object", + "required": ["pipeline", "model_id", "gpu_info"], + "title": "HardwareInformation", + "description": "Response model for GPU information." + }, + "HardwareStats": { + "properties": { + "pipeline": { + "type": "string", + "title": "Pipeline" + }, + "model_id": { + "type": "string", + "title": "Model Id" + }, + "gpu_stats": { + "additionalProperties": { + "$ref": "#/components/schemas/GPUUtilizationInfo" + }, + "type": "object", + "title": "Gpu Stats" + } + }, + "type": "object", + "required": ["pipeline", "model_id", "gpu_stats"], + "title": "HardwareStats", + "description": "Response model for real-time GPU statistics." 
+ }, + "HealthCheck": { + "properties": { + "status": { + "type": "string", + "enum": ["OK", "ERROR", "IDLE"], + "title": "Status", + "description": "The health status of the pipeline" + } + }, + "type": "object", + "required": ["status"], + "title": "HealthCheck" + }, + "ImageResponse": { + "properties": { + "images": { + "items": { + "$ref": "#/components/schemas/Media" + }, + "type": "array", + "title": "Images", + "description": "The generated images." + } + }, + "type": "object", + "required": ["images"], + "title": "ImageResponse", + "description": "Response model for image generation." + }, + "ImageToTextResponse": { + "properties": { + "text": { + "type": "string", + "title": "Text", + "description": "The generated text." + } + }, + "type": "object", + "required": ["text"], + "title": "ImageToTextResponse", + "description": "Response model for text generation." + }, + "LLMChoice": { + "properties": { + "index": { + "type": "integer", + "title": "Index" + }, + "finish_reason": { + "type": "string", + "title": "Finish Reason", + "default": "" + }, + "delta": { + "allOf": [ + { + "$ref": "#/components/schemas/LLMMessage" + } + ] + }, + "message": { + "allOf": [ + { + "$ref": "#/components/schemas/LLMMessage" + } + ] + } + }, + "type": "object", + "required": ["index"], + "title": "LLMChoice" + }, + "LLMMessage": { + "properties": { + "role": { + "type": "string", + "title": "Role" + }, + "content": { + "type": "string", + "title": "Content" + } + }, + "type": "object", + "required": ["role", "content"], + "title": "LLMMessage" + }, + "LLMRequest": { + "properties": { + "messages": { + "items": { + "$ref": "#/components/schemas/LLMMessage" + }, + "type": "array", + "title": "Messages" + }, + "model": { + "type": "string", + "title": "Model", + "default": "" + }, + "temperature": { + "type": "number", + "title": "Temperature", + "default": 0.7 + }, + "max_tokens": { + "type": "integer", + "title": "Max Tokens", + "default": 256 + }, + "top_p": { + "type": "number", + "title": "Top P", + "default": 1 + }, + "top_k": { + "type": "integer", + "title": "Top K", + "default": -1 + }, + "stream": { + "type": "boolean", + "title": "Stream", + "default": false + } + }, + "type": "object", + "required": ["messages"], + "title": "LLMRequest" + }, + "LLMResponse": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "model": { + "type": "string", + "title": "Model" + }, + "created": { + "type": "integer", + "title": "Created" + }, + "usage": { + "$ref": "#/components/schemas/LLMTokenUsage" + }, + "choices": { + "items": { + "$ref": "#/components/schemas/LLMChoice" + }, + "type": "array", + "title": "Choices" + } + }, + "type": "object", + "required": ["id", "model", "created", "usage", "choices"], + "title": "LLMResponse" + }, + "LLMTokenUsage": { + "properties": { + "prompt_tokens": { + "type": "integer", + "title": "Prompt Tokens" + }, + "completion_tokens": { + "type": "integer", + "title": "Completion Tokens" + }, + "total_tokens": { + "type": "integer", + "title": "Total Tokens" + } + }, + "type": "object", + "required": ["prompt_tokens", "completion_tokens", "total_tokens"], + "title": "LLMTokenUsage" + }, + "LiveVideoToVideoParams": { + "properties": { + "subscribe_url": { + "type": "string", + "title": "Subscribe Url", + "description": "Source URL of the incoming stream to subscribe to." + }, + "publish_url": { + "type": "string", + "title": "Publish Url", + "description": "Destination URL of the outgoing stream to publish." 
+ }, + "control_url": { + "type": "string", + "title": "Control Url", + "description": "URL for subscribing via Trickle protocol for updates in the live video-to-video generation params.", + "default": "" + }, + "events_url": { + "type": "string", + "title": "Events Url", + "description": "URL for publishing events via Trickle protocol for pipeline status and logs.", + "default": "" + }, + "model_id": { + "type": "string", + "title": "Model Id", + "description": "Name of the pipeline to run in the live video to video job. Notice that this is named model_id for consistency with other routes, but it does not refer to a Hugging Face model ID. The exact model(s) depends on the pipeline implementation and might be configurable via the `params` argument.", + "default": "" + }, + "params": { + "type": "object", + "title": "Params", + "description": "Initial parameters for the pipeline.", + "default": {} + } + }, + "type": "object", + "required": ["subscribe_url", "publish_url"], + "title": "LiveVideoToVideoParams" + }, + "LiveVideoToVideoResponse": { + "properties": { + "subscribe_url": { + "type": "string", + "title": "Subscribe Url", + "description": "Source URL of the incoming stream to subscribe to" + }, + "publish_url": { + "type": "string", + "title": "Publish Url", + "description": "Destination URL of the outgoing stream to publish to" + }, + "control_url": { + "type": "string", + "title": "Control Url", + "description": "URL for updating the live video-to-video generation", + "default": "" + }, + "events_url": { + "type": "string", + "title": "Events Url", + "description": "URL for subscribing to events for pipeline status and logs", + "default": "" + } + }, + "type": "object", + "required": ["subscribe_url", "publish_url"], + "title": "LiveVideoToVideoResponse", + "description": "Response model for live video-to-video generation." + }, + "MasksResponse": { + "properties": { + "masks": { + "type": "string", + "title": "Masks", + "description": "The generated masks." + }, + "scores": { + "type": "string", + "title": "Scores", + "description": "The model's confidence scores for each generated mask." + }, + "logits": { + "type": "string", + "title": "Logits", + "description": "The raw, unnormalized predictions (logits) for the masks." + } + }, + "type": "object", + "required": ["masks", "scores", "logits"], + "title": "MasksResponse", + "description": "Response model for object segmentation." + }, + "Media": { + "properties": { + "url": { + "type": "string", + "title": "Url", + "description": "The URL where the media can be accessed." + }, + "seed": { + "type": "integer", + "title": "Seed", + "description": "The seed used to generate the media." + }, + "nsfw": { + "type": "boolean", + "title": "Nsfw", + "description": "Whether the media was flagged as NSFW." + } + }, + "type": "object", + "required": ["url", "seed", "nsfw"], + "title": "Media", + "description": "A media object containing information about the generated media." + }, + "MediaURL": { + "properties": { + "url": { + "type": "string", + "title": "Url", + "description": "The URL where the media can be accessed." + } + }, + "type": "object", + "required": ["url"], + "title": "MediaURL", + "description": "A URL from which media can be accessed." + }, + "TextResponse": { + "properties": { + "text": { + "type": "string", + "title": "Text", + "description": "The generated text." 
+ }, + "chunks": { + "items": { + "$ref": "#/components/schemas/Chunk" + }, + "type": "array", + "title": "Chunks", + "description": "The generated text chunks." + } + }, + "type": "object", + "required": ["text", "chunks"], + "title": "TextResponse", + "description": "Response model for text generation." + }, + "TextToImageParams": { + "properties": { + "model_id": { + "type": "string", + "title": "Model Id", + "description": "Hugging Face model ID used for image generation.", + "default": "" + }, + "loras": { + "type": "string", + "title": "Loras", + "description": "A LoRA (Low-Rank Adaptation) model and its corresponding weight for image generation. Example: { \"latent-consistency/lcm-lora-sdxl\": 1.0, \"nerijs/pixel-art-xl\": 1.2}.", + "default": "" + }, + "prompt": { + "type": "string", + "title": "Prompt", + "description": "Text prompt(s) to guide image generation. Separate multiple prompts with '|' if supported by the model." + }, + "height": { + "type": "integer", + "title": "Height", + "description": "The height in pixels of the generated image.", + "default": 576 + }, + "width": { + "type": "integer", + "title": "Width", + "description": "The width in pixels of the generated image.", + "default": 1024 + }, + "guidance_scale": { + "type": "number", + "title": "Guidance Scale", + "description": "Encourages model to generate images closely linked to the text prompt (higher values may reduce image quality).", + "default": 7.5 + }, + "negative_prompt": { + "type": "string", + "title": "Negative Prompt", + "description": "Text prompt(s) to guide what to exclude from image generation. Ignored if guidance_scale < 1.", + "default": "" + }, + "safety_check": { + "type": "boolean", + "title": "Safety Check", + "description": "Perform a safety check to estimate if generated images could be offensive or harmful.", + "default": true + }, + "seed": { + "type": "integer", + "title": "Seed", + "description": "Seed for random number generation." + }, + "num_inference_steps": { + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of denoising steps. More steps usually lead to higher quality images but slower inference. Modulated by strength.", + "default": 50 + }, + "num_images_per_prompt": { + "type": "integer", + "title": "Num Images Per Prompt", + "description": "Number of images to generate per prompt.", + "default": 1 + } + }, + "type": "object", + "required": ["prompt"], + "title": "TextToImageParams" + }, + "TextToSpeechParams": { + "properties": { + "model_id": { + "type": "string", + "title": "Model Id", + "description": "Hugging Face model ID used for text to speech generation.", + "default": "" + }, + "text": { + "type": "string", + "title": "Text", + "description": "Text input for speech generation.", + "default": "" + }, + "description": { + "type": "string", + "title": "Description", + "description": "Description of speaker to steer text to speech generation.", + "default": "A male speaker delivers a slightly expressive and animated speech with a moderate speed and pitch." 
+ } + }, + "type": "object", + "title": "TextToSpeechParams" + }, + "ValidationError": { + "properties": { + "loc": { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + } + ] + }, + "type": "array", + "title": "Location" + }, + "msg": { + "type": "string", + "title": "Message" + }, + "type": { + "type": "string", + "title": "Error Type" + } + }, + "type": "object", + "required": ["loc", "msg", "type"], + "title": "ValidationError" + }, + "VideoResponse": { + "properties": { + "frames": { + "items": { + "items": { + "$ref": "#/components/schemas/Media" + }, + "type": "array" + }, + "type": "array", + "title": "Frames", + "description": "The generated video frames." + } + }, + "type": "object", + "required": ["frames"], + "title": "VideoResponse", + "description": "Response model for video generation." + } + }, + "securitySchemes": { + "HTTPBearer": { + "type": "http", + "scheme": "bearer" + } + } + } +} diff --git a/ai/worker/api/openapi.yaml b/ai/worker/api/openapi.yaml new file mode 100644 index 00000000..7213493d --- /dev/null +++ b/ai/worker/api/openapi.yaml @@ -0,0 +1,1425 @@ +# !!Auto-generated by 'gen_openapi.py'. DO NOT EDIT!! +openapi: 3.1.0 +info: + title: Livepeer AI Runner + description: An application to run AI pipelines + version: 0.0.0 +servers: + - url: https://dream-gateway.livepeer.cloud + description: Livepeer Cloud Community Gateway + - url: https://livepeer.studio/api/beta/generate + description: Livepeer Studio Gateway +paths: + /text-to-image: + post: + tags: + - generate + summary: Text To Image + description: Generate images from text prompts. + operationId: genTextToImage + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/TextToImageParams" + required: true + responses: + "200": + description: Successful Response + content: + application/json: + schema: + $ref: "#/components/schemas/ImageResponse" + x-speakeasy-name-override: data + "400": + description: Bad Request + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "401": + description: Unauthorized + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "500": + description: Internal Server Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "422": + description: Validation Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPValidationError" + security: + - HTTPBearer: [] + x-speakeasy-name-override: textToImage + /image-to-image: + post: + tags: + - generate + summary: Image To Image + description: Apply image transformations to a provided image. 
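+      # Illustrative request, not part of the generated spec: a minimal
+      # multipart call against the Livepeer Cloud gateway listed above.
+      # $API_TOKEN and input.png are placeholders.
+      #   curl -X POST https://dream-gateway.livepeer.cloud/image-to-image \
+      #     -H "Authorization: Bearer $API_TOKEN" \
+      #     -F prompt="a watercolor painting" \
+      #     -F image=@input.png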
+ operationId: genImageToImage + requestBody: + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/Body_genImageToImage" + required: true + responses: + "200": + description: Successful Response + content: + application/json: + schema: + $ref: "#/components/schemas/ImageResponse" + x-speakeasy-name-override: data + "400": + description: Bad Request + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "401": + description: Unauthorized + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "500": + description: Internal Server Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "422": + description: Validation Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPValidationError" + security: + - HTTPBearer: [] + x-speakeasy-name-override: imageToImage + /image-to-video: + post: + tags: + - generate + summary: Image To Video + description: Generate a video from a provided image. + operationId: genImageToVideo + requestBody: + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/Body_genImageToVideo" + required: true + responses: + "200": + description: Successful Response + content: + application/json: + schema: + $ref: "#/components/schemas/VideoResponse" + x-speakeasy-name-override: data + "400": + description: Bad Request + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "401": + description: Unauthorized + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "500": + description: Internal Server Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "422": + description: Validation Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPValidationError" + security: + - HTTPBearer: [] + x-speakeasy-name-override: imageToVideo + /upscale: + post: + tags: + - generate + summary: Upscale + description: Upscale an image by increasing its resolution. + operationId: genUpscale + requestBody: + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/Body_genUpscale" + required: true + responses: + "200": + description: Successful Response + content: + application/json: + schema: + $ref: "#/components/schemas/ImageResponse" + x-speakeasy-name-override: data + "400": + description: Bad Request + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "401": + description: Unauthorized + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "500": + description: Internal Server Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "422": + description: Validation Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPValidationError" + security: + - HTTPBearer: [] + x-speakeasy-name-override: upscale + /audio-to-text: + post: + tags: + - generate + summary: Audio To Text + description: Transcribe audio files to text. 
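+      # Illustrative request: transcribe a local file via multipart upload,
+      # asking for word-level timestamps. $API_TOKEN and speech.mp3 are
+      # placeholders.
+      #   curl -X POST https://dream-gateway.livepeer.cloud/audio-to-text \
+      #     -H "Authorization: Bearer $API_TOKEN" \
+      #     -F audio=@speech.mp3 \
+      #     -F return_timestamps=word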
+ operationId: genAudioToText + requestBody: + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/Body_genAudioToText" + required: true + responses: + "200": + description: Successful Response + content: + application/json: + schema: + $ref: "#/components/schemas/TextResponse" + x-speakeasy-name-override: data + "400": + description: Bad Request + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "401": + description: Unauthorized + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "413": + description: Request Entity Too Large + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "415": + description: Unsupported Media Type + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "500": + description: Internal Server Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "422": + description: Validation Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPValidationError" + security: + - HTTPBearer: [] + x-speakeasy-name-override: audioToText + /segment-anything-2: + post: + tags: + - generate + summary: Segment Anything 2 + description: Segment objects in an image. + operationId: genSegmentAnything2 + requestBody: + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/Body_genSegmentAnything2" + required: true + responses: + "200": + description: Successful Response + content: + application/json: + schema: + $ref: "#/components/schemas/MasksResponse" + x-speakeasy-name-override: data + "400": + description: Bad Request + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "401": + description: Unauthorized + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "500": + description: Internal Server Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "422": + description: Validation Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPValidationError" + security: + - HTTPBearer: [] + x-speakeasy-name-override: segmentAnything2 + /llm: + post: + tags: + - generate + summary: LLM + description: Generate text using a language model. + operationId: genLLM + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/LLMRequest" + required: true + responses: + "200": + description: Successful Response + content: + application/json: + schema: + $ref: "#/components/schemas/LLMResponse" + "400": + description: Bad Request + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "401": + description: Unauthorized + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "500": + description: Internal Server Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "422": + description: Validation Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPValidationError" + security: + - HTTPBearer: [] + x-speakeasy-name-override: llm + /image-to-text: + post: + tags: + - generate + summary: Image To Text + description: Transform image files to text. 
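+      # Illustrative request: caption an image. Only `image` is required;
+      # `prompt` optionally steers the output. photo.jpg is a placeholder.
+      #   curl -X POST https://dream-gateway.livepeer.cloud/image-to-text \
+      #     -H "Authorization: Bearer $API_TOKEN" \
+      #     -F image=@photo.jpg \
+      #     -F prompt="Describe this image"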
+ operationId: genImageToText + requestBody: + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/Body_genImageToText" + required: true + responses: + "200": + description: Successful Response + content: + application/json: + schema: + $ref: "#/components/schemas/ImageToTextResponse" + x-speakeasy-name-override: data + "400": + description: Bad Request + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "401": + description: Unauthorized + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "413": + description: Request Entity Too Large + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "500": + description: Internal Server Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "422": + description: Validation Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPValidationError" + security: + - HTTPBearer: [] + x-speakeasy-name-override: imageToText + /live-video-to-video: + post: + tags: + - generate + summary: Live Video To Video + description: + Apply transformations to a live video streamed to the returned + endpoints. + operationId: genLiveVideoToVideo + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/LiveVideoToVideoParams" + required: true + responses: + "200": + description: Successful Response + content: + application/json: + schema: + $ref: "#/components/schemas/LiveVideoToVideoResponse" + x-speakeasy-name-override: data + "400": + description: Bad Request + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "401": + description: Unauthorized + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "500": + description: Internal Server Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "422": + description: Validation Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPValidationError" + security: + - HTTPBearer: [] + x-speakeasy-name-override: liveVideoToVideo + /text-to-speech: + post: + tags: + - generate + summary: Text To Speech + description: + Generate a text-to-speech audio file based on the provided text input + and speaker description. 
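+      # Illustrative request: JSON body per TextToSpeechParams (all fields
+      # are optional and fall back to the documented defaults).
+      #   curl -X POST https://dream-gateway.livepeer.cloud/text-to-speech \
+      #     -H "Authorization: Bearer $API_TOKEN" \
+      #     -H "Content-Type: application/json" \
+      #     -d '{"text": "Welcome to Livepeer.", "description": "A calm, low-pitched female voice."}'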
+ operationId: genTextToSpeech + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/TextToSpeechParams" + required: true + responses: + "200": + description: Successful Response + content: + application/json: + schema: + $ref: "#/components/schemas/AudioResponse" + x-speakeasy-name-override: data + "400": + description: Bad Request + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "401": + description: Unauthorized + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "500": + description: Internal Server Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPError" + "422": + description: Validation Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPValidationError" + security: + - HTTPBearer: [] + x-speakeasy-name-override: textToSpeech + /health: + get: + summary: Health + operationId: health + responses: + "200": + description: Successful Response + content: + application/json: + schema: + $ref: "#/components/schemas/HealthCheck" + /hardware/info: + get: + summary: Hardware Info + operationId: hardware_info + responses: + "200": + description: Successful Response + content: + application/json: + schema: + $ref: "#/components/schemas/HardwareInformation" + /hardware/stats: + get: + summary: Hardware Stats + operationId: hardware_stats + responses: + "200": + description: Successful Response + content: + application/json: + schema: + $ref: "#/components/schemas/HardwareStats" +components: + schemas: + APIError: + properties: + msg: + type: string + title: Msg + description: The error message. + type: object + required: + - msg + title: APIError + description: API error response model. + AudioResponse: + properties: + audio: + allOf: + - $ref: "#/components/schemas/MediaURL" + description: The generated audio. + type: object + required: + - audio + title: AudioResponse + description: Response model for audio generation. + Body_genAudioToText: + properties: + audio: + type: string + format: binary + title: Audio + description: Uploaded audio file to be transcribed. + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for transcription. + default: "" + return_timestamps: + type: string + title: Return Timestamps + description: + "Return timestamps for the transcribed text. Supported values: + 'sentence', 'word', or a string boolean ('true' or 'false'). Default + is 'true' ('sentence'). 'false' means no timestamps. 'word' means + word-based timestamps." + default: "true" + metadata: + type: string + title: Metadata + description: Additional job information to be passed to the pipeline. + default: "{}" + type: object + required: + - audio + title: Body_genAudioToText + Body_genImageToImage: + properties: + prompt: + type: string + title: Prompt + description: Text prompt(s) to guide image generation. + image: + type: string + format: binary + title: Image + description: Uploaded image to modify with the pipeline. + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for image generation. + default: "" + loras: + type: string + title: Loras + description: + 'A LoRA (Low-Rank Adaptation) model and its corresponding weight for + image generation. Example: { "latent-consistency/lcm-lora-sdxl": + 1.0, "nerijs/pixel-art-xl": 1.2}.' + default: "" + strength: + type: number + title: Strength + description: + Degree of transformation applied to the reference image (0 to 1). 
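+          # Reading of the scale (an interpretation, not spec text): values
+          # near 0 keep the result close to the input image, while values
+          # near 1.0 let the prompt dominate the transformation.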
+ default: 0.8 + guidance_scale: + type: number + title: Guidance Scale + description: + Encourages model to generate images closely linked to the text + prompt (higher values may reduce image quality). + default: 7.5 + image_guidance_scale: + type: number + title: Image Guidance Scale + description: + Degree to which the generated image is pushed towards the initial + image. + default: 1.5 + negative_prompt: + type: string + title: Negative Prompt + description: + Text prompt(s) to guide what to exclude from image generation. + Ignored if guidance_scale < 1. + default: "" + safety_check: + type: boolean + title: Safety Check + description: + Perform a safety check to estimate if generated images could be + offensive or harmful. + default: true + seed: + type: integer + title: Seed + description: Seed for random number generation. + num_inference_steps: + type: integer + title: Num Inference Steps + description: + Number of denoising steps. More steps usually lead to higher quality + images but slower inference. Modulated by strength. + default: 100 + num_images_per_prompt: + type: integer + title: Num Images Per Prompt + description: Number of images to generate per prompt. + default: 1 + type: object + required: + - prompt + - image + title: Body_genImageToImage + Body_genImageToText: + properties: + image: + type: string + format: binary + title: Image + description: Uploaded image to transform with the pipeline. + prompt: + type: string + title: Prompt + description: Text prompt(s) to guide transformation. + default: "" + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for transformation. + default: "" + type: object + required: + - image + title: Body_genImageToText + Body_genImageToVideo: + properties: + image: + type: string + format: binary + title: Image + description: Uploaded image to generate a video from. + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for video generation. + default: "" + height: + type: integer + title: Height + description: The height in pixels of the generated video. + default: 576 + width: + type: integer + title: Width + description: The width in pixels of the generated video. + default: 1024 + fps: + type: integer + title: Fps + description: The frames per second of the generated video. + default: 6 + motion_bucket_id: + type: integer + title: Motion Bucket Id + description: + Used for conditioning the amount of motion for the generation. The + higher the number the more motion will be in the video. + default: 127 + noise_aug_strength: + type: number + title: Noise Aug Strength + description: + Amount of noise added to the conditioning image. Higher values + reduce resemblance to the conditioning image and increase motion. + default: 0.02 + safety_check: + type: boolean + title: Safety Check + description: + Perform a safety check to estimate if generated images could be + offensive or harmful. + default: true + seed: + type: integer + title: Seed + description: Seed for random number generation. + num_inference_steps: + type: integer + title: Num Inference Steps + description: + Number of denoising steps. More steps usually lead to higher quality + images but slower inference. Modulated by strength. + default: 25 + type: object + required: + - image + title: Body_genImageToVideo + Body_genSegmentAnything2: + properties: + image: + type: string + format: binary + title: Image + description: Image to segment. 
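+        # The prompt fields below (point_coords, point_labels, box) are sent
+        # as JSON-encoded strings in the multipart form. Illustrative values,
+        # matching the shapes described on each field (the exact encoding is
+        # an assumption):
+        #   point_coords='[[450, 600], [220, 310]]'
+        #   point_labels='[1, 0]'
+        #   box='[100, 100, 500, 400]'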
+ model_id: + type: string + title: Model Id + description: Hugging Face model ID used for image generation. + default: "" + point_coords: + type: string + title: Point Coords + description: + Nx2 array of point prompts to the model, where each point is in + (X,Y) in pixels. + point_labels: + type: string + title: Point Labels + description: + Labels for the point prompts, where 1 indicates a foreground point + and 0 indicates a background point. + box: + type: string + title: Box + description: + A length 4 array given as a box prompt to the model, in XYXY format. + mask_input: + type: string + title: Mask Input + description: + A low-resolution mask input to the model, typically from a previous + prediction iteration, with the form 1xHxW (H=W=256 for SAM). + multimask_output: + type: boolean + title: Multimask Output + description: + If true, the model will return three masks for ambiguous input + prompts, often producing better masks than a single prediction. + default: true + return_logits: + type: boolean + title: Return Logits + description: + If true, returns un-thresholded mask logits instead of a binary + mask. + default: true + normalize_coords: + type: boolean + title: Normalize Coords + description: + If true, the point coordinates will be normalized to the range + [0,1], with point_coords expected to be with respect to image + dimensions. + default: true + type: object + required: + - image + title: Body_genSegmentAnything2 + Body_genUpscale: + properties: + prompt: + type: string + title: Prompt + description: Text prompt(s) to guide upscaled image generation. + image: + type: string + format: binary + title: Image + description: Uploaded image to modify with the pipeline. + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for upscaled image generation. + default: "" + safety_check: + type: boolean + title: Safety Check + description: + Perform a safety check to estimate if generated images could be + offensive or harmful. + default: true + seed: + type: integer + title: Seed + description: Seed for random number generation. + num_inference_steps: + type: integer + title: Num Inference Steps + description: + Number of denoising steps. More steps usually lead to higher quality + images but slower inference. Modulated by strength. + default: 75 + type: object + required: + - prompt + - image + title: Body_genUpscale + Chunk: + properties: + timestamp: + items: {} + type: array + title: Timestamp + description: The timestamp of the chunk. + text: + type: string + title: Text + description: The text of the chunk. + type: object + required: + - timestamp + - text + title: Chunk + description: A chunk of text with a timestamp. + GPUComputeInfo: + properties: + id: + type: string + title: Id + name: + type: string + title: Name + memory_total: + type: integer + title: Memory Total + memory_free: + type: integer + title: Memory Free + major: + type: integer + title: Major + minor: + type: integer + title: Minor + type: object + required: + - id + - name + - memory_total + - memory_free + - major + - minor + title: GPUComputeInfo + description: Model for detailed GPU compute information. 
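+    # GPUComputeInfo objects come back keyed by GPU id in the gpu_info map
+    # of GET /hardware/info. Illustrative check (jq is an assumption, not
+    # part of the spec; the endpoint is unauthenticated in this spec):
+    #   curl -s https://dream-gateway.livepeer.cloud/hardware/info | jq '.gpu_info'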
+ GPUUtilizationInfo: + properties: + id: + type: string + title: Id + name: + type: string + title: Name + memory_total: + type: integer + title: Memory Total + memory_free: + type: integer + title: Memory Free + utilization_compute: + type: integer + title: Utilization Compute + utilization_memory: + type: integer + title: Utilization Memory + type: object + required: + - id + - name + - memory_total + - memory_free + - utilization_compute + - utilization_memory + title: GPUUtilizationInfo + description: Model for GPU utilization statistics. + HTTPError: + properties: + detail: + allOf: + - $ref: "#/components/schemas/APIError" + description: Detailed error information. + type: object + required: + - detail + title: HTTPError + description: HTTP error response model. + HTTPValidationError: + properties: + detail: + items: + $ref: "#/components/schemas/ValidationError" + type: array + title: Detail + type: object + title: HTTPValidationError + HardwareInformation: + properties: + pipeline: + type: string + title: Pipeline + model_id: + type: string + title: Model Id + gpu_info: + additionalProperties: + $ref: "#/components/schemas/GPUComputeInfo" + type: object + title: Gpu Info + type: object + required: + - pipeline + - model_id + - gpu_info + title: HardwareInformation + description: Response model for GPU information. + HardwareStats: + properties: + pipeline: + type: string + title: Pipeline + model_id: + type: string + title: Model Id + gpu_stats: + additionalProperties: + $ref: "#/components/schemas/GPUUtilizationInfo" + type: object + title: Gpu Stats + type: object + required: + - pipeline + - model_id + - gpu_stats + title: HardwareStats + description: Response model for real-time GPU statistics. + HealthCheck: + properties: + status: + type: string + enum: + - OK + - ERROR + - IDLE + title: Status + description: The health status of the pipeline + type: object + required: + - status + title: HealthCheck + ImageResponse: + properties: + images: + items: + $ref: "#/components/schemas/Media" + type: array + title: Images + description: The generated images. + type: object + required: + - images + title: ImageResponse + description: Response model for image generation. + ImageToTextResponse: + properties: + text: + type: string + title: Text + description: The generated text. + type: object + required: + - text + title: ImageToTextResponse + description: Response model for text generation. 
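+    # The LLM* schemas below loosely mirror an OpenAI-style chat shape.
+    # Illustrative non-streaming request against POST /llm:
+    #   curl -X POST https://dream-gateway.livepeer.cloud/llm \
+    #     -H "Authorization: Bearer $API_TOKEN" \
+    #     -H "Content-Type: application/json" \
+    #     -d '{"messages": [{"role": "user", "content": "Say hello."}], "max_tokens": 64}'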
+ LLMChoice: + properties: + index: + type: integer + title: Index + finish_reason: + type: string + title: Finish Reason + default: "" + delta: + allOf: + - $ref: "#/components/schemas/LLMMessage" + message: + allOf: + - $ref: "#/components/schemas/LLMMessage" + type: object + required: + - index + title: LLMChoice + LLMMessage: + properties: + role: + type: string + title: Role + content: + type: string + title: Content + type: object + required: + - role + - content + title: LLMMessage + LLMRequest: + properties: + messages: + items: + $ref: "#/components/schemas/LLMMessage" + type: array + title: Messages + model: + type: string + title: Model + default: "" + temperature: + type: number + title: Temperature + default: 0.7 + max_tokens: + type: integer + title: Max Tokens + default: 256 + top_p: + type: number + title: Top P + default: 1.0 + top_k: + type: integer + title: Top K + default: -1 + stream: + type: boolean + title: Stream + default: false + type: object + required: + - messages + title: LLMRequest + LLMResponse: + properties: + id: + type: string + title: Id + model: + type: string + title: Model + created: + type: integer + title: Created + usage: + $ref: "#/components/schemas/LLMTokenUsage" + choices: + items: + $ref: "#/components/schemas/LLMChoice" + type: array + title: Choices + type: object + required: + - id + - model + - created + - usage + - choices + title: LLMResponse + LLMTokenUsage: + properties: + prompt_tokens: + type: integer + title: Prompt Tokens + completion_tokens: + type: integer + title: Completion Tokens + total_tokens: + type: integer + title: Total Tokens + type: object + required: + - prompt_tokens + - completion_tokens + - total_tokens + title: LLMTokenUsage + LiveVideoToVideoParams: + properties: + subscribe_url: + type: string + title: Subscribe Url + description: Source URL of the incoming stream to subscribe to. + publish_url: + type: string + title: Publish Url + description: Destination URL of the outgoing stream to publish. + control_url: + type: string + title: Control Url + description: + URL for subscribing via Trickle protocol for updates in the live + video-to-video generation params. + default: "" + events_url: + type: string + title: Events Url + description: + URL for publishing events via Trickle protocol for pipeline status + and logs. + default: "" + model_id: + type: string + title: Model Id + description: + Name of the pipeline to run in the live video to video job. Notice + that this is named model_id for consistency with other routes, but + it does not refer to a Hugging Face model ID. The exact model(s) + depends on the pipeline implementation and might be configurable via + the `params` argument. + default: "" + params: + type: object + title: Params + description: Initial parameters for the pipeline. 
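+          # Illustrative value (keys are an assumption; they depend entirely
+          # on the pipeline implementation):
+          #   params: {"prompt": "anime style"}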
+ default: {} + type: object + required: + - subscribe_url + - publish_url + title: LiveVideoToVideoParams + LiveVideoToVideoResponse: + properties: + subscribe_url: + type: string + title: Subscribe Url + description: Source URL of the incoming stream to subscribe to + publish_url: + type: string + title: Publish Url + description: Destination URL of the outgoing stream to publish to + control_url: + type: string + title: Control Url + description: URL for updating the live video-to-video generation + default: "" + events_url: + type: string + title: Events Url + description: + URL for subscribing to events for pipeline status and logs + default: "" + type: object + required: + - subscribe_url + - publish_url + title: LiveVideoToVideoResponse + description: Response model for live video-to-video generation. + MasksResponse: + properties: + masks: + type: string + title: Masks + description: The generated masks. + scores: + type: string + title: Scores + description: The model's confidence scores for each generated mask. + logits: + type: string + title: Logits + description: The raw, unnormalized predictions (logits) for the masks. + type: object + required: + - masks + - scores + - logits + title: MasksResponse + description: Response model for object segmentation. + Media: + properties: + url: + type: string + title: Url + description: The URL where the media can be accessed. + seed: + type: integer + title: Seed + description: The seed used to generate the media. + nsfw: + type: boolean + title: Nsfw + description: Whether the media was flagged as NSFW. + type: object + required: + - url + - seed + - nsfw + title: Media + description: + A media object containing information about the generated media. + MediaURL: + properties: + url: + type: string + title: Url + description: The URL where the media can be accessed. + type: object + required: + - url + title: MediaURL + description: A URL from which media can be accessed. + TextResponse: + properties: + text: + type: string + title: Text + description: The generated text. + chunks: + items: + $ref: "#/components/schemas/Chunk" + type: array + title: Chunks + description: The generated text chunks. + type: object + required: + - text + - chunks + title: TextResponse + description: Response model for text generation. + TextToImageParams: + properties: + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for image generation. + default: "" + loras: + type: string + title: Loras + description: + 'A LoRA (Low-Rank Adaptation) model and its corresponding weight for + image generation. Example: { "latent-consistency/lcm-lora-sdxl": + 1.0, "nerijs/pixel-art-xl": 1.2}.' + default: "" + prompt: + type: string + title: Prompt + description: + Text prompt(s) to guide image generation. Separate multiple prompts + with '|' if supported by the model. + height: + type: integer + title: Height + description: The height in pixels of the generated image. + default: 576 + width: + type: integer + title: Width + description: The width in pixels of the generated image. + default: 1024 + guidance_scale: + type: number + title: Guidance Scale + description: + Encourages model to generate images closely linked to the text + prompt (higher values may reduce image quality). + default: 7.5 + negative_prompt: + type: string + title: Negative Prompt + description: + Text prompt(s) to guide what to exclude from image generation. + Ignored if guidance_scale < 1. 
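+          # Illustrative TextToImageParams payload for POST /text-to-image
+          # (only `prompt` is required; other fields fall back to the
+          # defaults shown in this schema):
+          #   {"prompt": "a snowy mountain at dawn",
+          #    "negative_prompt": "blurry, low quality",
+          #    "width": 1024, "height": 576, "seed": 42}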
+ default: "" + safety_check: + type: boolean + title: Safety Check + description: + Perform a safety check to estimate if generated images could be + offensive or harmful. + default: true + seed: + type: integer + title: Seed + description: Seed for random number generation. + num_inference_steps: + type: integer + title: Num Inference Steps + description: + Number of denoising steps. More steps usually lead to higher quality + images but slower inference. Modulated by strength. + default: 50 + num_images_per_prompt: + type: integer + title: Num Images Per Prompt + description: Number of images to generate per prompt. + default: 1 + type: object + required: + - prompt + title: TextToImageParams + TextToSpeechParams: + properties: + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for text to speech generation. + default: "" + text: + type: string + title: Text + description: Text input for speech generation. + default: "" + description: + type: string + title: Description + description: + Description of speaker to steer text to speech generation. + default: + A male speaker delivers a slightly expressive and animated speech + with a moderate speed and pitch. + type: object + title: TextToSpeechParams + ValidationError: + properties: + loc: + items: + anyOf: + - type: string + - type: integer + type: array + title: Location + msg: + type: string + title: Message + type: + type: string + title: Error Type + type: object + required: + - loc + - msg + - type + title: ValidationError + VideoResponse: + properties: + frames: + items: + items: + $ref: "#/components/schemas/Media" + type: array + type: array + title: Frames + description: The generated video frames. + type: object + required: + - frames + title: VideoResponse + description: Response model for video generation. + securitySchemes: + HTTPBearer: + type: http + scheme: bearer diff --git a/ai/worker/api/openapi.yaml.backup b/ai/worker/api/openapi.yaml.backup new file mode 100644 index 00000000..94758142 --- /dev/null +++ b/ai/worker/api/openapi.yaml.backup @@ -0,0 +1,1390 @@ +# !!Auto-generated by 'gen_openapi.py'. DO NOT EDIT!! +openapi: 3.1.0 +info: + title: Livepeer AI Runner + description: An application to run AI pipelines + version: 0.0.0 +servers: +- url: https://dream-gateway.livepeer.cloud + description: Livepeer Cloud Community Gateway +- url: https://livepeer.studio/api/beta/generate + description: Livepeer Studio Gateway +paths: + /text-to-image: + post: + tags: + - generate + summary: Text To Image + description: Generate images from text prompts. 
+ operationId: genTextToImage + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/TextToImageParams' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/ImageResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: textToImage + /image-to-image: + post: + tags: + - generate + summary: Image To Image + description: Apply image transformations to a provided image. + operationId: genImageToImage + requestBody: + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/Body_genImageToImage' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/ImageResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: imageToImage + /image-to-video: + post: + tags: + - generate + summary: Image To Video + description: Generate a video from a provided image. + operationId: genImageToVideo + requestBody: + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/Body_genImageToVideo' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/VideoResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: imageToVideo + /upscale: + post: + tags: + - generate + summary: Upscale + description: Upscale an image by increasing its resolution. 
+ operationId: genUpscale + requestBody: + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/Body_genUpscale' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/ImageResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: upscale + /audio-to-text: + post: + tags: + - generate + summary: Audio To Text + description: Transcribe audio files to text. + operationId: genAudioToText + requestBody: + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/Body_genAudioToText' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/TextResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '413': + description: Request Entity Too Large + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '415': + description: Unsupported Media Type + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: audioToText + /segment-anything-2: + post: + tags: + - generate + summary: Segment Anything 2 + description: Segment objects in an image. + operationId: genSegmentAnything2 + requestBody: + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/Body_genSegmentAnything2' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/MasksResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: segmentAnything2 + /llm: + post: + tags: + - generate + summary: LLM + description: Generate text using a language model. 
+ operationId: genLLM + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/LLMRequest' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/LLMResponse' + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: llm + /image-to-text: + post: + tags: + - generate + summary: Image To Text + description: Transform image files to text. + operationId: genImageToText + requestBody: + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/Body_genImageToText' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/ImageToTextResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '413': + description: Request Entity Too Large + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: imageToText + /live-video-to-video: + post: + tags: + - generate + summary: Live Video To Video + description: Apply transformations to a live video streamed to the returned + endpoints. + operationId: genLiveVideoToVideo + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/LiveVideoToVideoParams' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/LiveVideoToVideoResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: liveVideoToVideo + /text-to-speech: + post: + tags: + - generate + summary: Text To Speech + description: Generate a text-to-speech audio file based on the provided text + input and speaker description. 
+ operationId: genTextToSpeech + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/TextToSpeechParams' + required: true + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/AudioResponse' + x-speakeasy-name-override: data + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '401': + description: Unauthorized + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '500': + description: Internal Server Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPError' + '422': + description: Validation Error + content: + application/json: + schema: + $ref: '#/components/schemas/HTTPValidationError' + security: + - HTTPBearer: [] + x-speakeasy-name-override: textToSpeech + /health: + get: + summary: Health + operationId: health + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/HealthCheck' + /hardware/info: + get: + summary: Hardware Info + operationId: hardware_info + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/HardwareInformation' + /hardware/stats: + get: + summary: Hardware Stats + operationId: hardware_stats + responses: + '200': + description: Successful Response + content: + application/json: + schema: + $ref: '#/components/schemas/HardwareStats' +components: + schemas: + APIError: + properties: + msg: + type: string + title: Msg + description: The error message. + type: object + required: + - msg + title: APIError + description: API error response model. + AudioResponse: + properties: + audio: + allOf: + - $ref: '#/components/schemas/MediaURL' + description: The generated audio. + type: object + required: + - audio + title: AudioResponse + description: Response model for audio generation. + Body_genAudioToText: + properties: + audio: + type: string + format: binary + title: Audio + description: Uploaded audio file to be transcribed. + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for transcription. + default: '' + return_timestamps: + type: string + title: Return Timestamps + description: 'Return timestamps for the transcribed text. Supported values: + ''sentence'', ''word'', or a string boolean (''true'' or ''false''). Default + is ''true'' (''sentence''). ''false'' means no timestamps. ''word'' means + word-based timestamps.' + default: 'true' + metadata: + type: string + title: Metadata + description: Additional job information to be passed to the pipeline. + default: '{}' + type: object + required: + - audio + title: Body_genAudioToText + Body_genImageToImage: + properties: + prompt: + type: string + title: Prompt + description: Text prompt(s) to guide image generation. + image: + type: string + format: binary + title: Image + description: Uploaded image to modify with the pipeline. + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for image generation. + default: '' + loras: + type: string + title: Loras + description: 'A LoRA (Low-Rank Adaptation) model and its corresponding weight + for image generation. Example: { "latent-consistency/lcm-lora-sdxl": 1.0, + "nerijs/pixel-art-xl": 1.2}.' + default: '' + strength: + type: number + title: Strength + description: Degree of transformation applied to the reference image (0 + to 1). 
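+          # Editor note: per the "Modulated by strength" wording on
+          # num_inference_steps below and typical diffusers-style img2img
+          # behavior (an assumption, not stated by this spec), the effective
+          # number of denoising steps is roughly
+          # strength * num_inference_steps, e.g. 0.8 * 100 = ~80 steps with
+          # the defaults in this schema.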
+ default: 0.8 + guidance_scale: + type: number + title: Guidance Scale + description: Encourages model to generate images closely linked to the text + prompt (higher values may reduce image quality). + default: 7.5 + image_guidance_scale: + type: number + title: Image Guidance Scale + description: Degree to which the generated image is pushed towards the initial + image. + default: 1.5 + negative_prompt: + type: string + title: Negative Prompt + description: Text prompt(s) to guide what to exclude from image generation. + Ignored if guidance_scale < 1. + default: '' + safety_check: + type: boolean + title: Safety Check + description: Perform a safety check to estimate if generated images could + be offensive or harmful. + default: true + seed: + type: integer + title: Seed + description: Seed for random number generation. + num_inference_steps: + type: integer + title: Num Inference Steps + description: Number of denoising steps. More steps usually lead to higher + quality images but slower inference. Modulated by strength. + default: 100 + num_images_per_prompt: + type: integer + title: Num Images Per Prompt + description: Number of images to generate per prompt. + default: 1 + type: object + required: + - prompt + - image + title: Body_genImageToImage + Body_genImageToText: + properties: + image: + type: string + format: binary + title: Image + description: Uploaded image to transform with the pipeline. + prompt: + type: string + title: Prompt + description: Text prompt(s) to guide transformation. + default: '' + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for transformation. + default: '' + type: object + required: + - image + title: Body_genImageToText + Body_genImageToVideo: + properties: + image: + type: string + format: binary + title: Image + description: Uploaded image to generate a video from. + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for video generation. + default: '' + height: + type: integer + title: Height + description: The height in pixels of the generated video. + default: 576 + width: + type: integer + title: Width + description: The width in pixels of the generated video. + default: 1024 + fps: + type: integer + title: Fps + description: The frames per second of the generated video. + default: 6 + motion_bucket_id: + type: integer + title: Motion Bucket Id + description: Used for conditioning the amount of motion for the generation. + The higher the number the more motion will be in the video. + default: 127 + noise_aug_strength: + type: number + title: Noise Aug Strength + description: Amount of noise added to the conditioning image. Higher values + reduce resemblance to the conditioning image and increase motion. + default: 0.02 + safety_check: + type: boolean + title: Safety Check + description: Perform a safety check to estimate if generated images could + be offensive or harmful. + default: true + seed: + type: integer + title: Seed + description: Seed for random number generation. + num_inference_steps: + type: integer + title: Num Inference Steps + description: Number of denoising steps. More steps usually lead to higher + quality images but slower inference. Modulated by strength. + default: 25 + type: object + required: + - image + title: Body_genImageToVideo + Body_genSegmentAnything2: + properties: + image: + type: string + format: binary + title: Image + description: Image to segment. 
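+        # --- editor note (illustrative, not part of the generated spec) ---
+        # The prompt fields below (point_coords, point_labels, box,
+        # mask_input) are typed as strings; they appear to carry
+        # JSON-encoded arrays, e.g. (hypothetical host, token, and values):
+        #
+        #   curl -X POST https://gateway.example.com/segment-anything-2 \
+        #     -H "Authorization: Bearer $TOKEN" \
+        #     -F image=@frame.png \
+        #     -F 'point_coords=[[120,250]]' \
+        #     -F 'point_labels=[1]'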
+        model_id:
+          type: string
+          title: Model Id
+          description: Hugging Face model ID used for image segmentation.
+          default: ''
+        point_coords:
+          type: string
+          title: Point Coords
+          description: Nx2 array of point prompts to the model, where each point is
+            in (X,Y) in pixels.
+        point_labels:
+          type: string
+          title: Point Labels
+          description: Labels for the point prompts, where 1 indicates a foreground
+            point and 0 indicates a background point.
+        box:
+          type: string
+          title: Box
+          description: A length 4 array given as a box prompt to the model, in XYXY
+            format.
+        mask_input:
+          type: string
+          title: Mask Input
+          description: A low-resolution mask input to the model, typically from a
+            previous prediction iteration, with the form 1xHxW (H=W=256 for SAM).
+        multimask_output:
+          type: boolean
+          title: Multimask Output
+          description: If true, the model will return three masks for ambiguous input
+            prompts, often producing better masks than a single prediction.
+          default: true
+        return_logits:
+          type: boolean
+          title: Return Logits
+          description: If true, returns un-thresholded mask logits instead of a binary
+            mask.
+          default: true
+        normalize_coords:
+          type: boolean
+          title: Normalize Coords
+          description: If true, the point coordinates will be normalized to the range
+            [0,1], with point_coords expected to be with respect to image dimensions.
+          default: true
+      type: object
+      required:
+      - image
+      title: Body_genSegmentAnything2
+    Body_genUpscale:
+      properties:
+        prompt:
+          type: string
+          title: Prompt
+          description: Text prompt(s) to guide upscaled image generation.
+        image:
+          type: string
+          format: binary
+          title: Image
+          description: Uploaded image to modify with the pipeline.
+        model_id:
+          type: string
+          title: Model Id
+          description: Hugging Face model ID used for upscaled image generation.
+          default: ''
+        safety_check:
+          type: boolean
+          title: Safety Check
+          description: Perform a safety check to estimate if generated images could
+            be offensive or harmful.
+          default: true
+        seed:
+          type: integer
+          title: Seed
+          description: Seed for random number generation.
+        num_inference_steps:
+          type: integer
+          title: Num Inference Steps
+          description: Number of denoising steps. More steps usually lead to higher
+            quality images but slower inference.
+          default: 75
+      type: object
+      required:
+      - prompt
+      - image
+      title: Body_genUpscale
+    Chunk:
+      properties:
+        timestamp:
+          items: {}
+          type: array
+          title: Timestamp
+          description: The timestamp of the chunk.
+        text:
+          type: string
+          title: Text
+          description: The text of the chunk.
+      type: object
+      required:
+      - timestamp
+      - text
+      title: Chunk
+      description: A chunk of text with a timestamp.
+    GPUComputeInfo:
+      properties:
+        id:
+          type: string
+          title: Id
+        name:
+          type: string
+          title: Name
+        memory_total:
+          type: integer
+          title: Memory Total
+        memory_free:
+          type: integer
+          title: Memory Free
+        major:
+          type: integer
+          title: Major
+        minor:
+          type: integer
+          title: Minor
+      type: object
+      required:
+      - id
+      - name
+      - memory_total
+      - memory_free
+      - major
+      - minor
+      title: GPUComputeInfo
+      description: Model for detailed GPU compute information.
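+    # --- editor note (illustrative, not part of the generated spec) ---
+    # Sketch of one GPUComputeInfo entry as it might appear in the
+    # /hardware/info response (all values invented); major/minor are
+    # presumably the CUDA compute capability (8.9 here):
+    #
+    #   {"id": "GPU-0", "name": "NVIDIA GeForce RTX 4090",
+    #    "memory_total": 25757220864, "memory_free": 24117248000,
+    #    "major": 8, "minor": 9}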
+    GPUUtilizationInfo:
+      properties:
+        id:
+          type: string
+          title: Id
+        name:
+          type: string
+          title: Name
+        memory_total:
+          type: integer
+          title: Memory Total
+        memory_free:
+          type: integer
+          title: Memory Free
+        utilization_compute:
+          type: integer
+          title: Utilization Compute
+        utilization_memory:
+          type: integer
+          title: Utilization Memory
+      type: object
+      required:
+      - id
+      - name
+      - memory_total
+      - memory_free
+      - utilization_compute
+      - utilization_memory
+      title: GPUUtilizationInfo
+      description: Model for GPU utilization statistics.
+    HTTPError:
+      properties:
+        detail:
+          allOf:
+          - $ref: '#/components/schemas/APIError'
+          description: Detailed error information.
+      type: object
+      required:
+      - detail
+      title: HTTPError
+      description: HTTP error response model.
+    HTTPValidationError:
+      properties:
+        detail:
+          items:
+            $ref: '#/components/schemas/ValidationError'
+          type: array
+          title: Detail
+      type: object
+      title: HTTPValidationError
+    HardwareInformation:
+      properties:
+        pipeline:
+          type: string
+          title: Pipeline
+        model_id:
+          type: string
+          title: Model Id
+        gpu_info:
+          additionalProperties:
+            $ref: '#/components/schemas/GPUComputeInfo'
+          type: object
+          title: Gpu Info
+      type: object
+      required:
+      - pipeline
+      - model_id
+      - gpu_info
+      title: HardwareInformation
+      description: Response model for GPU information.
+    HardwareStats:
+      properties:
+        pipeline:
+          type: string
+          title: Pipeline
+        model_id:
+          type: string
+          title: Model Id
+        gpu_stats:
+          additionalProperties:
+            $ref: '#/components/schemas/GPUUtilizationInfo'
+          type: object
+          title: Gpu Stats
+      type: object
+      required:
+      - pipeline
+      - model_id
+      - gpu_stats
+      title: HardwareStats
+      description: Response model for real-time GPU statistics.
+    HealthCheck:
+      properties:
+        status:
+          type: string
+          enum:
+          - OK
+          - ERROR
+          - IDLE
+          title: Status
+          description: The health status of the pipeline.
+      type: object
+      required:
+      - status
+      title: HealthCheck
+    ImageResponse:
+      properties:
+        images:
+          items:
+            $ref: '#/components/schemas/Media'
+          type: array
+          title: Images
+          description: The generated images.
+      type: object
+      required:
+      - images
+      title: ImageResponse
+      description: Response model for image generation.
+    ImageToTextResponse:
+      properties:
+        text:
+          type: string
+          title: Text
+          description: The generated text.
+      type: object
+      required:
+      - text
+      title: ImageToTextResponse
+      description: Response model for image-to-text generation.
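+    # --- editor note (illustrative, not part of the generated spec) ---
+    # The LLM* schemas below follow an OpenAI-style chat-completion shape.
+    # Sketch of a non-streaming LLMResponse (all values invented):
+    #
+    #   {"id": "req-123", "model": "example-model", "created": 1700000000,
+    #    "usage": {"prompt_tokens": 12, "completion_tokens": 34,
+    #              "total_tokens": 46},
+    #    "choices": [{"index": 0, "finish_reason": "stop",
+    #                 "message": {"role": "assistant", "content": "..."}}]}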
+ LLMChoice: + properties: + index: + type: integer + title: Index + finish_reason: + type: string + title: Finish Reason + default: '' + delta: + allOf: + - $ref: '#/components/schemas/LLMMessage' + message: + allOf: + - $ref: '#/components/schemas/LLMMessage' + type: object + required: + - index + title: LLMChoice + LLMMessage: + properties: + role: + type: string + title: Role + content: + type: string + title: Content + type: object + required: + - role + - content + title: LLMMessage + LLMRequest: + properties: + messages: + items: + $ref: '#/components/schemas/LLMMessage' + type: array + title: Messages + model: + type: string + title: Model + default: '' + temperature: + type: number + title: Temperature + default: 0.7 + max_tokens: + type: integer + title: Max Tokens + default: 256 + top_p: + type: number + title: Top P + default: 1.0 + top_k: + type: integer + title: Top K + default: -1 + stream: + type: boolean + title: Stream + default: false + type: object + required: + - messages + title: LLMRequest + LLMResponse: + properties: + id: + type: string + title: Id + model: + type: string + title: Model + created: + type: integer + title: Created + usage: + $ref: '#/components/schemas/LLMTokenUsage' + choices: + items: + $ref: '#/components/schemas/LLMChoice' + type: array + title: Choices + type: object + required: + - id + - model + - created + - usage + - choices + title: LLMResponse + LLMTokenUsage: + properties: + prompt_tokens: + type: integer + title: Prompt Tokens + completion_tokens: + type: integer + title: Completion Tokens + total_tokens: + type: integer + title: Total Tokens + type: object + required: + - prompt_tokens + - completion_tokens + - total_tokens + title: LLMTokenUsage + LiveVideoToVideoParams: + properties: + subscribe_url: + type: string + title: Subscribe Url + description: Source URL of the incoming stream to subscribe to. + publish_url: + type: string + title: Publish Url + description: Destination URL of the outgoing stream to publish. + control_url: + type: string + title: Control Url + description: URL for subscribing via Trickle protocol for updates in the + live video-to-video generation params. + default: '' + events_url: + type: string + title: Events Url + description: URL for publishing events via Trickle protocol for pipeline + status and logs. + default: '' + model_id: + type: string + title: Model Id + description: Name of the pipeline to run in the live video to video job. + Notice that this is named model_id for consistency with other routes, + but it does not refer to a Hugging Face model ID. The exact model(s) depends + on the pipeline implementation and might be configurable via the `params` + argument. + default: '' + params: + type: object + title: Params + description: Initial parameters for the pipeline. 
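+          # Editor note (illustrative): a minimal request body for
+          # /live-video-to-video might look like the following; the URLs and
+          # the params keys are hypothetical and pipeline-dependent, as the
+          # model_id description above notes:
+          #   {"subscribe_url": "https://gw.example.com/in/stream-abc",
+          #    "publish_url": "https://gw.example.com/out/stream-abc",
+          #    "model_id": "example-live-pipeline",
+          #    "params": {"prompt": "watercolor style"}}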
+ default: {} + type: object + required: + - subscribe_url + - publish_url + title: LiveVideoToVideoParams + LiveVideoToVideoResponse: + properties: + subscribe_url: + type: string + title: Subscribe Url + description: Source URL of the incoming stream to subscribe to + publish_url: + type: string + title: Publish Url + description: Destination URL of the outgoing stream to publish to + control_url: + type: string + title: Control Url + description: URL for updating the live video-to-video generation + default: '' + events_url: + type: string + title: Events Url + description: URL for subscribing to events for pipeline status and logs + default: '' + type: object + required: + - subscribe_url + - publish_url + title: LiveVideoToVideoResponse + description: Response model for live video-to-video generation. + MasksResponse: + properties: + masks: + type: string + title: Masks + description: The generated masks. + scores: + type: string + title: Scores + description: The model's confidence scores for each generated mask. + logits: + type: string + title: Logits + description: The raw, unnormalized predictions (logits) for the masks. + type: object + required: + - masks + - scores + - logits + title: MasksResponse + description: Response model for object segmentation. + Media: + properties: + url: + type: string + title: Url + description: The URL where the media can be accessed. + seed: + type: integer + title: Seed + description: The seed used to generate the media. + nsfw: + type: boolean + title: Nsfw + description: Whether the media was flagged as NSFW. + type: object + required: + - url + - seed + - nsfw + title: Media + description: A media object containing information about the generated media. + MediaURL: + properties: + url: + type: string + title: Url + description: The URL where the media can be accessed. + type: object + required: + - url + title: MediaURL + description: A URL from which media can be accessed. + TextResponse: + properties: + text: + type: string + title: Text + description: The generated text. + chunks: + items: + $ref: '#/components/schemas/Chunk' + type: array + title: Chunks + description: The generated text chunks. + type: object + required: + - text + - chunks + title: TextResponse + description: Response model for text generation. + TextToImageParams: + properties: + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for image generation. + default: '' + loras: + type: string + title: Loras + description: 'A LoRA (Low-Rank Adaptation) model and its corresponding weight + for image generation. Example: { "latent-consistency/lcm-lora-sdxl": 1.0, + "nerijs/pixel-art-xl": 1.2}.' + default: '' + prompt: + type: string + title: Prompt + description: Text prompt(s) to guide image generation. Separate multiple + prompts with '|' if supported by the model. + height: + type: integer + title: Height + description: The height in pixels of the generated image. + default: 576 + width: + type: integer + title: Width + description: The width in pixels of the generated image. + default: 1024 + guidance_scale: + type: number + title: Guidance Scale + description: Encourages model to generate images closely linked to the text + prompt (higher values may reduce image quality). + default: 7.5 + negative_prompt: + type: string + title: Negative Prompt + description: Text prompt(s) to guide what to exclude from image generation. + Ignored if guidance_scale < 1. 
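+          # Editor note (illustrative): an example TextToImageParams payload,
+          # with all values invented:
+          #   {"prompt": "a lighthouse at dusk", "width": 1024, "height": 576,
+          #    "guidance_scale": 7.5, "negative_prompt": "blurry, low quality"}
+          # The negative prompt only takes effect while classifier-free
+          # guidance is active, consistent with the guidance_scale < 1 caveat
+          # above (an inference, not stated explicitly by this spec).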
+ default: '' + safety_check: + type: boolean + title: Safety Check + description: Perform a safety check to estimate if generated images could + be offensive or harmful. + default: true + seed: + type: integer + title: Seed + description: Seed for random number generation. + num_inference_steps: + type: integer + title: Num Inference Steps + description: Number of denoising steps. More steps usually lead to higher + quality images but slower inference. Modulated by strength. + default: 50 + num_images_per_prompt: + type: integer + title: Num Images Per Prompt + description: Number of images to generate per prompt. + default: 1 + type: object + required: + - prompt + title: TextToImageParams + TextToSpeechParams: + properties: + model_id: + type: string + title: Model Id + description: Hugging Face model ID used for text to speech generation. + default: '' + text: + type: string + title: Text + description: Text input for speech generation. + default: '' + description: + type: string + title: Description + description: Description of speaker to steer text to speech generation. + default: A male speaker delivers a slightly expressive and animated speech + with a moderate speed and pitch. + type: object + title: TextToSpeechParams + ValidationError: + properties: + loc: + items: + anyOf: + - type: string + - type: integer + type: array + title: Location + msg: + type: string + title: Message + type: + type: string + title: Error Type + type: object + required: + - loc + - msg + - type + title: ValidationError + VideoResponse: + properties: + frames: + items: + items: + $ref: '#/components/schemas/Media' + type: array + type: array + title: Frames + description: The generated video frames. + type: object + required: + - frames + title: VideoResponse + description: Response model for video generation. 
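+  # --- editor note (illustrative, not part of the generated spec) ---
+  # The HTTPBearer scheme below protects the generate routes; /health,
+  # /hardware/info, and /hardware/stats are declared above without a
+  # security section, so a plain probe should work (host hypothetical):
+  #
+  #   curl https://worker.example.com/health
+  #   # -> {"status": "OK"}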
+ securitySchemes: + HTTPBearer: + type: http + scheme: bearer diff --git a/auto-commit.sh b/auto-commit.sh new file mode 100644 index 00000000..0fcd3d23 --- /dev/null +++ b/auto-commit.sh @@ -0,0 +1,9 @@ +#!/bin/bash +cd /Users/alisonhaire/Documents/Livepeer/livepeer-docs-current +git checkout docs-v2-dev +git add -A +if [ -z "$(git status --porcelain)" ]; then + exit 0 +else + git commit -m "Auto-commit: $(date '+%Y-%m-%d %H:%M:%S')" +fi diff --git a/docs.json b/docs.json new file mode 100644 index 00000000..1d777042 --- /dev/null +++ b/docs.json @@ -0,0 +1,3329 @@ +{ + "$schema": "https://mintlify.com/docs.json", + "theme": "palm", + "name": "Livepeer Docs", + "metadata": { + "timestamp": true + }, + "colors": { + "primary": "#18794E", + "light": "#2b9a66", + "dark": "#18794E" + }, + "favicon": "/favicon.png", + "navigation": { + "versions": [ + { + "version": "v2", + "default": true, + "languages": [ + { + "language": "en", + "tabs": [ + { + "tab": "Internal Hub", + "hidden": true, + "icon": "info-circle", + "anchors": [ + { + "anchor": "Internal Hub", + "icon": "info-circle", + "groups": [ + { + "group": "Internal Hub", + "pages": [ + "v2/pages/09_internal/internal-overview", + "v2/pages/09_internal/docs-status", + "v2/pages/09_internal/strategic-alignment", + "v2/pages/09_internal/docs-philosophy", + "v2/pages/09_internal/definitions", + "v2/pages/09_internal/personas", + "v2/pages/09_internal/ecosystem", + "v2/pages/09_internal/references" + ] + } + ] + } + ] + }, + { + "tab": "Home", + "icon": "house-heart", + "anchors": [ + { + "anchor": "Home", + "icon": "house-heart", + "groups": [ + { + "group": "Home", + "icon": "house-heart", + "pages": [ + "v2/pages/00_home/Landing", + "v2/pages/00_home/home/livepeer-tl-dr", + "v2/pages/00_home/home/trending-at-livepeer" + ] + }, + { + "group": "Livepeer Showcase", + "icon": "clapperboard-play", + "pages": [ + "v2/pages/00_home/project-showcase/projects-built-on-livepeer", + "v2/pages/00_home/project-showcase/livepeer-applications", + "v2/pages/00_home/project-showcase/industry-verticals" + ] + }, + { + "group": "Get Started", + "icon": "arrow-right-to-bracket", + "pages": [ + "v2/pages/00_home/get-started/use-livepeer", + "v2/pages/00_home/get-started/stream-video-quickstart", + "v2/pages/00_home/get-started/livepeer-ai-quickstart", + "v2/pages/00_home/get-started/build-on-livepeer" + ] + } + ] + }, + { + "anchor": "Reference HUB", + "icon": "books", + "pages": ["v2/pages/07_resources/redirect"] + }, + { + "anchor": "Help Center", + "icon": "comments-question-check", + "pages": ["v2/pages/08_help/redirect"] + }, + { + "anchor": " ", + "icon": "-", + "href": " " + } + ] + }, + { + "tab": "About", + "icon": "graduation-cap", + "anchors": [ + { + "anchor": "About Livepeer", + "icon": "graduation-cap", + "groups": [ + { + "group": "About Livepeer", + "icon": "graduation-cap", + "pages": [ + "v2/pages/01_about/about-livepeer/livepeer-overview", + "v2/pages/01_about/about-livepeer/why-livepeer", + "v2/pages/01_about/about-livepeer/livepeer-evolution", + "v2/pages/01_about/about-livepeer/livepeer-ecosystem" + ] + }, + { + "group": "Livepeer Protocol", + "icon": "cube", + "pages": [ + "v2/pages/01_about/livepeer-protocol/protocol-overview", + "v2/pages/01_about/livepeer-protocol/livepeer-whitepaper", + "v2/pages/01_about/livepeer-protocol/technical-overview" + ] + }, + { + "group": "Livepeer Network", + "icon": "circle-nodes", + "pages": [ + "v2/pages/01_about/livepeer-network/actor-overview", + 
"v2/pages/01_about/livepeer-network/livepeer-token-economics", + "v2/pages/01_about/livepeer-network/livepeer-governance" + ] + } + ] + }, + { + "anchor": "Reference HUB", + "icon": "books", + "pages": ["v2/pages/07_resources/redirect"] + }, + { + "anchor": "Help Center", + "icon": "comments-question-check", + "pages": ["v2/pages/08_help/redirect"] + }, + { + "anchor": " ", + "icon": "-", + "href": " " + } + ] + }, + { + "tab": "Community", + "icon": "people-group", + "anchors": [ + { + "anchor": "Community", + "icon": "people-group", + "groups": [ + { + "group": "Livepeer Community", + "icon": "people-group", + "pages": [ + "v2/pages/02_community/community-home", + "v2/pages/02_community/livepeer-community/livepeer-Latest-Topics", + "v2/pages/02_community/livepeer-community/community-guidelines" + ] + }, + { + "group": "Livepeer Connect", + "icon": "hashtag", + "pages": [ + "v2/pages/02_community/livepeer-connect/news-and-socials", + "v2/pages/02_community/livepeer-connect/events-and-community-streams", + "v2/pages/02_community/livepeer-connect/forums-and-discussions" + ] + }, + { + "group": "Livepeer Contribute", + "icon": "door-open", + "pages": [ + "v2/pages/02_community/livepeer-contribute/contribute", + "v2/pages/02_community/livepeer-contribute/opportunities", + "v2/pages/02_community/livepeer-contribute/build-livepeer" + ] + }, + { + "group": "[MOVE HERE] Help Center", + "icon": "comments-question-check", + "hidden": true, + "pages": [ + "v2/pages/02_community/livepeer-community/trending-test" + ] + }, + { + "group": "[TO DELETE] Tests", + "pages": [ + "v2/pages/02_community/livepeer-community/trending-test" + ] + } + ] + }, + { + "anchor": "Reference HUB", + "icon": "books", + "pages": ["v2/pages/07_resources/redirect"] + }, + { + "anchor": "Help Center", + "icon": "comments-question-check", + "pages": ["v2/pages/08_help/redirect"] + }, + { + "anchor": " ", + "icon": "-", + "href": " " + } + ] + }, + { + "tab": "Developers", + "icon": "display-code", + "anchors": [ + { + "anchor": "Developers", + "icon": "display-code", + "groups": [ + { + "group": "Building on Livepeer", + "icon": "code", + "pages": [ + "v2/pages/03_developers/developer-home", + "v2/pages/03_developers/building-on-livepeer/developer-guide" + ] + }, + { + "group": "Quickstart", + "icon": "fast-forward", + "pages": [ + { + "group": "Real-time Video", + "pages": [ + "v2/pages/03_developers/building-on-livepeer/quick-starts/livepeer-ai", + "v2/pages/03_developers/livepeer-real-time-video/video-streaming-on-livepeer/README.mdx" + ] + }, + { + "group": "AI Pipelines", + "pages": [ + "v2/pages/03_developers/building-on-livepeer/quick-starts/video-streaming", + "v2/pages/03_developers/building-on-livepeer/quick-starts/livepeer-ai" + ] + } + ] + }, + { + "group": "Developer Platforms", + "icon": "gear-code", + "pages": [ + "v2/pages/03_developers/developer-platforms/builder-hub", + { + "group": "Daydream", + "pages": [ + "v2/pages/03_developers/developer-platforms/daydream/daydream" + ] + }, + { + "group": "Livepeer Studio", + "pages": [ + "v2/pages/03_developers/developer-platforms/livepeer-studio/livepeer-studio" + ] + }, + { + "group": "Frameworks", + "pages": [ + "v2/pages/03_developers/developer-platforms/frameworks/frameworks" + ] + }, + { + "group": "Streamplace", + "pages": [ + "v2/pages/03_developers/developer-platforms/streamplace/streamplace" + ] + }, + { + "group": "All Ecosystem Products", + "pages": [ + "v2/pages/03_developers/developer-platforms/all-ecosystem/ecosystem-products/ecosystem-products" + ] + } + 
] + }, + { + "group": "Developer Tools", + "icon": "tools", + "pages": [ + "v2/pages/03_developers/developer-tools/tooling-hub", + "v2/pages/03_developers/developer-tools/livepeer-explorer", + "v2/pages/03_developers/developer-tools/livepeer-cloud", + "v2/pages/03_developers/developer-tools/dashboards" + ] + }, + { + "group": "Guides & Tutorials", + "icon": "laptop-file", + "pages": [ + "v2/pages/03_developers/guides-and-resources/developer-guides", + "v2/pages/03_developers/guides-and-resources/resources", + "v2/pages/03_developers/guides-and-resources/developer-help", + "v2/pages/03_developers/guides-and-resources/contribution-guide" + ] + }, + { + "group": "Builder Opportunities", + "icon": "lightbulb", + "pages": [ + "v2/pages/03_developers/builder-opportunities/dev-programs", + "v2/pages/03_developers/builder-opportunities/livepeer-rfps" + ] + }, + { + "group": "Technical References", + "icon": "books", + "pages": [ + { + "group": "SDKs & APIs", + "pages": [ + "v2/pages/03_developers/technical-references-sdks.-and-apis/sdks", + "v2/pages/03_developers/technical-references-sdks.-and-apis/apis" + ] + }, + "v2/pages/03_developers/technical-references/awesome-livepeer", + "v2/pages/03_developers/technical-references/wiki", + "v2/pages/03_developers/technical-references/deepwiki" + ] + }, + { + "group": "Changelog & Migrations", + "icon": "swap", + "pages": [ + "v2/pages/07_resources/changelog/changelog", + "v2/pages/07_resources/changelog/migration-guides" + ] + } + ] + }, + { + "anchor": "Reference HUB", + "icon": "books", + "pages": ["v2/pages/07_resources/redirect"] + }, + { + "anchor": "Help Center", + "icon": "comments-question-check", + "pages": ["v2/pages/08_help/redirect"] + }, + { + "anchor": " ", + "icon": "-", + "href": " " + } + ] + }, + { + "tab": "Gateways", + "icon": "torii-gate", + "anchors": [ + { + "anchor": "Gateways", + "icon": "torii-gate", + "groups": [ + { + "group": "About Gateways", + "icon": "graduation-cap", + "pages": [ + "v2/pages/04_gateways/gateways-home", + { + "group": "Gateway Knowledge Hub", + "expanded": true, + "pages": [ + "v2/pages/04_gateways/about-gateways/gateway-explainer", + "v2/pages/04_gateways/about-gateways/gateway-functions", + "v2/pages/04_gateways/about-gateways/gateway-architecture", + "v2/pages/04_gateways/about-gateways/gateway-economics" + ] + } + ] + }, + { + "group": "Gateway Services & Providers", + "icon": "wand-magic-sparkles", + "pages": [ + "v2/pages/04_gateways/using-gateways/choosing-a-gateway", + "v2/pages/04_gateways/using-gateways/gateway-providers", + { + "group": "Provider Docs", + "pages": [ + "v2/pages/04_gateways/using-gateways/gateway-providers/daydream-gateway", + "v2/pages/04_gateways/using-gateways/gateway-providers/livepeer-studio-gateway", + "v2/pages/04_gateways/using-gateways/gateway-providers/cloud-spe-gateway", + "v2/pages/04_gateways/using-gateways/gateway-providers/streamplace" + ] + } + ] + }, + { + "group": "Run Your Own Gateway", + "icon": "sign-posts-wrench", + "pages": [ + { + "group": "Quickstart", + "icon": "fast-forward", + "pages": [ + "v2/pages/04_gateways/run-a-gateway/quickstart/quickstart-a-gateway", + "v2/pages/04_gateways/run-a-gateway/quickstart/get-AI-to-setup-the-gateway.mdx" + ] + }, + { + "group": "Gateway Setup Guide", + "expanded": true, + "pages": [ + "v2/pages/04_gateways/run-a-gateway/why-run-a-gateway", + "v2/pages/04_gateways/run-a-gateway/run-a-gateway", + { + "group": "Setup Checklist", + "pages": [ + "v2/pages/04_gateways/run-a-gateway/requirements/setup", + 
"v2/pages/04_gateways/run-a-gateway/requirements/on-chain setup/on-chain", + "v2/pages/04_gateways/run-a-gateway/requirements/on-chain setup/fund-gateway" + ] + }, + { + "group": "Installation", + "pages": [ + "v2/pages/04_gateways/run-a-gateway/install/install-overview", + "v2/pages/04_gateways/run-a-gateway/install/docker-install", + "v2/pages/04_gateways/run-a-gateway/install/linux-install", + "v2/pages/04_gateways/run-a-gateway/install/windows-install", + "v2/pages/04_gateways/run-a-gateway/install/community-projects" + ] + }, + { + "group": "Configuration", + "pages": [ + "v2/pages/04_gateways/run-a-gateway/configure/configuration-overview", + "v2/pages/04_gateways/run-a-gateway/configure/video-configuration", + "v2/pages/04_gateways/run-a-gateway/configure/ai-configuration", + "v2/pages/04_gateways/run-a-gateway/configure/dual-configuration", + "v2/pages/04_gateways/run-a-gateway/configure/pricing-configuration" + ] + }, + { + "group": "Testing", + "pages": [ + "v2/pages/04_gateways/run-a-gateway/test/test-gateway", + "v2/pages/04_gateways/run-a-gateway/test/publish-content", + "v2/pages/04_gateways/run-a-gateway/test/playback-content" + ] + }, + { + "group": "Network Connect", + "tag": "Go Live!", + "pages": [ + "v2/pages/04_gateways/run-a-gateway/connect/lp-marketplace", + "v2/pages/04_gateways/run-a-gateway/connect/discover-offerings", + "v2/pages/04_gateways/run-a-gateway/connect/connect-with-offerings" + ] + }, + { + "group": "Monitor & Optimise", + "pages": [ + "v2/pages/04_gateways/run-a-gateway/monitor/monitor-and-optimise" + ] + } + ] + } + ] + }, + { + "group": "Gateway Tools & Dashboards", + "icon": "tools", + "pages": [ + "v2/pages/04_gateways/gateway-tools/explorer", + "v2/pages/04_gateways/gateway-tools/livepeer-tools" + ] + }, + { + "group": "Gateway Guides & Resources", + "icon": "laptop-file", + "pages": [ + "v2/pages/04_gateways/guides-and-resources/community-guides", + "v2/pages/04_gateways/guides-and-resources/community-projects", + "v2/pages/04_gateways/guides-and-resources/faq" + ] + }, + { + "group": "Technical References", + "icon": "code", + "pages": [ + { + "group": "Gateways", + "pages": [ + "v2/pages/04_gateways/references/technical-architecture", + "v2/pages/04_gateways/references/configuration-flags", + "v2/pages/04_gateways/references/video-flags", + "v2/pages/04_gateways/references/cli-commands" + ] + }, + { + "group": "API Reference", + "pages": [ + { + "group": "AI API", + "pages": [ + "v2/pages/04_gateways/references/api-reference/AI-API/ai", + "v2/pages/04_gateways/references/api-reference/AI-API/text-to-image", + "v2/pages/04_gateways/references/api-reference/AI-API/image-to-image", + "v2/pages/04_gateways/references/api-reference/AI-API/image-to-video", + "v2/pages/04_gateways/references/api-reference/AI-API/upscale", + "v2/pages/04_gateways/references/api-reference/AI-API/audio-to-text", + "v2/pages/04_gateways/references/api-reference/AI-API/segment-anything-2", + "v2/pages/04_gateways/references/api-reference/AI-API/llm", + "v2/pages/04_gateways/references/api-reference/AI-API/image-to-text", + "v2/pages/04_gateways/references/api-reference/AI-API/live-video-to-video", + "v2/pages/04_gateways/references/api-reference/AI-API/text-to-speech", + "v2/pages/04_gateways/references/api-reference/AI-API/health", + "v2/pages/04_gateways/references/api-reference/AI-API/hardware-info", + "v2/pages/04_gateways/references/api-reference/AI-API/hardware-stats" + ] + }, + { + "group": "CLI HTTP API", + "pages": [ + 
"v2/pages/04_gateways/references/api-reference/CLI-HTTP/cli-http-api", + "v2/pages/04_gateways/references/api-reference/CLI-HTTP/unbond", + "v2/pages/04_gateways/references/api-reference/CLI-HTTP/rebond", + "v2/pages/04_gateways/references/api-reference/CLI-HTTP/activateorchestrator", + "v2/pages/04_gateways/references/api-reference/CLI-HTTP/setbroadcastconfig", + "v2/pages/04_gateways/references/api-reference/CLI-HTTP/setmaxpriceforcapability", + "v2/pages/04_gateways/references/api-reference/CLI-HTTP/reward", + "v2/pages/04_gateways/references/api-reference/CLI-HTTP/transfertokens", + "v2/pages/04_gateways/references/api-reference/CLI-HTTP/signmessage" + ] + } + ] + }, + { + "group": "Exchanges & RPCs", + "pages": [ + "v2/pages/04_gateways/references/livepeer-exchanges", + "v2/pages/04_gateways/references/artibtrum-exchanges", + "v2/pages/04_gateways/references/arbitrum-rpc" + ] + } + ] + } + ] + }, + { + "anchor": "Quickstart", + "icon": "fast-forward", + "pages": [ + "v2/pages/04_gateways/run-a-gateway/quickstart-a-gateway", + "v2/pages/04_gateways/run-a-gateway/get-AI-to-setup-the-gateway" + ] + }, + { + "anchor": "Quick Links", + "icon": "person-to-portal", + "pages": ["v2/pages/04_gateways/references/"] + }, + { + "anchor": "Resources", + "icon": "books", + "pages": ["v2/pages/04_gateways/references/"] + }, + { + "anchor": " ", + "icon": "horizontal-rule", + "pages": [" "] + } + ] + }, + { + "tab": "GPU Nodes", + "icon": "microchip", + "anchors": [ + { + "anchor": "GPU Nodes", + "icon": "microchip", + "groups": [ + { + "group": "About Orchestrators (GPU Nodes)", + "icon": "graduation-cap", + "pages": [ + "v2/pages/05_orchestrators/orchestrators-home", + "v2/pages/05_orchestrators/about-orchestrators/overview", + { + "group": "Orchestrator Functions", + "pages": [ + "v2/pages/05_orchestrators/about-orchestrators/orchestrator-functions/transcoding", + "v2/pages/05_orchestrators/about-orchestrators/orchestrator-functions/ai-pipelines" + ] + } + ] + }, + { + "group": "Set up an Orchestrator", + "icon": "gear-code", + "pages": [ + "v2/pages/05_orchestrators/setting-up-an-orchestrator/hardware-requirements", + "v2/pages/05_orchestrators/setting-up-an-orchestrator/orchestrator-stats", + { + "group": "Setting Up An Orchestrator", + "pages": [ + "v2/pages/05_orchestrators/setting-up-an-orchestrator/setting-up-an-orchestrator/quickstart-add-your-gpu-to-livepeer", + "v2/pages/05_orchestrators/setting-up-an-orchestrator/join-a-pool", + "v2/pages/05_orchestrators/setting-up-an-orchestrator/setting-up-an-orchestrator/data-centres-and-large-scale-hardware-providers" + ] + } + ] + }, + { + "group": "Orchestrator Tooling", + "icon": "tools", + "pages": [ + "v2/pages/05_orchestrators/orchestrator-tooling/orchestrator-tools", + "v2/pages/05_orchestrators/orchestrator-tooling/orchestrator-dashboards" + ] + }, + { + "group": "Orchestrator Guides & Resources", + "icon": "laptop-file", + "pages": [ + "v2/pages/05_orchestrators/orchestrator-guides-and-references/orchestrator-guides-and-references", + "v2/pages/05_orchestrators/orchestrator-guides-and-references/orchestrator-resources", + "v2/pages/05_orchestrators/orchestrator-guides-and-references/orchestrator-community-and-help" + ] + } + ] + }, + { + "anchor": "Reference HUB", + "icon": "books", + "pages": ["v2/pages/07_resources/redirect"] + }, + { + "anchor": "Help Center", + "icon": "comments-question-check", + "pages": ["v2/pages/08_help/redirect"] + }, + { + "anchor": " ", + "icon": "-", + "href": " " + } + ] + }, + { + "tab": "Delegators & LPT", + 
"icon": "hand-holding-dollar", + "anchors": [ + { + "anchor": "Delegators & LPT", + "icon": "hand-holding-dollar", + "groups": [ + { + "group": "About LPT", + "icon": "graduation-cap", + "pages": [ + "v2/pages/06_delegators/token-home", + "v2/pages/06_delegators/about-lpt-livepeer-token/overview", + "v2/pages/06_delegators/about-lpt-livepeer-token/why-have-a-token", + "v2/pages/06_delegators/about-lpt-livepeer-token/livepeer-token-economics", + "v2/pages/06_delegators/about-lpt-livepeer-token/how-to-get-lpt", + "v2/pages/06_delegators/about-lpt-livepeer-token/delegators" + ] + }, + { + "group": "Delegating LPT", + "icon": "money-bill-transfer", + "pages": [ + "v2/pages/06_delegators/delegating-lpt/overview", + "v2/pages/06_delegators/delegating-lpt/delegation-economics", + "v2/pages/06_delegators/delegating-lpt/how-to-delegate-lpt" + ] + }, + { + "group": "Livepeer Governance", + "icon": "box-ballot", + "pages": [ + "v2/pages/06_delegators/livepeer-governance/overview", + "v2/pages/06_delegators/livepeer-governance/livepeer-governance", + "v2/pages/06_delegators/livepeer-governance/livepeer-treasury" + ] + }, + { + "group": "Livepeer Treasury", + "pages": [] + }, + { + "group": "Guides & Resources", + "icon": "books", + "pages": [ + "v2/pages/06_delegators/token-resources/lpt-exchanges", + "v2/pages/06_delegators/token-resources/lpt-eth-usage" + ] + } + ] + }, + { + "anchor": "Reference HUB", + "icon": "books", + "pages": ["v2/pages/07_resources/redirect"] + }, + { + "anchor": "Help Center", + "icon": "comments-question-check", + "pages": ["v2/pages/08_help/redirect"] + }, + { + "anchor": " ", + "icon": "-", + "href": " " + } + ] + }, + { + "tab": "Reference HUB", + "hidden": false, + "icon": "books", + "anchors": [ + { + "anchor": "Reference & Help HUB", + "icon": "books", + "groups": [ + { + "group": "Home", + "icon": "house", + "pages": ["v2/pages/07_resources/resources_hub"] + }, + { + "group": "Documentation Guide", + "icon": "book-open", + "pages": [ + "v2/pages/07_resources/documentation-guide/documentation-overview", + "v2/pages/07_resources/documentation-guide/documentation-guide", + "v2/pages/07_resources/documentation-guide/docs-features-and-ai-integrations", + "v2/pages/07_resources/documentation-guide/contribute-to-the-docs" + ] + }, + { + "group": "Livepeer Concepts", + "icon": "graduation-cap", + "pages": [ + "v2/pages/07_resources/concepts/livepeer-core-concepts", + "v2/pages/07_resources/livepeer-glossary", + "v2/pages/07_resources/concepts/livepeer-actors" + ] + }, + { + "group": "Developer References", + "icon": "book", + "pages": ["v2/pages/07_resources/livepeer-glossary"] + }, + { + "group": "Gateway References", + "icon": "wand-magic-sparkles", + "pages": [ + "v2/pages/07_resources/ai-inference-on-livepeer/livepeer-ai/livepeer-ai-content-directory" + ] + }, + { + "group": "Orchestrator References", + "icon": "microchip", + "pages": ["v2/pages/07_resources/livepeer-glossary"] + }, + { + "group": "LPT & Delegator References", + "icon": "hand-holding-dollar", + "pages": ["v2/pages/07_resources/livepeer-glossary"] + }, + { + "group": "Community Resources", + "icon": "", + "pages": ["v2/pages/07_resources/livepeer-glossary"] + }, + { + "group": "Partner Resources", + "icon": "handshake", + "pages": ["v2/pages/07_resources/livepeer-glossary"] + }, + { + "group": "Help Center", + "icon": "comments-question-check", + "pages": ["v2/pages/07_resources/livepeer-glossary"] + }, + { + "group": "Technical References", + "icon": "code", + "pages": [ + { + "group": "Protocol 
References", + "pages": [] + } + ] + }, + { + "group": "Changelog", + "icon": "swap", + "pages": [ + "v2/pages/00_home/changelog/changelog", + "v2/pages/00_home/changelog/migration-guide" + ] + } + ] + }, + { + "anchor": "Help Center", + "icon": "comments-question-check", + "pages": ["v2/pages/08_help/redirect"] + }, + { + "anchor": " ", + "icon": "-", + "href": " " + } + ] + }, + { + "tab": "Help Center", + "hidden": true, + "icon": "comments-question-check", + "anchors": [ + { + "anchor": "Help Center", + "icon": "comments-question-check", + "groups": [ + { + "group": "Home", + "pages": ["v2/pages/08_help/README"] + }, + { + "group": "Delegating LPT", + "pages": [] + }, + { + "group": "Livepeer Governance", + "pages": [] + }, + { + "group": "Livepeer Treasury", + "pages": [] + }, + { + "group": "Token Resources", + "pages": [] + } + ] + }, + { + "anchor": "Reference HUB", + "icon": "books", + "pages": ["v2/pages/07_resources/redirect"] + }, + { + "anchor": " ", + "icon": "-", + "href": " " + } + ] + } + ] + } + ] + }, + { + "version": "v1", + "languages": [ + { + "language": "en", + "dropdowns": [ + { + "dropdown": "Developers", + "icon": "code", + "anchors": [ + { + "anchor": "Documentation", + "icon": "code", + "groups": [ + { + "group": "Getting Started", + "pages": [ + "v1/developers/introduction", + "v1/developers/quick-start", + "v1/developers/livepeer-studio-cli" + ] + }, + { + "group": "Guides", + "pages": [ + "v1/developers/guides/overview", + { + "group": "Assets", + "icon": "video", + "pages": [ + "v1/developers/guides/upload-video-asset", + "v1/developers/guides/playback-an-asset", + "v1/developers/guides/listen-to-asset-events", + "v1/developers/guides/encrypted-asset", + "v1/developers/guides/thumbnails-vod" + ] + }, + { + "group": "Livestream", + "icon": "camera", + "pages": [ + "v1/developers/guides/create-livestream", + "v1/developers/guides/playback-a-livestream", + "v1/developers/guides/stream-via-obs", + "v1/developers/guides/livestream-from-browser", + "v1/developers/guides/optimize-latency-of-a-livestream", + "v1/developers/guides/monitor-stream-health", + "v1/developers/guides/listen-to-stream-events", + "v1/developers/guides/multistream", + "v1/developers/guides/clip-a-livestream", + "v1/developers/guides/thumbnails-live" + ] + }, + { + "group": "Access control", + "icon": "lock", + "pages": [ + "v1/developers/guides/access-control-webhooks", + "v1/developers/guides/access-control-jwt" + ] + }, + { + "group": "Webhooks", + "icon": "bell", + "pages": [ + "v1/developers/guides/setup-and-listen-to-webhooks" + ] + }, + { + "group": "Transcode API", + "icon": "photo-film", + "pages": [ + "v1/developers/guides/transcode-video-storj", + "v1/developers/guides/transcode-video-w3s" + ] + }, + { + "group": "Viewership Metrics", + "icon": "chart-bar", + "pages": [ + "v1/developers/guides/get-engagement-analytics-via-api", + "v1/developers/guides/get-engagement-analytics-via-grafana", + "v1/developers/guides/get-engagement-analytics-via-timeplus" + ] + }, + { + "group": "Projects", + "icon": "folder-open", + "pages": ["v1/developers/guides/managing-projects"] + }, + { + "group": "Integrations", + "icon": "puzzle-piece", + "pages": [ + "v1/developers/tutorials/decentralized-app-with-fvm", + "v1/developers/tutorials/token-gate-videos-with-lit", + { + "group": "Storage Provider Integration", + "pages": [ + "v1/developers/tutorials/upload-playback-videos-4everland", + "v1/developers/tutorials/upload-playback-videos-on-arweave", + 
"v1/developers/tutorials/upload-playback-videos-on-ipfs" + ] + } + ] + } + ] + }, + { + "group": "References", + "pages": [ + "v1/references/api-support-matrix", + { + "group": "Livepeer Node Software", + "icon": "golang", + "pages": [ + "v1/references/go-livepeer/bandwidth-requirements", + "v1/references/go-livepeer/cli-reference", + "v1/references/go-livepeer/gpu-support", + "v1/references/go-livepeer/hardware-requirements", + "v1/references/go-livepeer/prometheus-metrics" + ] + }, + "v1/references/contract-addresses", + "v1/references/example-applications", + "v1/references/awesome-livepeer", + { + "group": "FAQs", + "icon": "book", + "pages": [ + "v1/references/knowledge-base/livestream", + "v1/references/knowledge-base/playback", + "v1/references/knowledge-base/vod" + ] + } + ] + } + ] + }, + { + "anchor": "API Reference", + "icon": "rectangle-terminal", + "groups": [ + { + "group": "Overview", + "pages": [ + "v1/api-reference/overview/introduction", + "v1/api-reference/overview/authentication" + ] + }, + { + "group": "APIs", + "pages": [ + { + "group": "Asset", + "icon": "video", + "pages": [ + "v1/api-reference/asset/overview", + "v1/api-reference/asset/upload", + "v1/api-reference/asset/upload-via-url", + "v1/api-reference/asset/get", + "v1/api-reference/asset/update", + "v1/api-reference/asset/delete", + "v1/api-reference/asset/get-all" + ] + }, + { + "group": "Livestream", + "icon": "camera", + "pages": [ + "v1/api-reference/stream/overview", + "v1/api-reference/stream/create", + "v1/api-reference/stream/get", + "v1/api-reference/stream/update", + "v1/api-reference/stream/terminate", + "v1/api-reference/stream/add-multistream-target", + "v1/api-reference/stream/delete-multistream-target", + "v1/api-reference/stream/delete", + "v1/api-reference/stream/get-all", + "v1/api-reference/stream/create-clip", + "v1/api-reference/stream/get-clip" + ] + }, + { + "group": "Generate", + "icon": "microchip-ai", + "pages": [ + "v1/api-reference/generate/overview", + "v1/api-reference/generate/audio-to-text", + "v1/api-reference/generate/text-to-image", + "v1/api-reference/generate/image-to-image", + "v1/api-reference/generate/image-to-video", + "v1/api-reference/generate/llm", + "v1/api-reference/generate/segment-anything-2", + "v1/api-reference/generate/upscale" + ] + }, + { + "group": "Multistream target", + "icon": "arrows-split-up-and-left", + "pages": [ + "v1/api-reference/multistream/overview", + "v1/api-reference/multistream/create", + "v1/api-reference/multistream/get", + "v1/api-reference/multistream/update", + "v1/api-reference/multistream/delete", + "v1/api-reference/multistream/get-all" + ] + }, + { + "group": "Session", + "icon": "film", + "pages": [ + "v1/api-reference/session/overview", + "v1/api-reference/session/get", + "v1/api-reference/session/get-all", + "v1/api-reference/session/get-recording", + "v1/api-reference/session/get-clip" + ] + }, + { + "group": "Access control", + "icon": "lock", + "pages": [ + "v1/api-reference/signing-key/overview", + "v1/api-reference/signing-key/create", + "v1/api-reference/signing-key/get", + "v1/api-reference/signing-key/update", + "v1/api-reference/signing-key/delete", + "v1/api-reference/signing-key/get-all" + ] + }, + { + "group": "Webhook", + "icon": "bell", + "pages": [ + "v1/api-reference/webhook/overview", + "v1/api-reference/webhook/create", + "v1/api-reference/webhook/get", + "v1/api-reference/webhook/update", + "v1/api-reference/webhook/delete", + "v1/api-reference/webhook/get-all" + ] + }, + { + "group": "Task", + "icon": "gear", 
+ "pages": [ + "v1/api-reference/task/overview", + "v1/api-reference/task/get-all", + "v1/api-reference/task/get" + ] + }, + { + "group": "Playback", + "icon": "play", + "pages": [ + "v1/api-reference/playback/overview", + "v1/api-reference/playback/get" + ] + }, + { + "group": "Transcode", + "icon": "photo-film", + "pages": [ + "v1/api-reference/transcode/overview", + "v1/api-reference/transcode/create" + ] + }, + { + "group": "Viewership", + "icon": "chart-bar", + "pages": [ + "v1/api-reference/viewership/get-realtime-viewership", + "v1/api-reference/viewership/get-viewership-metrics", + "v1/api-reference/viewership/get-usage-metrics", + "v1/api-reference/viewership/get-public-total-views", + "v1/api-reference/viewership/get-creators-metrics" + ] + } + ] + } + ] + }, + { + "anchor": "SDKs", + "icon": "brackets-curly", + "groups": [ + { + "group": "Overview", + "pages": ["v1/sdks/introduction"] + }, + { + "group": "Server-side SDKs", + "pages": [ + "v1/sdks/javascript", + "v1/sdks/go", + "v1/sdks/python" + ] + }, + { + "group": "React Components", + "icon": "circle-play", + "pages": [ + "v1/sdks/react/getting-started", + { + "group": "Player", + "icon": "circle-play", + "pages": [ + "v1/sdks/react/player/Root", + "v1/sdks/react/player/Container", + "v1/sdks/react/player/Video", + "v1/sdks/react/player/Error", + "v1/sdks/react/player/Loading", + "v1/sdks/react/player/Portal", + "v1/sdks/react/player/Poster", + { + "group": "Controls", + "pages": [ + "v1/sdks/react/player/Controls", + "v1/sdks/react/player/Clip", + "v1/sdks/react/player/Fullscreen", + "v1/sdks/react/player/Live", + "v1/sdks/react/player/PictureInPicture", + "v1/sdks/react/player/Play", + "v1/sdks/react/player/RateSelect", + "v1/sdks/react/player/Seek", + "v1/sdks/react/player/Time", + "v1/sdks/react/player/VideoQualitySelect", + "v1/sdks/react/player/Volume" + ] + }, + { + "group": "Functions", + "pages": [ + "v1/sdks/react/player/get-src", + "v1/sdks/react/player/useMediaContext" + ] + } + ] + }, + { + "group": "Broadcast", + "icon": "signal-stream", + "pages": [ + "v1/sdks/react/broadcast/Root", + "v1/sdks/react/broadcast/Container", + "v1/sdks/react/broadcast/Video", + "v1/sdks/react/broadcast/Enabled", + "v1/sdks/react/broadcast/Error", + "v1/sdks/react/broadcast/Loading", + "v1/sdks/react/broadcast/Portal", + { + "group": "Controls", + "pages": [ + "v1/sdks/react/broadcast/Controls", + "v1/sdks/react/broadcast/Audio", + "v1/sdks/react/broadcast/Camera", + "v1/sdks/react/broadcast/Fullscreen", + "v1/sdks/react/broadcast/PictureInPicture", + "v1/sdks/react/broadcast/Screenshare", + "v1/sdks/react/broadcast/Source", + "v1/sdks/react/broadcast/Status" + ] + }, + { + "group": "Functions", + "pages": [ + "v1/sdks/react/broadcast/get-ingest", + "v1/sdks/react/broadcast/useBroadcastContext" + ] + } + ] + }, + { + "group": "Examples", + "icon": "clipboard", + "pages": [ + "v1/sdks/react/Player", + "v1/sdks/react/Broadcast" + ] + }, + { + "group": "Migration", + "icon": "right-left", + "pages": [ + "v1/sdks/react/migration/migration-4.x", + { + "group": "Livepeer React (3.x and below)", + "pages": [ + "v1/sdks/react/migration/3.x/getting-started", + "v1/sdks/react/migration/3.x/client", + "v1/sdks/react/migration/3.x/LivepeerConfig", + "v1/sdks/react/migration/3.x/Player", + "v1/sdks/react/migration/3.x/Broadcast", + { + "group": "Asset", + "pages": [ + "v1/sdks/react/migration/3.x/asset/useCreateAsset", + "v1/sdks/react/migration/3.x/asset/useAsset", + "v1/sdks/react/migration/3.x/asset/useUpdateAsset", + 
"v1/sdks/react/migration/3.x/asset/useAssetMetrics" + ] + }, + { + "group": "Stream", + "pages": [ + "v1/sdks/react/migration/3.x/stream/useCreateStream", + "v1/sdks/react/migration/3.x/stream/useStream", + "v1/sdks/react/migration/3.x/stream/useUpdateStream", + "v1/sdks/react/migration/3.x/stream/useStreamSession", + "v1/sdks/react/migration/3.x/stream/useStreamSessions" + ] + }, + { + "group": "Playback", + "pages": [ + "v1/sdks/react/migration/3.x/playback/usePlaybackInfo" + ] + }, + { + "group": "Constants", + "pages": [ + "v1/sdks/react/migration/3.x/constants/abis", + "v1/sdks/react/migration/3.x/constants/contract-addresses" + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "anchor": "AI Video (Beta)", + "icon": { + "name": "microchip-ai", + "style": "regular" + }, + "groups": [ + { + "group": "AI Video", + "pages": [ + "v1/ai/introduction", + "v1/ai/whats-new", + { + "group": "AI Pipelines", + "icon": { + "name": "wand-magic-sparkles", + "style": "solid" + }, + "pages": [ + "v1/ai/pipelines/overview", + "v1/ai/pipelines/audio-to-text", + "v1/ai/pipelines/image-to-image", + "v1/ai/pipelines/image-to-text", + "v1/ai/pipelines/image-to-video", + "v1/ai/pipelines/llm", + "v1/ai/pipelines/segment-anything-2", + "v1/ai/pipelines/text-to-image", + "v1/ai/pipelines/text-to-speech", + "v1/ai/pipelines/upscale" + ] + }, + { + "group": "Setup an AI Orchestrator", + "icon": { + "name": "robot", + "style": "solid" + }, + "pages": [ + "v1/ai/orchestrators/get-started", + "v1/ai/orchestrators/models-config", + "v1/ai/orchestrators/models-download", + "v1/ai/orchestrators/start-orchestrator", + "v1/ai/orchestrators/ai-worker", + "v1/ai/orchestrators/benchmarking", + "v1/ai/orchestrators/onchain" + ] + }, + { + "group": "Setup an AI Gateway", + "icon": { + "name": "signal-stream", + "style": "solid" + }, + "pages": [ + "v1/ai/gateways/get-started", + "v1/ai/gateways/start-gateway", + "v1/ai/gateways/onchain" + ] + }, + { + "group": "AI Builders", + "icon": { + "name": "screwdriver-wrench", + "style": "solid" + }, + "pages": [ + "v1/ai/builders/get-started", + "v1/ai/builders/gateways", + "v1/ai/builders/showcase" + ] + }, + { + "group": "How to Contribute", + "icon": { + "name": "heart", + "style": "solid" + }, + "pages": ["ai/contributors/coming-soon"] + }, + { + "group": "SDKs", + "icon": "brackets-curly", + "pages": [ + "v1/ai/sdks/overview", + "v1/ai/sdks/go", + "v1/ai/sdks/javascript", + "v1/ai/sdks/python" + ] + }, + { + "group": "AI API Reference", + "icon": "rectangle-terminal", + "pages": [ + "v1/ai/api-reference/overview", + "v1/ai/api-reference/audio-to-text", + "v1/ai/api-reference/image-to-image", + "v1/ai/api-reference/image-to-text", + "v1/ai/api-reference/image-to-video", + "v1/ai/api-reference/llm", + "v1/ai/api-reference/segment-anything-2", + "v1/ai/api-reference/text-to-image", + "v1/ai/api-reference/text-to-speech", + "v1/ai/api-reference/upscale" + ] + } + ] + } + ] + }, + { + "anchor": " ", + "icon": "horizontal-rule", + "href": " " + }, + { + "anchor": "Livepeer Studio\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u279a", + "href": "https://livepeer.studio", + "icon": "clapperboard-play" + }, + { + "anchor": "What's New\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u279a", + "href": "https://livepeer.canny.io/changelog", + "icon": "rocket" + }, + { + "anchor": "Community\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u279a", 
+ "href": "https://discord.gg/livepeer", + "icon": { + "name": "discord", + "style": "brands" + } + } + ] + }, + { + "dropdown": "Delegators", + "icon": "coins", + "anchors": [ + { + "anchor": "Documentation", + "icon": "code", + "groups": [ + { + "group": "Getting Started", + "pages": [ + "v1/delegators/introduction", + "v1/delegators/quick-start", + "v1/delegators/livepeer-studio-cli" + ] + }, + { + "group": "Guides", + "pages": [ + "v1/delegators/guides/bridge-lpt-to-arbitrum", + "v1/delegators/guides/migrate-stake-to-arbitrum", + "v1/delegators/guides/yield-calculation" + ] + }, + { + "group": "References", + "pages": [ + "v1/references/api-support-matrix", + { + "group": "Livepeer Node Software", + "icon": "golang", + "pages": [ + "v1/references/go-livepeer/bandwidth-requirements", + "v1/references/go-livepeer/cli-reference", + "v1/references/go-livepeer/gpu-support", + "v1/references/go-livepeer/hardware-requirements", + "v1/references/go-livepeer/prometheus-metrics" + ] + }, + "v1/references/contract-addresses", + "v1/references/example-applications", + "v1/references/awesome-livepeer", + { + "group": "FAQs", + "icon": "book", + "pages": [ + "v1/references/knowledge-base/livestream", + "v1/references/knowledge-base/playback", + "v1/references/knowledge-base/vod" + ] + } + ] + } + ] + }, + { + "anchor": "API Reference", + "icon": "rectangle-terminal", + "groups": [ + { + "group": "Overview", + "pages": [ + "v1/api-reference/overview/introduction", + "v1/api-reference/overview/authentication" + ] + }, + { + "group": "APIs", + "pages": [ + { + "group": "Asset", + "icon": "video", + "pages": [ + "v1/api-reference/asset/overview", + "v1/api-reference/asset/upload", + "v1/api-reference/asset/upload-via-url", + "v1/api-reference/asset/get", + "v1/api-reference/asset/update", + "v1/api-reference/asset/delete", + "v1/api-reference/asset/get-all" + ] + }, + { + "group": "Livestream", + "icon": "camera", + "pages": [ + "v1/api-reference/stream/overview", + "v1/api-reference/stream/create", + "v1/api-reference/stream/get", + "v1/api-reference/stream/update", + "v1/api-reference/stream/terminate", + "v1/api-reference/stream/add-multistream-target", + "v1/api-reference/stream/delete-multistream-target", + "v1/api-reference/stream/delete", + "v1/api-reference/stream/get-all", + "v1/api-reference/stream/create-clip", + "v1/api-reference/stream/get-clip" + ] + }, + { + "group": "Generate", + "icon": "microchip-ai", + "pages": [ + "v1/api-reference/generate/overview", + "v1/api-reference/generate/audio-to-text", + "v1/api-reference/generate/text-to-image", + "v1/api-reference/generate/image-to-image", + "v1/api-reference/generate/image-to-video", + "v1/api-reference/generate/llm", + "v1/api-reference/generate/segment-anything-2", + "v1/api-reference/generate/upscale" + ] + }, + { + "group": "Multistream target", + "icon": "arrows-split-up-and-left", + "pages": [ + "v1/api-reference/multistream/overview", + "v1/api-reference/multistream/create", + "v1/api-reference/multistream/get", + "v1/api-reference/multistream/update", + "v1/api-reference/multistream/delete", + "v1/api-reference/multistream/get-all" + ] + }, + { + "group": "Session", + "icon": "film", + "pages": [ + "v1/api-reference/session/overview", + "v1/api-reference/session/get", + "v1/api-reference/session/get-all", + "v1/api-reference/session/get-recording", + "v1/api-reference/session/get-clip" + ] + }, + { + "group": "Access control", + "icon": "lock", + "pages": [ + "v1/api-reference/signing-key/overview", + 
"v1/api-reference/signing-key/create", + "v1/api-reference/signing-key/get", + "v1/api-reference/signing-key/update", + "v1/api-reference/signing-key/delete", + "v1/api-reference/signing-key/get-all" + ] + }, + { + "group": "Webhook", + "icon": "bell", + "pages": [ + "v1/api-reference/webhook/overview", + "v1/api-reference/webhook/create", + "v1/api-reference/webhook/get", + "v1/api-reference/webhook/update", + "v1/api-reference/webhook/delete", + "v1/api-reference/webhook/get-all" + ] + }, + { + "group": "Task", + "icon": "gear", + "pages": [ + "v1/api-reference/task/overview", + "v1/api-reference/task/get-all", + "v1/api-reference/task/get" + ] + }, + { + "group": "Playback", + "icon": "play", + "pages": [ + "v1/api-reference/playback/overview", + "v1/api-reference/playback/get" + ] + }, + { + "group": "Transcode", + "icon": "photo-film", + "pages": [ + "v1/api-reference/transcode/overview", + "v1/api-reference/transcode/create" + ] + }, + { + "group": "Viewership", + "icon": "chart-bar", + "pages": [ + "v1/api-reference/viewership/get-realtime-viewership", + "v1/api-reference/viewership/get-viewership-metrics", + "v1/api-reference/viewership/get-usage-metrics", + "v1/api-reference/viewership/get-public-total-views", + "v1/api-reference/viewership/get-creators-metrics" + ] + } + ] + } + ] + }, + { + "anchor": "SDKs", + "icon": "brackets-curly", + "groups": [ + { + "group": "Overview", + "pages": ["v1/sdks/introduction"] + }, + { + "group": "Server-side SDKs", + "pages": [ + "v1/sdks/javascript", + "v1/sdks/go", + "v1/sdks/python" + ] + }, + { + "group": "React Components", + "icon": "circle-play", + "pages": [ + "v1/sdks/react/getting-started", + { + "group": "Player", + "icon": "circle-play", + "pages": [ + "v1/sdks/react/player/Root", + "v1/sdks/react/player/Container", + "v1/sdks/react/player/Video", + "v1/sdks/react/player/Error", + "v1/sdks/react/player/Loading", + "v1/sdks/react/player/Portal", + "v1/sdks/react/player/Poster", + { + "group": "Controls", + "pages": [ + "v1/sdks/react/player/Controls", + "v1/sdks/react/player/Clip", + "v1/sdks/react/player/Fullscreen", + "v1/sdks/react/player/Live", + "v1/sdks/react/player/PictureInPicture", + "v1/sdks/react/player/Play", + "v1/sdks/react/player/RateSelect", + "v1/sdks/react/player/Seek", + "v1/sdks/react/player/Time", + "v1/sdks/react/player/VideoQualitySelect", + "v1/sdks/react/player/Volume" + ] + }, + { + "group": "Functions", + "pages": [ + "v1/sdks/react/player/get-src", + "v1/sdks/react/player/useMediaContext" + ] + } + ] + }, + { + "group": "Broadcast", + "icon": "signal-stream", + "pages": [ + "v1/sdks/react/broadcast/Root", + "v1/sdks/react/broadcast/Container", + "v1/sdks/react/broadcast/Video", + "v1/sdks/react/broadcast/Enabled", + "v1/sdks/react/broadcast/Error", + "v1/sdks/react/broadcast/Loading", + "v1/sdks/react/broadcast/Portal", + { + "group": "Controls", + "pages": [ + "v1/sdks/react/broadcast/Controls", + "v1/sdks/react/broadcast/Audio", + "v1/sdks/react/broadcast/Camera", + "v1/sdks/react/broadcast/Fullscreen", + "v1/sdks/react/broadcast/PictureInPicture", + "v1/sdks/react/broadcast/Screenshare", + "v1/sdks/react/broadcast/Source", + "v1/sdks/react/broadcast/Status" + ] + }, + { + "group": "Functions", + "pages": [ + "v1/sdks/react/broadcast/get-ingest", + "v1/sdks/react/broadcast/useBroadcastContext" + ] + } + ] + }, + { + "group": "Examples", + "icon": "clipboard", + "pages": [ + "v1/sdks/react/Player", + "v1/sdks/react/Broadcast" + ] + }, + { + "group": "Migration", + "icon": "right-left", + "pages": [ + 
"v1/sdks/react/migration/migration-4.x", + { + "group": "Livepeer React (3.x and below)", + "pages": [ + "v1/sdks/react/migration/3.x/getting-started", + "v1/sdks/react/migration/3.x/client", + "v1/sdks/react/migration/3.x/LivepeerConfig", + "v1/sdks/react/migration/3.x/Player", + "v1/sdks/react/migration/3.x/Broadcast", + { + "group": "Asset", + "pages": [ + "v1/sdks/react/migration/3.x/asset/useCreateAsset", + "v1/sdks/react/migration/3.x/asset/useAsset", + "v1/sdks/react/migration/3.x/asset/useUpdateAsset", + "v1/sdks/react/migration/3.x/asset/useAssetMetrics" + ] + }, + { + "group": "Stream", + "pages": [ + "v1/sdks/react/migration/3.x/stream/useCreateStream", + "v1/sdks/react/migration/3.x/stream/useStream", + "v1/sdks/react/migration/3.x/stream/useUpdateStream", + "v1/sdks/react/migration/3.x/stream/useStreamSession", + "v1/sdks/react/migration/3.x/stream/useStreamSessions" + ] + }, + { + "group": "Playback", + "pages": [ + "v1/sdks/react/migration/3.x/playback/usePlaybackInfo" + ] + }, + { + "group": "Constants", + "pages": [ + "v1/sdks/react/migration/3.x/constants/abis", + "v1/sdks/react/migration/3.x/constants/contract-addresses" + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "anchor": "AI Video (Beta)", + "icon": { + "name": "microchip-ai", + "style": "regular" + }, + "groups": [ + { + "group": "AI Video", + "pages": [ + "v1/ai/introduction", + "v1/ai/whats-new", + { + "group": "AI Pipelines", + "icon": { + "name": "wand-magic-sparkles", + "style": "solid" + }, + "pages": [ + "v1/ai/pipelines/overview", + "v1/ai/pipelines/audio-to-text", + "v1/ai/pipelines/image-to-image", + "v1/ai/pipelines/image-to-text", + "v1/ai/pipelines/image-to-video", + "v1/ai/pipelines/llm", + "v1/ai/pipelines/segment-anything-2", + "v1/ai/pipelines/text-to-image", + "v1/ai/pipelines/text-to-speech", + "v1/ai/pipelines/upscale" + ] + }, + { + "group": "Setup an AI Orchestrator", + "icon": { + "name": "robot", + "style": "solid" + }, + "pages": [ + "v1/ai/orchestrators/get-started", + "v1/ai/orchestrators/models-config", + "v1/ai/orchestrators/models-download", + "v1/ai/orchestrators/start-orchestrator", + "v1/ai/orchestrators/ai-worker", + "v1/ai/orchestrators/benchmarking", + "v1/ai/orchestrators/onchain" + ] + }, + { + "group": "Setup an AI Gateway", + "icon": { + "name": "signal-stream", + "style": "solid" + }, + "pages": [ + "v1/ai/gateways/get-started", + "v1/ai/gateways/start-gateway", + "v1/ai/gateways/onchain" + ] + }, + { + "group": "AI Builders", + "icon": { + "name": "screwdriver-wrench", + "style": "solid" + }, + "pages": [ + "v1/ai/builders/get-started", + "v1/ai/builders/gateways", + "v1/ai/builders/showcase" + ] + }, + { + "group": "How to Contribute", + "icon": { + "name": "heart", + "style": "solid" + }, + "pages": ["ai/contributors/coming-soon"] + }, + { + "group": "SDKs", + "icon": "brackets-curly", + "pages": [ + "v1/ai/sdks/overview", + "v1/ai/sdks/go", + "v1/ai/sdks/javascript", + "v1/ai/sdks/python" + ] + }, + { + "group": "AI API Reference", + "icon": "rectangle-terminal", + "pages": [ + "v1/ai/api-reference/overview", + "v1/ai/api-reference/audio-to-text", + "v1/ai/api-reference/image-to-image", + "v1/ai/api-reference/image-to-text", + "v1/ai/api-reference/image-to-video", + "v1/ai/api-reference/llm", + "v1/ai/api-reference/segment-anything-2", + "v1/ai/api-reference/text-to-image", + "v1/ai/api-reference/text-to-speech", + "v1/ai/api-reference/upscale" + ] + } + ] + } + ] + }, + { + "anchor": " ", + "icon": "horizontal-rule", + "href": " " + }, + { + "anchor": "Livepeer 
Studio\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u279a", + "href": "https://livepeer.studio", + "icon": "clapperboard-play" + }, + { + "anchor": "What's New\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u279a", + "href": "https://livepeer.canny.io/changelog", + "icon": "rocket" + }, + { + "anchor": "Community\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u279a", + "href": "https://discord.gg/livepeer", + "icon": { + "name": "discord", + "style": "brands" + } + } + ] + }, + { + "dropdown": "Orchestrators", + "icon": "microchip", + "anchors": [ + { + "anchor": "Documentation", + "icon": "code", + "groups": [ + { + "group": "Getting Started", + "pages": [ + "v1/orchestrators/introduction", + "v1/orchestrators/quick-start", + "v1/orchestrators/livepeer-studio-cli" + ] + }, + { + "group": "Guides", + "pages": [ + "v1/orchestrators/guides/get-started", + "v1/orchestrators/guides/install-go-livepeer", + "v1/orchestrators/guides/connect-to-arbitrum", + "v1/orchestrators/guides/configure-reward-calling", + "v1/orchestrators/guides/set-session-limits", + "v1/orchestrators/guides/set-pricing", + "v1/orchestrators/guides/benchmark-transcoding", + "v1/orchestrators/guides/assess-capabilities", + "v1/orchestrators/guides/monitor-metrics", + "v1/orchestrators/guides/vote", + "v1/orchestrators/guides/dual-mine", + "v1/orchestrators/guides/o-t-split", + "v1/orchestrators/guides/migrate-to-arbitrum", + "v1/orchestrators/guides/migrate-from-contract-wallet", + "v1/orchestrators/guides/gateway-introspection", + "v1/orchestrators/guides/troubleshoot" + ] + }, + { + "group": "References", + "pages": [ + "v1/references/api-support-matrix", + { + "group": "Livepeer Node Software", + "icon": "golang", + "pages": [ + "v1/references/go-livepeer/bandwidth-requirements", + "v1/references/go-livepeer/cli-reference", + "v1/references/go-livepeer/gpu-support", + "v1/references/go-livepeer/hardware-requirements", + "v1/references/go-livepeer/prometheus-metrics" + ] + }, + "v1/references/contract-addresses", + "v1/references/example-applications", + "v1/references/awesome-livepeer", + { + "group": "FAQs", + "icon": "book", + "pages": [ + "v1/references/knowledge-base/livestream", + "v1/references/knowledge-base/playback", + "v1/references/knowledge-base/vod" + ] + } + ] + } + ] + }, + { + "anchor": "API Reference", + "icon": "rectangle-terminal", + "groups": [ + { + "group": "Overview", + "pages": [ + "v1/api-reference/overview/introduction", + "v1/api-reference/overview/authentication" + ] + }, + { + "group": "APIs", + "pages": [ + { + "group": "Asset", + "icon": "video", + "pages": [ + "v1/api-reference/asset/overview", + "v1/api-reference/asset/upload", + "v1/api-reference/asset/upload-via-url", + "v1/api-reference/asset/get", + "v1/api-reference/asset/update", + "v1/api-reference/asset/delete", + "v1/api-reference/asset/get-all" + ] + }, + { + "group": "Livestream", + "icon": "camera", + "pages": [ + "v1/api-reference/stream/overview", + "v1/api-reference/stream/create", + "v1/api-reference/stream/get", + "v1/api-reference/stream/update", + "v1/api-reference/stream/terminate", + "v1/api-reference/stream/add-multistream-target", + "v1/api-reference/stream/delete-multistream-target", + "v1/api-reference/stream/delete", + "v1/api-reference/stream/get-all", + "v1/api-reference/stream/create-clip", + "v1/api-reference/stream/get-clip" + ] + }, + { + "group": 
"Generate", + "icon": "microchip-ai", + "pages": [ + "v1/api-reference/generate/overview", + "v1/api-reference/generate/audio-to-text", + "v1/api-reference/generate/text-to-image", + "v1/api-reference/generate/image-to-image", + "v1/api-reference/generate/image-to-video", + "v1/api-reference/generate/llm", + "v1/api-reference/generate/segment-anything-2", + "v1/api-reference/generate/upscale" + ] + }, + { + "group": "Multistream target", + "icon": "arrows-split-up-and-left", + "pages": [ + "v1/api-reference/multistream/overview", + "v1/api-reference/multistream/create", + "v1/api-reference/multistream/get", + "v1/api-reference/multistream/update", + "v1/api-reference/multistream/delete", + "v1/api-reference/multistream/get-all" + ] + }, + { + "group": "Session", + "icon": "film", + "pages": [ + "v1/api-reference/session/overview", + "v1/api-reference/session/get", + "v1/api-reference/session/get-all", + "v1/api-reference/session/get-recording", + "v1/api-reference/session/get-clip" + ] + }, + { + "group": "Access control", + "icon": "lock", + "pages": [ + "v1/api-reference/signing-key/overview", + "v1/api-reference/signing-key/create", + "v1/api-reference/signing-key/get", + "v1/api-reference/signing-key/update", + "v1/api-reference/signing-key/delete", + "v1/api-reference/signing-key/get-all" + ] + }, + { + "group": "Webhook", + "icon": "bell", + "pages": [ + "v1/api-reference/webhook/overview", + "v1/api-reference/webhook/create", + "v1/api-reference/webhook/get", + "v1/api-reference/webhook/update", + "v1/api-reference/webhook/delete", + "v1/api-reference/webhook/get-all" + ] + }, + { + "group": "Task", + "icon": "gear", + "pages": [ + "v1/api-reference/task/overview", + "v1/api-reference/task/get-all", + "v1/api-reference/task/get" + ] + }, + { + "group": "Playback", + "icon": "play", + "pages": [ + "v1/api-reference/playback/overview", + "v1/api-reference/playback/get" + ] + }, + { + "group": "Transcode", + "icon": "photo-film", + "pages": [ + "v1/api-reference/transcode/overview", + "v1/api-reference/transcode/create" + ] + }, + { + "group": "Viewership", + "icon": "chart-bar", + "pages": [ + "v1/api-reference/viewership/get-realtime-viewership", + "v1/api-reference/viewership/get-viewership-metrics", + "v1/api-reference/viewership/get-usage-metrics", + "v1/api-reference/viewership/get-public-total-views", + "v1/api-reference/viewership/get-creators-metrics" + ] + } + ] + } + ] + }, + { + "anchor": "SDKs", + "icon": "brackets-curly", + "groups": [ + { + "group": "Overview", + "pages": ["v1/sdks/introduction"] + }, + { + "group": "Server-side SDKs", + "pages": [ + "v1/sdks/javascript", + "v1/sdks/go", + "v1/sdks/python" + ] + }, + { + "group": "React Components", + "icon": "circle-play", + "pages": [ + "v1/sdks/react/getting-started", + { + "group": "Player", + "icon": "circle-play", + "pages": [ + "v1/sdks/react/player/Root", + "v1/sdks/react/player/Container", + "v1/sdks/react/player/Video", + "v1/sdks/react/player/Error", + "v1/sdks/react/player/Loading", + "v1/sdks/react/player/Portal", + "v1/sdks/react/player/Poster", + { + "group": "Controls", + "pages": [ + "v1/sdks/react/player/Controls", + "v1/sdks/react/player/Clip", + "v1/sdks/react/player/Fullscreen", + "v1/sdks/react/player/Live", + "v1/sdks/react/player/PictureInPicture", + "v1/sdks/react/player/Play", + "v1/sdks/react/player/RateSelect", + "v1/sdks/react/player/Seek", + "v1/sdks/react/player/Time", + "v1/sdks/react/player/VideoQualitySelect", + "v1/sdks/react/player/Volume" + ] + }, + { + "group": "Functions", + 
"pages": [ + "v1/sdks/react/player/get-src", + "v1/sdks/react/player/useMediaContext" + ] + } + ] + }, + { + "group": "Broadcast", + "icon": "signal-stream", + "pages": [ + "v1/sdks/react/broadcast/Root", + "v1/sdks/react/broadcast/Container", + "v1/sdks/react/broadcast/Video", + "v1/sdks/react/broadcast/Enabled", + "v1/sdks/react/broadcast/Error", + "v1/sdks/react/broadcast/Loading", + "v1/sdks/react/broadcast/Portal", + { + "group": "Controls", + "pages": [ + "v1/sdks/react/broadcast/Controls", + "v1/sdks/react/broadcast/Audio", + "v1/sdks/react/broadcast/Camera", + "v1/sdks/react/broadcast/Fullscreen", + "v1/sdks/react/broadcast/PictureInPicture", + "v1/sdks/react/broadcast/Screenshare", + "v1/sdks/react/broadcast/Source", + "v1/sdks/react/broadcast/Status" + ] + }, + { + "group": "Functions", + "pages": [ + "v1/sdks/react/broadcast/get-ingest", + "v1/sdks/react/broadcast/useBroadcastContext" + ] + } + ] + }, + { + "group": "Examples", + "icon": "clipboard", + "pages": [ + "v1/sdks/react/Player", + "v1/sdks/react/Broadcast" + ] + }, + { + "group": "Migration", + "icon": "right-left", + "pages": [ + "v1/sdks/react/migration/migration-4.x", + { + "group": "Livepeer React (3.x and below)", + "pages": [ + "v1/sdks/react/migration/3.x/getting-started", + "v1/sdks/react/migration/3.x/client", + "v1/sdks/react/migration/3.x/LivepeerConfig", + "v1/sdks/react/migration/3.x/Player", + "v1/sdks/react/migration/3.x/Broadcast", + { + "group": "Asset", + "pages": [ + "v1/sdks/react/migration/3.x/asset/useCreateAsset", + "v1/sdks/react/migration/3.x/asset/useAsset", + "v1/sdks/react/migration/3.x/asset/useUpdateAsset", + "v1/sdks/react/migration/3.x/asset/useAssetMetrics" + ] + }, + { + "group": "Stream", + "pages": [ + "v1/sdks/react/migration/3.x/stream/useCreateStream", + "v1/sdks/react/migration/3.x/stream/useStream", + "v1/sdks/react/migration/3.x/stream/useUpdateStream", + "v1/sdks/react/migration/3.x/stream/useStreamSession", + "v1/sdks/react/migration/3.x/stream/useStreamSessions" + ] + }, + { + "group": "Playback", + "pages": [ + "v1/sdks/react/migration/3.x/playback/usePlaybackInfo" + ] + }, + { + "group": "Constants", + "pages": [ + "v1/sdks/react/migration/3.x/constants/abis", + "v1/sdks/react/migration/3.x/constants/contract-addresses" + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "anchor": "AI Video (Beta)", + "icon": { + "name": "microchip-ai", + "style": "regular" + }, + "groups": [ + { + "group": "AI Video", + "pages": [ + "v1/ai/introduction", + "v1/ai/whats-new", + { + "group": "AI Pipelines", + "icon": { + "name": "wand-magic-sparkles", + "style": "solid" + }, + "pages": [ + "v1/ai/pipelines/overview", + "v1/ai/pipelines/audio-to-text", + "v1/ai/pipelines/image-to-image", + "v1/ai/pipelines/image-to-text", + "v1/ai/pipelines/image-to-video", + "v1/ai/pipelines/llm", + "v1/ai/pipelines/segment-anything-2", + "v1/ai/pipelines/text-to-image", + "v1/ai/pipelines/text-to-speech", + "v1/ai/pipelines/upscale" + ] + }, + { + "group": "Setup an AI Orchestrator", + "icon": { + "name": "robot", + "style": "solid" + }, + "pages": [ + "v1/ai/orchestrators/get-started", + "v1/ai/orchestrators/models-config", + "v1/ai/orchestrators/models-download", + "v1/ai/orchestrators/start-orchestrator", + "v1/ai/orchestrators/ai-worker", + "v1/ai/orchestrators/benchmarking", + "v1/ai/orchestrators/onchain" + ] + }, + { + "group": "Setup an AI Gateway", + "icon": { + "name": "signal-stream", + "style": "solid" + }, + "pages": [ + "v1/ai/gateways/get-started", + "v1/ai/gateways/start-gateway", + 
"v1/ai/gateways/onchain" + ] + }, + { + "group": "AI Builders", + "icon": { + "name": "screwdriver-wrench", + "style": "solid" + }, + "pages": [ + "v1/ai/builders/get-started", + "v1/ai/builders/gateways", + "v1/ai/builders/showcase" + ] + }, + { + "group": "How to Contribute", + "icon": { + "name": "heart", + "style": "solid" + }, + "pages": ["ai/contributors/coming-soon"] + }, + { + "group": "SDKs", + "icon": "brackets-curly", + "pages": [ + "v1/ai/sdks/overview", + "v1/ai/sdks/go", + "v1/ai/sdks/javascript", + "v1/ai/sdks/python" + ] + }, + { + "group": "AI API Reference", + "icon": "rectangle-terminal", + "pages": [ + "v1/ai/api-reference/overview", + "v1/ai/api-reference/audio-to-text", + "v1/ai/api-reference/image-to-image", + "v1/ai/api-reference/image-to-text", + "v1/ai/api-reference/image-to-video", + "v1/ai/api-reference/llm", + "v1/ai/api-reference/segment-anything-2", + "v1/ai/api-reference/text-to-image", + "v1/ai/api-reference/text-to-speech", + "v1/ai/api-reference/upscale" + ] + } + ] + } + ] + }, + { + "anchor": " ", + "icon": "horizontal-rule", + "href": " " + }, + { + "anchor": "Livepeer Studio\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u279a", + "href": "https://livepeer.studio", + "icon": "clapperboard-play" + }, + { + "anchor": "What's New\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u279a", + "href": "https://livepeer.canny.io/changelog", + "icon": "rocket" + }, + { + "anchor": "Community\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u279a", + "href": "https://discord.gg/livepeer", + "icon": { + "name": "discord", + "style": "brands" + } + } + ] + }, + { + "dropdown": "Gateways", + "icon": "torii-gate", + "anchors": [ + { + "anchor": "Documentation", + "icon": "code", + "groups": [ + { + "group": "Getting Started", + "pages": [ + "v1/gateways/introduction", + "v1/gateways/quick-start", + "v1/gateways/livepeer-studio-cli" + ] + }, + { + "group": "Guides", + "pages": [ + "v1/gateways/guides/gateway-overview", + "v1/gateways/guides/docker-install", + "v1/gateways/guides/linux-install", + "v1/gateways/guides/windows-install", + "v1/gateways/guides/transcoding-options", + "v1/gateways/guides/fund-gateway", + "v1/gateways/guides/publish-content", + "v1/gateways/guides/playback-content" + ] + }, + { + "group": "References", + "pages": [ + "v1/references/api-support-matrix", + { + "group": "Livepeer Node Software", + "icon": "golang", + "pages": [ + "v1/references/go-livepeer/bandwidth-requirements", + "v1/references/go-livepeer/cli-reference", + "v1/references/go-livepeer/gpu-support", + "v1/references/go-livepeer/hardware-requirements", + "v1/references/go-livepeer/prometheus-metrics" + ] + }, + "v1/references/contract-addresses", + "v1/references/example-applications", + "v1/references/awesome-livepeer", + { + "group": "FAQs", + "icon": "book", + "pages": [ + "v1/references/knowledge-base/livestream", + "v1/references/knowledge-base/playback", + "v1/references/knowledge-base/vod" + ] + } + ] + } + ] + }, + { + "anchor": "API Reference", + "icon": "rectangle-terminal", + "groups": [ + { + "group": "Overview", + "pages": [ + "v1/api-reference/overview/introduction", + "v1/api-reference/overview/authentication" + ] + }, + { + "group": "APIs", + "pages": [ + { + "group": "Asset", + "icon": "video", + "pages": [ + "v1/api-reference/asset/overview", + "v1/api-reference/asset/upload", + 
"v1/api-reference/asset/upload-via-url", + "v1/api-reference/asset/get", + "v1/api-reference/asset/update", + "v1/api-reference/asset/delete", + "v1/api-reference/asset/get-all" + ] + }, + { + "group": "Livestream", + "icon": "camera", + "pages": [ + "v1/api-reference/stream/overview", + "v1/api-reference/stream/create", + "v1/api-reference/stream/get", + "v1/api-reference/stream/update", + "v1/api-reference/stream/terminate", + "v1/api-reference/stream/add-multistream-target", + "v1/api-reference/stream/delete-multistream-target", + "v1/api-reference/stream/delete", + "v1/api-reference/stream/get-all", + "v1/api-reference/stream/create-clip", + "v1/api-reference/stream/get-clip" + ] + }, + { + "group": "Generate", + "icon": "microchip-ai", + "pages": [ + "v1/api-reference/generate/overview", + "v1/api-reference/generate/audio-to-text", + "v1/api-reference/generate/text-to-image", + "v1/api-reference/generate/image-to-image", + "v1/api-reference/generate/image-to-video", + "v1/api-reference/generate/llm", + "v1/api-reference/generate/segment-anything-2", + "v1/api-reference/generate/upscale" + ] + }, + { + "group": "Multistream target", + "icon": "arrows-split-up-and-left", + "pages": [ + "v1/api-reference/multistream/overview", + "v1/api-reference/multistream/create", + "v1/api-reference/multistream/get", + "v1/api-reference/multistream/update", + "v1/api-reference/multistream/delete", + "v1/api-reference/multistream/get-all" + ] + }, + { + "group": "Session", + "icon": "film", + "pages": [ + "v1/api-reference/session/overview", + "v1/api-reference/session/get", + "v1/api-reference/session/get-all", + "v1/api-reference/session/get-recording", + "v1/api-reference/session/get-clip" + ] + }, + { + "group": "Access control", + "icon": "lock", + "pages": [ + "v1/api-reference/signing-key/overview", + "v1/api-reference/signing-key/create", + "v1/api-reference/signing-key/get", + "v1/api-reference/signing-key/update", + "v1/api-reference/signing-key/delete", + "v1/api-reference/signing-key/get-all" + ] + }, + { + "group": "Webhook", + "icon": "bell", + "pages": [ + "v1/api-reference/webhook/overview", + "v1/api-reference/webhook/create", + "v1/api-reference/webhook/get", + "v1/api-reference/webhook/update", + "v1/api-reference/webhook/delete", + "v1/api-reference/webhook/get-all" + ] + }, + { + "group": "Task", + "icon": "gear", + "pages": [ + "v1/api-reference/task/overview", + "v1/api-reference/task/get-all", + "v1/api-reference/task/get" + ] + }, + { + "group": "Playback", + "icon": "play", + "pages": [ + "v1/api-reference/playback/overview", + "v1/api-reference/playback/get" + ] + }, + { + "group": "Transcode", + "icon": "photo-film", + "pages": [ + "v1/api-reference/transcode/overview", + "v1/api-reference/transcode/create" + ] + }, + { + "group": "Viewership", + "icon": "chart-bar", + "pages": [ + "v1/api-reference/viewership/get-realtime-viewership", + "v1/api-reference/viewership/get-viewership-metrics", + "v1/api-reference/viewership/get-usage-metrics", + "v1/api-reference/viewership/get-public-total-views", + "v1/api-reference/viewership/get-creators-metrics" + ] + } + ] + } + ] + }, + { + "anchor": "SDKs", + "icon": "brackets-curly", + "groups": [ + { + "group": "Overview", + "pages": ["v1/sdks/introduction"] + }, + { + "group": "Server-side SDKs", + "pages": [ + "v1/sdks/javascript", + "v1/sdks/go", + "v1/sdks/python" + ] + }, + { + "group": "React Components", + "icon": "circle-play", + "pages": [ + "v1/sdks/react/getting-started", + { + "group": "Player", + "icon": "circle-play", + 
"pages": [ + "v1/sdks/react/player/Root", + "v1/sdks/react/player/Container", + "v1/sdks/react/player/Video", + "v1/sdks/react/player/Error", + "v1/sdks/react/player/Loading", + "v1/sdks/react/player/Portal", + "v1/sdks/react/player/Poster", + { + "group": "Controls", + "pages": [ + "v1/sdks/react/player/Controls", + "v1/sdks/react/player/Clip", + "v1/sdks/react/player/Fullscreen", + "v1/sdks/react/player/Live", + "v1/sdks/react/player/PictureInPicture", + "v1/sdks/react/player/Play", + "v1/sdks/react/player/RateSelect", + "v1/sdks/react/player/Seek", + "v1/sdks/react/player/Time", + "v1/sdks/react/player/VideoQualitySelect", + "v1/sdks/react/player/Volume" + ] + }, + { + "group": "Functions", + "pages": [ + "v1/sdks/react/player/get-src", + "v1/sdks/react/player/useMediaContext" + ] + } + ] + }, + { + "group": "Broadcast", + "icon": "signal-stream", + "pages": [ + "v1/sdks/react/broadcast/Root", + "v1/sdks/react/broadcast/Container", + "v1/sdks/react/broadcast/Video", + "v1/sdks/react/broadcast/Enabled", + "v1/sdks/react/broadcast/Error", + "v1/sdks/react/broadcast/Loading", + "v1/sdks/react/broadcast/Portal", + { + "group": "Controls", + "pages": [ + "v1/sdks/react/broadcast/Controls", + "v1/sdks/react/broadcast/Audio", + "v1/sdks/react/broadcast/Camera", + "v1/sdks/react/broadcast/Fullscreen", + "v1/sdks/react/broadcast/PictureInPicture", + "v1/sdks/react/broadcast/Screenshare", + "v1/sdks/react/broadcast/Source", + "v1/sdks/react/broadcast/Status" + ] + }, + { + "group": "Functions", + "pages": [ + "v1/sdks/react/broadcast/get-ingest", + "v1/sdks/react/broadcast/useBroadcastContext" + ] + } + ] + }, + { + "group": "Examples", + "icon": "clipboard", + "pages": [ + "v1/sdks/react/Player", + "v1/sdks/react/Broadcast" + ] + }, + { + "group": "Migration", + "icon": "right-left", + "pages": [ + "v1/sdks/react/migration/migration-4.x", + { + "group": "Livepeer React (3.x and below)", + "pages": [ + "v1/sdks/react/migration/3.x/getting-started", + "v1/sdks/react/migration/3.x/client", + "v1/sdks/react/migration/3.x/LivepeerConfig", + "v1/sdks/react/migration/3.x/Player", + "v1/sdks/react/migration/3.x/Broadcast", + { + "group": "Asset", + "pages": [ + "v1/sdks/react/migration/3.x/asset/useCreateAsset", + "v1/sdks/react/migration/3.x/asset/useAsset", + "v1/sdks/react/migration/3.x/asset/useUpdateAsset", + "v1/sdks/react/migration/3.x/asset/useAssetMetrics" + ] + }, + { + "group": "Stream", + "pages": [ + "v1/sdks/react/migration/3.x/stream/useCreateStream", + "v1/sdks/react/migration/3.x/stream/useStream", + "v1/sdks/react/migration/3.x/stream/useUpdateStream", + "v1/sdks/react/migration/3.x/stream/useStreamSession", + "v1/sdks/react/migration/3.x/stream/useStreamSessions" + ] + }, + { + "group": "Playback", + "pages": [ + "v1/sdks/react/migration/3.x/playback/usePlaybackInfo" + ] + }, + { + "group": "Constants", + "pages": [ + "v1/sdks/react/migration/3.x/constants/abis", + "v1/sdks/react/migration/3.x/constants/contract-addresses" + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "anchor": "AI Video (Beta)", + "icon": { + "name": "microchip-ai", + "style": "regular" + }, + "groups": [ + { + "group": "AI Video", + "pages": [ + "v1/ai/introduction", + "v1/ai/whats-new", + { + "group": "AI Pipelines", + "icon": { + "name": "wand-magic-sparkles", + "style": "solid" + }, + "pages": [ + "v1/ai/pipelines/overview", + "v1/ai/pipelines/audio-to-text", + "v1/ai/pipelines/image-to-image", + "v1/ai/pipelines/image-to-text", + "v1/ai/pipelines/image-to-video", + "v1/ai/pipelines/llm", + 
"v1/ai/pipelines/segment-anything-2", + "v1/ai/pipelines/text-to-image", + "v1/ai/pipelines/text-to-speech", + "v1/ai/pipelines/upscale" + ] + }, + { + "group": "Setup an AI Orchestrator", + "icon": { + "name": "robot", + "style": "solid" + }, + "pages": [ + "v1/ai/orchestrators/get-started", + "v1/ai/orchestrators/models-config", + "v1/ai/orchestrators/models-download", + "v1/ai/orchestrators/start-orchestrator", + "v1/ai/orchestrators/ai-worker", + "v1/ai/orchestrators/benchmarking", + "v1/ai/orchestrators/onchain" + ] + }, + { + "group": "Setup an AI Gateway", + "icon": { + "name": "signal-stream", + "style": "solid" + }, + "pages": [ + "v1/ai/gateways/get-started", + "v1/ai/gateways/start-gateway", + "v1/ai/gateways/onchain" + ] + }, + { + "group": "AI Builders", + "icon": { + "name": "screwdriver-wrench", + "style": "solid" + }, + "pages": [ + "v1/ai/builders/get-started", + "v1/ai/builders/gateways", + "v1/ai/builders/showcase" + ] + }, + { + "group": "How to Contribute", + "icon": { + "name": "heart", + "style": "solid" + }, + "pages": ["ai/contributors/coming-soon"] + }, + { + "group": "SDKs", + "icon": "brackets-curly", + "pages": [ + "v1/ai/sdks/overview", + "v1/ai/sdks/go", + "v1/ai/sdks/javascript", + "v1/ai/sdks/python" + ] + }, + { + "group": "AI API Reference", + "icon": "rectangle-terminal", + "pages": [ + "v1/ai/api-reference/overview", + "v1/ai/api-reference/audio-to-text", + "v1/ai/api-reference/image-to-image", + "v1/ai/api-reference/image-to-text", + "v1/ai/api-reference/image-to-video", + "v1/ai/api-reference/llm", + "v1/ai/api-reference/segment-anything-2", + "v1/ai/api-reference/text-to-image", + "v1/ai/api-reference/text-to-speech", + "v1/ai/api-reference/upscale" + ] + } + ] + } + ] + }, + { + "anchor": " ", + "icon": "horizontal-rule", + "href": " " + }, + { + "anchor": "Livepeer Studio\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u279a", + "href": "https://livepeer.studio", + "icon": "clapperboard-play" + }, + { + "anchor": "What's New\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u279a", + "href": "https://livepeer.canny.io/changelog", + "icon": "rocket" + }, + { + "anchor": "Community\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u279a", + "href": "https://discord.gg/livepeer", + "icon": { + "name": "discord", + "style": "brands" + } + } + ] + } + ] + } + ] + } + ] + }, + "logo": { + "light": "/logo/light.svg", + "dark": "/logo/dark.svg" + }, + "api": { + "openapi": "openapi.yaml", + "mdx": { + "server": "https://livepeer.studio/api" + } + }, + "appearance": { + "default": "dark" + }, + "search": { + "prompt": "Need help? 
Ask our AI" + }, + "footer": { + "links": [ + { + "header": "links", + "items": [ + { + "label": "custom link here", + "href": "https://livepeer.org" + }, + { + "label": "custom link here", + "href": "https://livepeer.org" + }, + { + "label": "custom link here", + "href": "https://livepeer.org" + } + ] + } + ], + "socials": { + "website": "https://forum.livepeer.org", + "github": "https://github.com/livepeer", + "twitter": "https://twitter.com/livepeer", + "discord": "https://discord.gg/livepeer", + "linkedin": "https://www.linkedin.com/company/livepeer" + } + }, + "integrations": { + "ga4": { + "measurementId": "G-P1Z15F6NX4" + } + }, + "navbar": { + "links": [ + { + "label": "", + "href": "https://twitter.com/Livepeer", + "icon": "x-twitter" + }, + { + "label": "", + "href": "https://github.com/livepeer", + "icon": "github" + }, + { + "label": "", + "href": "https://discord.gg/livepeer", + "icon": "discord" + } + ] + }, + "errors": { + "404": { + "redirect": false, + "title": "Ruh oh. This page doesn't exist.", + "description": "\"Rick
Sorry About That." + } + }, + "redirects": [ + { + "source": "/v2/pages/07_resources/redirect", + "destination": "/v2/pages/07_resources/resources_hub" + }, + { + "source": "/v2/pages/08_help/redirect", + "destination": "/v2/pages/08_help/README" + }, + { + "source": "/v1/guides/developing/quickstart", + "destination": "/v1/developers/quick-start" + }, + { + "source": "/v1/guides/overview", + "destination": "/v1/developers/guides/overview" + }, + { + "source": "/v1/guides/developing/player", + "destination": "/v1/developers/guides/playback-an-asset" + }, + { + "source": "/v1/guides/developing/create-a-livestream", + "destination": "/v1/developers/guides/create-livestream" + }, + { + "source": "/v1/guides/developing/stream-via-obs", + "destination": "/v1/developers/guides/stream-via-obs" + }, + { + "source": "/v1/developing/stream-via-browser", + "destination": "/v1/developers/guides/livestream-from-browser" + }, + { + "source": "/v1/guides/developing/upload-a-video-asset", + "destination": "/v1/developers/guides/upload-video-asset" + }, + { + "source": "/v1/guides/developing/mint-a-video-nft", + "destination": "/v1/developers/guides/mint-video-nft" + }, + { + "source": "/v1/guides/developing/dstorage-playback", + "destination": "/v1/developers/guides/dstorage-playback" + }, + { + "source": "/v1/developers/guides/dstorage-playback", + "destination": "/v1/developers/guides/upload-video-asset" + }, + { + "source": "/v1/guides/developing/access-control", + "destination": "/v1/developers/guides/access-control-webhooks" + }, + { + "source": "/v1/guides/developing/access-control-vod", + "destination": "/v1/developers/guides/access-control-webhooks" + }, + { + "source": "/v1/guides/developing/encrypted-vod", + "destination": "/v1/developers/guides/encrypted-asset" + }, + { + "source": "/v1/guides/developing/listen-for-webhooks", + "destination": "/v1/developers/guides/setup-and-listen-to-webhooks" + }, + { + "source": "/v1/guides/developing/multistream", + "destination": "/v1/developers/guides/multistream" + }, + { + "source": "/v1/guides/developing/monitor-stream-health", + "destination": "/v1/developers/guides/monitor-stream-health" + }, + { + "source": "/v1/guides/developing/viewer-engagement", + "destination": "/v1/developers/guides/get-engagement-analytics-via-api" + }, + { + "source": "/v1/guides/developing/transcode-video-storj", + "destination": "/v1/developers/guides/transcode-video-storj" + }, + { + "source": "/v1/guides/developing/transcode-video-w3s", + "destination": "/v1/developers/guides/transcode-video-w3s" + }, + { + "source": "/v1/tutorials/developing/optimize-latency", + "destination": "/v1/developers/guides/optimize-latency-of-a-livestream" + }, + { + "source": "/v1/tutorials/developing/analyze-engagement-timeplus", + "destination": "/v1/developers/guides/get-engagement-analytics-via-timeplus" + }, + { + "source": "/v1/tutorials/developing/visualize-engagement-metrics-grafana", + "destination": "/v1/developers/guides/get-engagement-analytics-via-grafana" + }, + { + "source": "/v1/tutorials/developing/token-gate-videos-using-guildxyz", + "destination": "/v1/developers/tutorials/token-gate-videos-with-lit" + }, + { + "source": "/v1/tutorials/developing/token-gate-videos-using-lit", + "destination": "/v1/developers/tutorials/token-gate-videos-with-lit" + }, + { + "source": "/v1/tutorials/developing/build-decentralized-video-app-with-fvm", + "destination": "/v1/developers/tutorials/decentralized-app-with-fvm" + }, + { + "source": 
"/v1/tutorials/developing/upload-playback-videos-on-ipfs-4everland", + "destination": "/v1/developers/tutorials/upload-playback-videos-4everland" + }, + { + "source": "/v1/tutorials/developing/upload-playback-videos-on-ipfs", + "destination": "/v1/developers/tutorials/upload-playback-videos-on-ipfs" + }, + { + "source": "/v1/tutorials/developing/upload-playback-videos-on-arweave", + "destination": "/v1/developers/tutorials/upload-playback-videos-on-arweave" + }, + { + "source": "/v1/reference/api", + "destination": "/v1/api-reference/overview/introduction" + }, + { + "source": "/v1/reference/deployed-contract-addresses", + "destination": "/v1/references/contract-addresses" + }, + { + "source": "/v1/reference/example-applications", + "destination": "/v1/references/example-applications" + }, + { + "source": "/v1/reference/api-support-matrix", + "destination": "/v1/references/api-support-matrix" + }, + { + "source": "/v1/reference/go-livepeer", + "destination": "/v1/references/go-livepeer/bandwidth-requirements" + }, + { + "source": "/v1/reference/go-livepeer/cli-reference", + "destination": "/v1/references/go-livepeer/cli-reference" + }, + { + "source": "/v1/reference/go-livepeer/gpu-support", + "destination": "/v1/references/go-livepeer/gpu-support" + }, + { + "source": "/v1/reference/go-livepeer/hardware-requirements", + "destination": "/v1/references/go-livepeer/hardware-requirements" + }, + { + "source": "/v1/reference/go-livepeer/bandwidth-requirements", + "destination": "/v1/references/go-livepeer/bandwidth-requirements" + }, + { + "source": "/v1/reference/go-livepeer/prometheus-metrics", + "destination": "/v1/references/go-livepeer/prometheus-metrics" + }, + { + "source": "/v1/guides/delegating/bridge-lpt-to-arbitrum", + "destination": "/v1/delegators/guides/bridge-lpt-to-arbitrum" + }, + { + "source": "/v1/guides/delegating/migrate-stake-to-arbitrum", + "destination": "/v1/delegators/guides/migrate-stake-to-arbitrum" + }, + { + "source": "/v1/delegators/reference/yield-calculation", + "destination": "/v1/delegators/guides/yield-calculation" + }, + { + "source": "/v1/guides/orchestrating/get-started", + "destination": "/v1/orchestrators/guides/get-started" + }, + { + "source": "/v1/guides/orchestrating/install-go-livepeer", + "destination": "/v1/orchestrators/guides/install-go-livepeer" + }, + { + "source": "/v1/guides/orchestrating/connect-to-arbitrum", + "destination": "/v1/orchestrators/guides/connect-to-arbitrum" + }, + { + "source": "/v1/guides/orchestrating/configure-reward-calling", + "destination": "/v1/orchestrators/guides/configure-reward-calling" + }, + { + "source": "/v1/guides/orchestrating/set-session-limits", + "destination": "/v1/orchestrators/guides/set-session-limits" + }, + { + "source": "/v1/guides/orchestrating/set-pricing", + "destination": "/v1/orchestrators/guides/set-pricing" + }, + { + "source": "/v1/guides/orchestrating/benchmark-transcoding", + "destination": "/v1/orchestrators/guides/benchmark-transcoding" + }, + { + "source": "/v1/guides/orchestrating/assess-capabilities", + "destination": "/v1/orchestrators/guides/assess-capabilities" + }, + { + "source": "/v1/guides/orchestrating/monitor-metrics", + "destination": "/v1/orchestrators/guides/monitor-metrics" + }, + { + "source": "/v1/guides/orchestrating/vote", + "destination": "/v1/orchestrators/guides/vote" + }, + { + "source": "/v1/guides/orchestrating/dual-mine", + "destination": "/v1/orchestrators/guides/dual-mine" + }, + { + "source": "/v1/guides/orchestrating/o-t-split", + "destination": 
"/v1/orchestrators/guides/o-t-split" + }, + { + "source": "/v1/guides/orchestrating/migrate-to-arbitrum", + "destination": "/v1/orchestrators/guides/migrate-to-arbitrum" + }, + { + "source": "/v1/guides/orchestrating/migrate-from-contract-wallet", + "destination": "/v1/orchestrators/guides/migrate-from-contract-wallet" + }, + { + "source": "/v1/guides/orchestrating/gateway-introspection", + "destination": "/v1/orchestrators/guides/gateway-introspection" + }, + { + "source": "/v1/guides/orchestrating/troubleshoot", + "destination": "/v1/orchestrators/guides/troubleshoot" + }, + { + "source": "/v1/reference/react", + "destination": "/v1/react/getting-started" + }, + { + "source": "/v1/reference/react/getting-started", + "destination": "/v1/react/getting-started" + }, + { + "source": "/v1/reference/react/client", + "destination": "/v1/react/getting-started" + }, + { + "source": "/v1/reference/react/LivepeerConfig", + "destination": "/v1/sdks/react/migration/3.x/LivepeerConfig" + }, + { + "source": "/v1/reference/react/Player", + "destination": "/v1/react/player/Root" + }, + { + "source": "/v1/reference/react/Broadcast", + "destination": "/v1/react/broadcast/Root" + }, + { + "source": "/v1/reference/react/providers/studio", + "destination": "/v1/sdks/react/migration/3.x/providers/studio" + }, + { + "source": "/v1/reference/react/asset/useAsset", + "destination": "/v1/sdks/react/migration/3.x/asset/useAsset" + }, + { + "source": "/v1/reference/react/asset/useCreateAsset", + "destination": "/v1/sdks/react/migration/3.x/asset/useCreateAsset" + }, + { + "source": "/v1/reference/react/asset/useAssetMetrics", + "destination": "/v1/sdks/react/migration/3.x/asset/useAssetMetrics" + }, + { + "source": "/v1/reference/react/asset/useUpdateAsset", + "destination": "/v1/sdks/react/migration/3.x/asset/useUpdateAsset" + }, + { + "source": "/v1/reference/react/stream/useStream", + "destination": "/v1/sdks/react/migration/3.x/stream/useStream" + }, + { + "source": "/v1/reference/react/stream/useStreamSession", + "destination": "/v1/sdks/react/migration/3.x/stream/useStreamSession" + }, + { + "source": "/v1/reference/react/stream/useStreamSessions", + "destination": "/v1/sdks/react/migration/3.x/stream/useStreamSessions" + }, + { + "source": "/v1/reference/react/stream/useCreateStream", + "destination": "/v1/sdks/react/migration/3.x/stream/useCreateStream" + }, + { + "source": "/v1/reference/react/stream/useUpdateStream", + "destination": "/v1/sdks/react/migration/3.x/stream/useUpdateStream" + }, + { + "source": "/v1/reference/react/playback/usePlaybackInfo", + "destination": "/v1/sdks/react/migration/3.x/playback/usePlaybackInfo" + }, + { + "source": "/v1/reference/react/constants/abis", + "destination": "/v1/sdks/react/migration/3.x/constants/abis" + }, + { + "source": "/v1/reference/react/constants/contract-addresses", + "destination": "/v1/sdks/react/migration/3.x/constants/contract-addresses" + } + ], + "styling": { + "codeblocks": { + "theme": { + "light": "github-light", + "dark": "dark-plus" + } + } + } +} diff --git a/llms.txt.information.md b/llms.txt.information.md new file mode 100644 index 00000000..21254206 --- /dev/null +++ b/llms.txt.information.md @@ -0,0 +1,30 @@ +This page is deliberately named incorrectly so as not to override the default +llms.txt file. + +https://www.mintlify.com/docs/ai/llmstxt + +An llms.txt file is a plain Markdown file that contains: Site title as an H1 +heading. Structured content sections with links and a description of each page +in your documentation. 
+ +**Ensure all pages have a description for LLMs.txt to be useful.** + +Each page’s description comes from the description field in its frontmatter. +Pages without a description field appear in the llms.txt file without a +description. + +Example + +``` +# Site title + +## Docs + +- [API](https://example.com/docs/api): Endpoint list and usage +- [Install](https://example.com/docs/install): Setup steps +- [Getting started](https://example.com/docs/start): Intro guide +``` + +This structured approach allows LLMs to efficiently process your documentation +at a high level and locate relevant content for user queries, improving the +accuracy and speed of AI-assisted documentation searches. diff --git a/mint.json b/mintOld.json similarity index 97% rename from mint.json rename to mintOld.json index b70dc247..ab684bf8 100644 --- a/mint.json +++ b/mintOld.json @@ -336,12 +336,7 @@ "url": "https://discord.gg/livepeer" } ], - "versions": [ - "Developers", - "Delegators", - "Orchestrators", - "Gateways" - ], + "versions": ["Developers", "Delegators", "Orchestrators", "Gateways"], "topbarCtaButton": { "name": "Dashboard", "url": "https://livepeer.studio" @@ -430,9 +425,7 @@ { "group": "Webhooks", "icon": "bell", - "pages": [ - "developers/guides/setup-and-listen-to-webhooks" - ] + "pages": ["developers/guides/setup-and-listen-to-webhooks"] }, { "group": "Transcode API", @@ -454,9 +447,7 @@ { "group": "Projects", "icon": "folder-open", - "pages": [ - "developers/guides/managing-projects" - ] + "pages": ["developers/guides/managing-projects"] }, { "group": "Integrations", @@ -582,9 +573,7 @@ "group": "How to Contribute", "icon": "heart", "iconType": "solid", - "pages": [ - "ai/contributors/coming-soon" - ] + "pages": ["ai/contributors/coming-soon"] }, { "group": "SDKs", @@ -645,17 +634,11 @@ }, { "group": "Overview", - "pages": [ - "sdks/introduction" - ] + "pages": ["sdks/introduction"] }, { "group": "Server-side SDKs", - "pages": [ - "sdks/javascript", - "sdks/go", - "sdks/python" - ] + "pages": ["sdks/javascript", "sdks/go", "sdks/python"] }, { "group": "React Components", @@ -734,10 +717,7 @@ { "group": "Examples", "icon": "clipboard", - "pages": [ - "sdks/react/Player", - "sdks/react/Broadcast" - ] + "pages": ["sdks/react/Player", "sdks/react/Broadcast"] }, { "group": "Migration", @@ -773,9 +753,7 @@ }, { "group": "Playback", - "pages": [ - "sdks/react/migration/3.x/playback/usePlaybackInfo" - ] + "pages": ["sdks/react/migration/3.x/playback/usePlaybackInfo"] }, { "group": "Constants", diff --git a/openapi.yaml b/openapi.yaml index d1674c23..220a7f78 100644 --- a/openapi.yaml +++ b/openapi.yaml @@ -84,7 +84,10 @@ components: quality: type: integer description: > - Restricts the size of the output video using the constant quality feature. Increasing this value will result in a lower quality video. Note that this parameter might not work if the transcoder lacks support for it. + Restricts the size of the output video using the constant quality + feature. Increasing this value will result in a lower quality video. + Note that this parameter might not work if the transcoder lacks + support for it. minimum: 0 maximum: 44 @@ -131,7 +134,10 @@ components: quality: type: integer description: > - Restricts the size of the output video using the constant quality feature. Increasing this value will result in a lower quality video. Note that this parameter might not work if the transcoder lacks support for it. + Restricts the size of the output video using the constant quality + feature. 
Increasing this value will result in a lower quality video. + Note that this parameter might not work if the transcoder lacks + support for it. minimum: 0 maximum: 44 @@ -193,7 +199,8 @@ components: createdAt: type: number readOnly: true - description: Timestamp (in milliseconds) at which stream object was created + description: + Timestamp (in milliseconds) at which stream object was created example: 1587667174725 events: type: array @@ -251,7 +258,8 @@ components: type: number readOnly: true example: 1587667174725 - description: Timestamp (in milliseconds) at which the webhook last failed + description: + Timestamp (in milliseconds) at which the webhook last failed error: readOnly: true type: string @@ -354,17 +362,24 @@ components: playbackId: type: string description: >- - The playback ID of the stream or stream recording to clip. Asset playback IDs are not supported yet. + The playback ID of the stream or stream recording to clip. Asset + playback IDs are not supported yet. example: eaw4nk06ts2d0mzb startTime: type: number description: >- - The start timestamp of the clip in Unix milliseconds. _See the ClipTrigger in the UI Kit for an example of how this is calculated (for HLS, it uses `Program Date-Time` tags, and for WebRTC, it uses the latency from server to client at stream startup)._ + The start timestamp of the clip in Unix milliseconds. _See the + ClipTrigger in the UI Kit for an example of how this is calculated + (for HLS, it uses `Program Date-Time` tags, and for WebRTC, it uses + the latency from server to client at stream startup)._ example: 1587667174725 endTime: type: number description: >- - The end timestamp of the clip in Unix milliseconds. _See the ClipTrigger in the UI Kit for an example of how this is calculated (for HLS, it uses `Program Date-Time` tags, and for WebRTC, it uses the latency from server to client at stream startup)._ + The end timestamp of the clip in Unix milliseconds. _See the + ClipTrigger in the UI Kit for an example of how this is calculated + (for HLS, it uses `Program Date-Time` tags, and for WebRTC, it uses + the latency from server to client at stream startup)._ example: 1587667174725 name: type: string @@ -373,7 +388,9 @@ components: sessionId: type: string description: >- - The optional session ID of the stream to clip. This can be used to clip _recordings_ - if it is not specified, it will clip the ongoing livestream. + The optional session ID of the stream to clip. This can be used to + clip _recordings_ - if it is not specified, it will clip the ongoing + livestream. example: de7818e7-610a-4057-8f6f-b785dc1e6f88 target: type: object @@ -432,7 +449,8 @@ components: createdAt: type: number readOnly: true - description: Timestamp (in milliseconds) at which stream object was created + description: + Timestamp (in milliseconds) at which stream object was created example: 1587667174725 stream: type: object @@ -512,7 +530,8 @@ components: createdAt: type: number readOnly: true - description: Timestamp (in milliseconds) at which stream object was created + description: + Timestamp (in milliseconds) at which stream object was created example: 1587667174725 parentId: type: string @@ -585,7 +604,9 @@ components: playbackId: type: string example: eaw4nk06ts2d0mzb - description: The playback ID to use with the Playback Info endpoint to retrieve playback URLs. + description: + The playback ID to use with the Playback Info endpoint to retrieve + playback URLs. 
playbackPolicy: $ref: "#/components/schemas/playback-policy" profiles: @@ -664,7 +685,8 @@ components: - number - "null" example: 1713281212993 - description: Timestamp (in milliseconds) when the stream was last terminated + description: + Timestamp (in milliseconds) when the stream was last terminated userId: type: string readOnly: true @@ -789,7 +811,9 @@ components: - type: "array" items: type: "string" - description: "A string array of human-readable errors describing issues affecting the stream, if any." + description: + "A string array of human-readable errors describing issues affecting + the stream, if any." tracks: type: object description: | @@ -806,7 +830,8 @@ components: description: "The bitrate of the track, in kilobits per second." keys: type: object - description: An object containing additional track-specific metrics. + description: + An object containing additional track-specific metrics. additionalProperties: type: number fpks: @@ -890,7 +915,8 @@ components: createdAt: readOnly: true type: number - description: Timestamp (in milliseconds) at which stream object was created + description: + Timestamp (in milliseconds) at which stream object was created example: 1587667174725 parentId: type: string @@ -902,14 +928,16 @@ components: example: aac12556-4d65-4d34-9fb6-d1f0985eb0a9 record: description: > - Whether the stream should be recorded. Uses default settings. For more customization, create and configure an object store. + Whether the stream should be recorded. Uses default settings. For + more customization, create and configure an object store. type: boolean example: false recordingStatus: readOnly: true type: string - description: The status of the recording process of this stream session. + description: + The status of the recording process of this stream session. enum: - waiting - ready @@ -923,12 +951,14 @@ components: mp4Url: type: string readOnly: true - description: The URL for the stream session recording packaged in an MP4. + description: + The URL for the stream session recording packaged in an MP4. playbackId: type: string example: eaw4nk06ts2d0mzb description: >- - The playback ID to use with the Playback Info endpoint to retrieve playback URLs. + The playback ID to use with the Playback Info endpoint to retrieve + playback URLs. profiles: $ref: "#/components/schemas/stream/properties/profiles" recordingSpec: @@ -964,7 +994,8 @@ components: url: type: string writeOnly: true - description: Livepeer-compatible multistream target URL (RTMP(S) or SRT) + description: + Livepeer-compatible multistream target URL (RTMP(S) or SRT) example: "rtmps://live.my-service.tv/channel/secretKey" format: uri pattern: "^(srt|rtmps?)://" @@ -1003,7 +1034,8 @@ components: type: string example: eaw4nk06ts2d0mzb description: >- - The playback ID to use with the Playback Info endpoint to retrieve playback URLs. + The playback ID to use with the Playback Info endpoint to retrieve + playback URLs. userId: type: string readOnly: true @@ -1019,13 +1051,18 @@ components: example: >- https://livepeercdn.com/asset/ea03f37e-f861-4cdd-b495-0e60b6d753ad/index.m3u8 description: >- - URL for HLS playback. **It is recommended to not use this URL**, and instead use playback IDs with the Playback Info endpoint to retrieve the playback URLs - this URL format is subject to change (e.g. https://livepeercdn.com/asset/ea03f37e-f861-4cdd-b495-0e60b6d753ad/index.m3u8). + URL for HLS playback. 
**It is recommended to not use this URL**, and + instead use playback IDs with the Playback Info endpoint to retrieve + the playback URLs - this URL format is subject to change (e.g. + https://livepeercdn.com/asset/ea03f37e-f861-4cdd-b495-0e60b6d753ad/index.m3u8). downloadUrl: readOnly: true type: string example: "https://livepeercdn.com/asset/eaw4nk06ts2d0mzb/video/download.mp4" description: >- - The URL to directly download the asset, e.g. `https://livepeercdn.com/asset/eawrrk06ts2d0mzb/video`. It is not recommended to use this for playback. + The URL to directly download the asset, e.g. + `https://livepeercdn.com/asset/eawrrk06ts2d0mzb/video`. It is not + recommended to use this for playback. playbackPolicy: $ref: "#/components/schemas/playback-policy" source: @@ -1045,7 +1082,8 @@ components: gatewayUrl: type: string description: >- - Gateway URL from asset if parsed from provided URL on upload. + Gateway URL from asset if parsed from provided URL on + upload. encryption: $ref: "#/components/schemas/new-asset-payload/properties/encryption" - additionalProperties: false @@ -1059,7 +1097,8 @@ components: - recording sessionId: type: string - description: ID of the session from which this asset was created + description: + ID of the session from which this asset was created - additionalProperties: false required: - type @@ -1073,20 +1112,25 @@ components: $ref: "#/components/schemas/new-asset-payload/properties/encryption" sourceId: type: string - description: ID of the asset or stream from which this asset was created. + description: + ID of the asset or stream from which this asset was created. sessionId: type: string - description: ID of the session from which this asset was created. + description: + ID of the session from which this asset was created. playbackId: type: string description: >- - Playback ID of the asset or stream from which this asset was created. + Playback ID of the asset or stream from which this asset was + created. requesterId: type: string - description: ID of the requester from which this asset was created. + description: + ID of the requester from which this asset was created. assetId: type: string - description: ID of the asset from which this asset was created. + description: + ID of the asset from which this asset was created. creatorId: $ref: "#/components/schemas/creator-id" profiles: @@ -1162,7 +1206,8 @@ components: - deleted updatedAt: type: number - description: Timestamp (in milliseconds) at which the asset was last updated + description: + Timestamp (in milliseconds) at which the asset was last updated example: 1587667174725 progress: type: number @@ -1173,7 +1218,8 @@ components: name: type: string description: > - The name of the asset. This is not necessarily the filename - it can be a custom name or title. + The name of the asset. This is not necessarily the filename - it can + be a custom name or title. example: filename.mp4 projectId: @@ -1275,7 +1321,8 @@ components: example: 1080 pixelFormat: type: string - description: Pixel format of the track - only for video tracks + description: + Pixel format of the track - only for video tracks example: yuv420p fps: type: number @@ -1329,7 +1376,8 @@ components: name: type: string description: > - The name of the asset. This is not necessarily the filename - it can be a custom name or title. + The name of the asset. This is not necessarily the filename - it can + be a custom name or title. 
example: filename.mp4 staticMp4: @@ -1370,7 +1418,8 @@ components: type: string writeOnly: true description: >- - Encryption key used to encrypt the asset. Only writable in the upload asset endpoints and cannot be retrieved back. + Encryption key used to encrypt the asset. Only writable in the + upload asset endpoints and cannot be retrieved back. c2pa: type: boolean description: Decides if the output video should include C2PA signature @@ -1382,7 +1431,8 @@ components: $ref: "#/components/schemas/transcode-profile" targetSegmentSizeSecs: type: number - description: How many seconds the duration of each output segment should be + description: + How many seconds the duration of each output segment should be room-user-payload: type: object required: @@ -1399,7 +1449,8 @@ components: example: true canPublishData: type: boolean - description: Whether a user is allowed to publish data messages to the room + description: + Whether a user is allowed to publish data messages to the room example: true metadata: type: string @@ -1421,12 +1472,14 @@ components: joinUrl: type: string description: >- - Joining URL - use this for Livepeer's default meeting app (see the multiparticipant streaming guide for more info). + Joining URL - use this for Livepeer's default meeting app (see the + multiparticipant streaming guide for more info). example: "https://meet.livepeer.chat" token: type: string description: >- - Joining JWT - this can be used if you have a custom meeting app (see the multiparticipant streaming guide for more info). + Joining JWT - this can be used if you have a custom meeting app (see + the multiparticipant streaming guide for more info). example: token get-room-user-response: type: object @@ -1470,12 +1523,14 @@ components: canPublish: type: boolean description: >- - Whether a user is allowed to publish audio/video tracks (i.e. their microphone and webcam) + Whether a user is allowed to publish audio/video tracks (i.e. 
their + microphone and webcam) example: true default: true canPublishData: type: boolean - description: Whether a user is allowed to publish data messages to the room + description: + Whether a user is allowed to publish data messages to the room example: true default: true metadata: @@ -1566,7 +1621,10 @@ components: format: uri pattern: "^http(s)?://" description: >- - Service endpoint URL (AWS S3 endpoint list: https://docs.aws.amazon.com/general/latest/gr/s3.html, GCP S3 endpoint: https://storage.googleapis.com, Storj: https://gateway.storjshare.io) + Service endpoint URL (AWS S3 endpoint list: + https://docs.aws.amazon.com/general/latest/gr/s3.html, GCP + S3 endpoint: https://storage.googleapis.com, Storj: + https://gateway.storjshare.io) example: "https://gateway.storjshare.io" bucket: type: string @@ -1613,7 +1671,10 @@ components: format: uri pattern: "^http(s)?://" description: >- - Service endpoint URL (AWS S3 endpoint list: https://docs.aws.amazon.com/general/latest/gr/s3.html, GCP S3 endpoint: https://storage.googleapis.com, Storj: https://gateway.storjshare.io) + Service endpoint URL (AWS S3 endpoint list: + https://docs.aws.amazon.com/general/latest/gr/s3.html, GCP + S3 endpoint: https://storage.googleapis.com, Storj: + https://gateway.storjshare.io) example: "https://gateway.storjshare.io" bucket: type: string @@ -1705,7 +1766,8 @@ components: $ref: "#/components/schemas/transcode-profile" targetSegmentSizeSecs: type: number - description: How many seconds the duration of each output segment should be + description: + How many seconds the duration of each output segment should be creatorId: $ref: "#/components/schemas/input-creator-id" c2pa: @@ -1776,7 +1838,8 @@ components: $ref: "#/components/schemas/new-asset-payload/properties/encryption" c2pa: type: boolean - description: Decides if the output video should include C2PA signature + description: + Decides if the output video should include C2PA signature example: true profiles: type: array @@ -1785,7 +1848,8 @@ components: targetSegmentSizeSecs: type: number description: >- - How many seconds the duration of each output segment should be + How many seconds the duration of each output segment should + be example: 6 export: $ref: "#/components/schemas/export-task-params" @@ -1875,7 +1939,8 @@ components: $ref: "#/components/schemas/input-creator-id" c2pa: type: boolean - description: Decides if the output video should include C2PA signature + description: + Decides if the output video should include C2PA signature example: false clip: properties: @@ -1887,7 +1952,10 @@ components: clipStrategy: type: object description: >- - Strategy to use for clipping the asset. If not specified, the default strategy that Catalyst is configured for will be used. This field only available for admin users, and is only used for E2E testing. + Strategy to use for clipping the asset. If not specified, + the default strategy that Catalyst is configured for will be + used. This field only available for admin users, and is only + used for E2E testing. additionalProperties: false properties: startTime: @@ -1899,7 +1967,10 @@ components: catalystPipelineStrategy: type: string description: >- - Force to use a specific strategy in the Catalyst pipeline. If not specified, the default strategy that Catalyst is configured for will be used. This field only available for admin users, and is only used for E2E testing. + Force to use a specific strategy in the Catalyst pipeline. 
+ If not specified, the default strategy that Catalyst is + configured for will be used. This field only available for + admin users, and is only used for E2E testing. enum: - catalyst - catalyst_ffmpeg @@ -2000,16 +2071,19 @@ components: videoFileGatewayUrl: readOnly: true type: string - description: URL to access file via HTTP through an IPFS gateway + description: + URL to access file via HTTP through an IPFS gateway example: "https://gateway.ipfs.io/ipfs/Qmabc123xyz341" nftMetadataCid: type: string - description: IPFS CID of the default metadata exported for the video + description: + IPFS CID of the default metadata exported for the video example: Qmabc123xyz341 nftMetadataUrl: readOnly: true type: string - description: URL for the metadata file with the IPFS protocol + description: + URL for the metadata file with the IPFS protocol example: "ipfs://Qmabc123xyz341" nftMetadataGatewayUrl: readOnly: true @@ -2049,7 +2123,8 @@ components: type: string - type: string description: >- - Helper syntax to specify an unverified creator ID, fully managed by the developer. + Helper syntax to specify an unverified creator ID, fully managed by + the developer. creator-id: oneOf: - type: object @@ -2065,7 +2140,8 @@ components: example: "unverified" value: type: string - description: Developer-managed ID of the user who created the resource. + description: + Developer-managed ID of the user who created the resource. example: "user123" export-task-params: description: Parameters for the export task @@ -2128,12 +2204,14 @@ components: createdAt: readOnly: true type: number - description: Timestamp (in milliseconds) at which the signing-key was created + description: + Timestamp (in milliseconds) at which the signing-key was created example: 1587667174725 lastSeen: readOnly: true type: number - description: Timestamp (in milliseconds) at which the signing-key was last used + description: + Timestamp (in milliseconds) at which the signing-key was last used example: 1587667174725 publicKey: type: string @@ -2201,15 +2279,18 @@ components: example: 1234 createdAt: type: number - description: Timestamp (in milliseconds) at which user object was created + description: + Timestamp (in milliseconds) at which user object was created example: 1587667174725 verifiedAt: type: number - description: Timestamp (in milliseconds) at which user object was verified + description: + Timestamp (in milliseconds) at which user object was verified example: 1587667174725 planChangedAt: type: number - description: Timestamp (in milliseconds) at which user object was verified + description: + Timestamp (in milliseconds) at which user object was verified example: 1587667174725 lastStreamedAt: type: number @@ -2219,7 +2300,8 @@ components: example: 1587667174725 lastSeen: type: number - description: Timestamp (in milliseconds) at which user's password was used + description: + Timestamp (in milliseconds) at which user's password was used example: 1587667174725 usage: type: object @@ -2252,7 +2334,8 @@ components: type: - object - "null" - description: Whether the playback policy for an asset or stream is public or signed + description: + Whether the playback policy for an asset or stream is public or signed additionalProperties: false required: - type @@ -2272,7 +2355,7 @@ components: type: object description: User-defined webhook context additionalProperties: true - example: {"streamerId": "my-custom-id"} + example: { "streamerId": "my-custom-id" } refreshInterval: type: number description: | @@ -2281,7 +2364,9 @@ components: 
example: 600 allowedOrigins: type: array - description: List of allowed origins for CORS playback (://:, ://) + description: + List of allowed origins for CORS playback + (://:, ://) items: type: string usage-metric: @@ -2417,7 +2502,8 @@ components: example: America/Los_Angeles geohash: type: string - description: Geographic encoding of the viewers location. Accurate to 3 digits. + description: + Geographic encoding of the viewers location. Accurate to 3 digits. example: 123 viewCount: type: integer @@ -2624,7 +2710,8 @@ components: createdAt: type: number readOnly: true - description: Timestamp (in milliseconds) at which the object was created + description: + Timestamp (in milliseconds) at which the object was created example: 1587667174725 signatureType: type: string @@ -2644,7 +2731,8 @@ components: readOnly: true type: number description: > - Timestamp (in milliseconds) at which IPFS export task was updated + Timestamp (in milliseconds) at which IPFS export task was + updated example: 1587667174725 status: @@ -2685,7 +2773,8 @@ components: apiSecret: type: string writeOnly: true - description: Will be added to the pinata_secret_api_key header. + description: + Will be added to the pinata_secret_api_key header. example: 1234567890abcdef storage-status: readOnly: true @@ -2753,9 +2842,11 @@ components: format: uri pattern: "^(https?|ipfs|ar)://" description: > - URL where the asset contents can be retrieved, e.g. `https://s3.amazonaws.com/my-bucket/path/filename.mp4`. + URL where the asset contents can be retrieved, e.g. + `https://s3.amazonaws.com/my-bucket/path/filename.mp4`. - For an IPFS source, this should be similar to: `ipfs://{CID}`. For an Arweave + For an IPFS source, this should be similar to: `ipfs://{CID}`. For + an Arweave source: `ar://{CID}`. @@ -2796,8 +2887,11 @@ components: type: string title: Return Timestamps description: >- - Return timestamps for the transcribed text. Supported values: 'sentence', 'word', or a string boolean ('true' or 'false'). Default is 'true' ('sentence'). 'false' means no timestamps. 'word' means word-based timestamps. - default: 'true' + Return timestamps for the transcribed text. Supported values: + 'sentence', 'word', or a string boolean ('true' or 'false'). Default + is 'true' ('sentence'). 'false' means no timestamps. 'word' means + word-based timestamps. + default: "true" type: object required: - audio @@ -2823,36 +2917,43 @@ components: type: string title: Loras description: >- - A LoRA (Low-Rank Adaptation) model and its corresponding weight for image generation. Example: { "latent-consistency/lcm-lora-sdxl": 1.0, "nerijs/pixel-art-xl": 1.2}. - default: '' + A LoRA (Low-Rank Adaptation) model and its corresponding weight for + image generation. Example: { "latent-consistency/lcm-lora-sdxl": + 1.0, "nerijs/pixel-art-xl": 1.2}. + default: "" strength: type: number title: Strength - description: Degree of transformation applied to the reference image (0 to 1). + description: + Degree of transformation applied to the reference image (0 to 1). default: 0.8 guidance_scale: type: number title: Guidance Scale description: >- - Encourages model to generate images closely linked to the text prompt (higher values may reduce image quality). + Encourages model to generate images closely linked to the text + prompt (higher values may reduce image quality). default: 7.5 image_guidance_scale: type: number title: Image Guidance Scale description: >- - Degree to which the generated image is pushed towards the initial image. 
+ Degree to which the generated image is pushed towards the initial + image. default: 1.5 negative_prompt: type: string title: Negative Prompt description: >- - Text prompt(s) to guide what to exclude from image generation. Ignored if guidance_scale < 1. - default: '' + Text prompt(s) to guide what to exclude from image generation. + Ignored if guidance_scale < 1. + default: "" safety_check: type: boolean title: Safety Check description: >- - Perform a safety check to estimate if generated images could be offensive or harmful. + Perform a safety check to estimate if generated images could be + offensive or harmful. default: true seed: type: integer @@ -2862,7 +2963,8 @@ components: type: integer title: Num Inference Steps description: >- - Number of denoising steps. More steps usually lead to higher quality images but slower inference. Modulated by strength. + Number of denoising steps. More steps usually lead to higher quality + images but slower inference. Modulated by strength. default: 100 num_images_per_prompt: type: integer @@ -2906,19 +3008,22 @@ components: type: integer title: Motion Bucket Id description: >- - Used for conditioning the amount of motion for the generation. The higher the number the more motion will be in the video. + Used for conditioning the amount of motion for the generation. The + higher the number the more motion will be in the video. default: 127 noise_aug_strength: type: number title: Noise Aug Strength description: >- - Amount of noise added to the conditioning image. Higher values reduce resemblance to the conditioning image and increase motion. + Amount of noise added to the conditioning image. Higher values + reduce resemblance to the conditioning image and increase motion. default: 0.02 safety_check: type: boolean title: Safety Check description: >- - Perform a safety check to estimate if generated images could be offensive or harmful. + Perform a safety check to estimate if generated images could be + offensive or harmful. default: true seed: type: integer @@ -2928,7 +3033,8 @@ components: type: integer title: Num Inference Steps description: >- - Number of denoising steps. More steps usually lead to higher quality images but slower inference. Modulated by strength. + Number of denoising steps. More steps usually lead to higher quality + images but slower inference. Modulated by strength. default: 25 type: object required: @@ -2947,7 +3053,7 @@ components: system_msg: type: string title: System Msg - default: '' + default: "" temperature: type: number title: Temperature @@ -2959,7 +3065,7 @@ components: history: type: string title: History - default: '[]' + default: "[]" stream: type: boolean title: Stream @@ -2985,38 +3091,47 @@ components: type: string title: Point Coords description: >- - Nx2 array of point prompts to the model, where each point is in (X,Y) in pixels. + Nx2 array of point prompts to the model, where each point is in + (X,Y) in pixels. point_labels: type: string title: Point Labels description: >- - Labels for the point prompts, where 1 indicates a foreground point and 0 indicates a background point. + Labels for the point prompts, where 1 indicates a foreground point + and 0 indicates a background point. box: type: string title: Box - description: 'A length 4 array given as a box prompt to the model, in XYXY format.' + description: + "A length 4 array given as a box prompt to the model, in XYXY + format." 
mask_input: type: string title: Mask Input description: >- - A low-resolution mask input to the model, typically from a previous prediction iteration, with the form 1xHxW (H=W=256 for SAM). + A low-resolution mask input to the model, typically from a previous + prediction iteration, with the form 1xHxW (H=W=256 for SAM). multimask_output: type: boolean title: Multimask Output description: >- - If true, the model will return three masks for ambiguous input prompts, often producing better masks than a single prediction. + If true, the model will return three masks for ambiguous input + prompts, often producing better masks than a single prediction. default: true return_logits: type: boolean title: Return Logits description: >- - If true, returns un-thresholded mask logits instead of a binary mask. + If true, returns un-thresholded mask logits instead of a binary + mask. default: true normalize_coords: type: boolean title: Normalize Coords description: >- - If true, the point coordinates will be normalized to the range [0,1], with point_coords expected to be with respect to image dimensions. + If true, the point coordinates will be normalized to the range + [0,1], with point_coords expected to be with respect to image + dimensions. default: true type: object required: @@ -3043,7 +3158,8 @@ components: type: boolean title: Safety Check description: >- - Perform a safety check to estimate if generated images could be offensive or harmful. + Perform a safety check to estimate if generated images could be + offensive or harmful. default: true seed: type: integer @@ -3053,7 +3169,8 @@ components: type: integer title: Num Inference Steps description: >- - Number of denoising steps. More steps usually lead to higher quality images but slower inference. Modulated by strength. + Number of denoising steps. More steps usually lead to higher quality + images but slower inference. Modulated by strength. default: 75 type: object required: @@ -3082,7 +3199,7 @@ components: properties: detail: allOf: - - $ref: '#/components/schemas/APIError' + - $ref: "#/components/schemas/APIError" description: Detailed error information. type: object required: @@ -3093,7 +3210,7 @@ components: properties: detail: items: - $ref: '#/components/schemas/ValidationError' + $ref: "#/components/schemas/ValidationError" type: array title: Detail type: object @@ -3102,7 +3219,7 @@ components: properties: images: items: - $ref: '#/components/schemas/Media' + $ref: "#/components/schemas/Media" type: array title: Images description: The generated images. @@ -3137,7 +3254,8 @@ components: logits: type: string title: Logits - description: 'The raw, unnormalized predictions (logits) for the masks.' + description: + "The raw, unnormalized predictions (logits) for the masks." type: object required: - masks @@ -3165,7 +3283,8 @@ components: - seed - nsfw title: Media - description: A media object containing information about the generated media. + description: + A media object containing information about the generated media. TextResponse: properties: text: @@ -3174,7 +3293,7 @@ components: description: The generated text. chunks: items: - $ref: '#/components/schemas/Chunk' + $ref: "#/components/schemas/Chunk" type: array title: Chunks description: The generated text chunks. @@ -3195,13 +3314,16 @@ components: type: string title: Loras description: >- - A LoRA (Low-Rank Adaptation) model and its corresponding weight for image generation. Example: { "latent-consistency/lcm-lora-sdxl": 1.0, "nerijs/pixel-art-xl": 1.2}. 
- default: '' + A LoRA (Low-Rank Adaptation) model and its corresponding weight for + image generation. Example: { "latent-consistency/lcm-lora-sdxl": + 1.0, "nerijs/pixel-art-xl": 1.2}. + default: "" prompt: type: string title: Prompt description: >- - Text prompt(s) to guide image generation. Separate multiple prompts with '|' if supported by the model. + Text prompt(s) to guide image generation. Separate multiple prompts + with '|' if supported by the model. height: type: integer title: Height @@ -3216,19 +3338,22 @@ components: type: number title: Guidance Scale description: >- - Encourages model to generate images closely linked to the text prompt (higher values may reduce image quality). + Encourages model to generate images closely linked to the text + prompt (higher values may reduce image quality). default: 7.5 negative_prompt: type: string title: Negative Prompt description: >- - Text prompt(s) to guide what to exclude from image generation. Ignored if guidance_scale < 1. - default: '' + Text prompt(s) to guide what to exclude from image generation. + Ignored if guidance_scale < 1. + default: "" safety_check: type: boolean title: Safety Check description: >- - Perform a safety check to estimate if generated images could be offensive or harmful. + Perform a safety check to estimate if generated images could be + offensive or harmful. default: true seed: type: integer @@ -3238,7 +3363,8 @@ components: type: integer title: Num Inference Steps description: >- - Number of denoising steps. More steps usually lead to higher quality images but slower inference. Modulated by strength. + Number of denoising steps. More steps usually lead to higher quality + images but slower inference. Modulated by strength. default: 50 num_images_per_prompt: type: integer @@ -3275,7 +3401,7 @@ components: properties: images: items: - $ref: '#/components/schemas/Media' + $ref: "#/components/schemas/Media" type: array title: Images description: The generated images. @@ -4800,7 +4926,9 @@ paths: tags: - webhook description: > - To create a new webhook, you need to make an API call with the events you want to listen for and the URL that will be called when those events occur. + To create a new webhook, you need to make an API call with the events + you want to listen for and the URL that will be called when those events + occur. requestBody: required: true @@ -5657,13 +5785,19 @@ paths: url: type: string description: >- - The direct upload endpoint for which supports PUT requests. **It is recommended to use the Tus endpoint for a better upload experience.** + The direct upload endpoint for which supports PUT + requests. **It is recommended to use the Tus endpoint for + a better upload experience.** example: >- https://origin.livepeer.com/api/asset/upload/direct?token=eyJhbGciOiJIUzI1NiJ9.eyJtc2ciOiJoZWxsbyBoYWNrZXIsIHRoZXJlJ3Mgbm90aGluZyBmb3IgeW91IGhlcmUg8J-YhiJ9.1YDjmXsqLcgNyMSzT4kXl_kIni46_EuGX_xfqmC7e0Q tusEndpoint: type: string description: >- - The [Tus-compatible](https://tus.io/) endpoint for resumable uploads. **This is the recommended way to upload assets.** See the [Tus-js](https://github.com/tus/tus-js-client) client for more information. + The [Tus-compatible](https://tus.io/) endpoint for + resumable uploads. **This is the recommended way to upload + assets.** See the + [Tus-js](https://github.com/tus/tus-js-client) client for + more information. 
example: >- https://origin.livepeer.com/api/asset/upload/tus?token=eyJhbGciOiJIUzI1NiJ9.eyJtc2ciOiJoZWxsbyBoYWNrZXIsIHRoZXJlJ3Mgbm90aGluZyBmb3IgeW91IGhlcmUg8J-YhiJ9.1YDjmXsqLcgNyMSzT4kXl_kIni46_EuGX_xfqmC7e0Q asset: @@ -7020,7 +7154,8 @@ paths: description: > Create a livestream for your room. - This allows you to leverage livestreaming features like recording and HLS output. + This allows you to leverage livestreaming features like recording and + HLS output. responses: default: @@ -7159,9 +7294,11 @@ paths: type: string summary: Create a room user description: > - Call this endpoint to add a user to a room, specifying a display name at a minimum. + Call this endpoint to add a user to a room, specifying a display name at + a minimum. - The response will contain a joining URL for Livepeer's default meeting app. + The response will contain a joining URL for Livepeer's default meeting + app. Alternatively the joining token can be used with a custom app. @@ -7725,7 +7862,9 @@ paths: tags: - metrics description: > - Requires a proof of ownership to be sent in the request, which for now is just the assetId or streamId parameters (1 of those must be in the query-string). + Requires a proof of ownership to be sent in the request, which for now + is just the assetId or streamId parameters (1 of those must be in the + query-string). parameters: - name: from @@ -7894,7 +8033,8 @@ paths: schema: $ref: "#/components/schemas/error" "200": - description: A single Metric object with the viewCount and playtimeMins metrics. + description: + A single Metric object with the viewCount and playtimeMins metrics. content: application/json: schema: @@ -8363,9 +8503,14 @@ paths: tags: - accessControl description: > - The publicKey is a representation of the public key, encoded as base 64 and is passed as a string, and the privateKey is displayed only on creation. This is the only moment where the client can save the private key, otherwise it will be lost. Remember to decode your string when signing JWTs. + The publicKey is a representation of the public key, encoded as base 64 + and is passed as a string, and the privateKey is displayed only on + creation. This is the only moment where the client can save the private + key, otherwise it will be lost. Remember to decode your string when + signing JWTs. - Up to 10 signing keys can be generated, after that you must delete at least one signing key to create a new one. + Up to 10 signing keys can be generated, after that you must delete at + least one signing key to create a new one. responses: default: @@ -9276,7 +9421,8 @@ paths: schema: type: string description: >- - The playback ID from the asset or livestream, e.g. `eaw4nk06ts2d0mzb`. + The playback ID from the asset or livestream, e.g. + `eaw4nk06ts2d0mzb`. 
responses: default: description: Error @@ -9365,7 +9511,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/TextToImageParams' + $ref: "#/components/schemas/TextToImageParams" required: true responses: default: @@ -9373,46 +9519,46 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/studio-api-error' - '200': + $ref: "#/components/schemas/studio-api-error" + "200": description: Successful Response content: application/json: schema: - $ref: '#/components/schemas/ImageResponse' + $ref: "#/components/schemas/ImageResponse" x-speakeasy-name-override: data - '400': + "400": description: Bad Request content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '401': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "401": description: Unauthorized content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '422': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "422": description: Validation Error content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPValidationError' - - $ref: '#/components/schemas/studio-api-error' - '500': + - $ref: "#/components/schemas/HTTPValidationError" + - $ref: "#/components/schemas/studio-api-error" + "500": description: Internal Server Error content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" x-speakeasy-name-override: textToImage x-codeSamples: - lang: typescript @@ -9445,7 +9591,7 @@ paths: content: multipart/form-data: schema: - $ref: '#/components/schemas/Body_genImageToImage' + $ref: "#/components/schemas/Body_genImageToImage" required: true responses: default: @@ -9453,46 +9599,46 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/studio-api-error' - '200': + $ref: "#/components/schemas/studio-api-error" + "200": description: Successful Response content: application/json: schema: - $ref: '#/components/schemas/ImageResponse' + $ref: "#/components/schemas/ImageResponse" x-speakeasy-name-override: data - '400': + "400": description: Bad Request content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '401': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "401": description: Unauthorized content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '422': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "422": description: Validation Error content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPValidationError' - - $ref: '#/components/schemas/studio-api-error' - '500': + - $ref: "#/components/schemas/HTTPValidationError" + - $ref: "#/components/schemas/studio-api-error" + "500": description: Internal Server Error content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" x-speakeasy-name-override: imageToImage x-codeSamples: - 
lang: typescript @@ -9527,7 +9673,7 @@ paths: content: multipart/form-data: schema: - $ref: '#/components/schemas/Body_genImageToVideo' + $ref: "#/components/schemas/Body_genImageToVideo" required: true responses: default: @@ -9535,46 +9681,46 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/studio-api-error' - '200': + $ref: "#/components/schemas/studio-api-error" + "200": description: Successful Response content: application/json: schema: - $ref: '#/components/schemas/VideoResponse' + $ref: "#/components/schemas/VideoResponse" x-speakeasy-name-override: data - '400': + "400": description: Bad Request content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '401': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "401": description: Unauthorized content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '422': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "422": description: Validation Error content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPValidationError' - - $ref: '#/components/schemas/studio-api-error' - '500': + - $ref: "#/components/schemas/HTTPValidationError" + - $ref: "#/components/schemas/studio-api-error" + "500": description: Internal Server Error content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" x-speakeasy-name-override: imageToVideo x-codeSamples: - lang: typescript @@ -9608,7 +9754,7 @@ paths: content: multipart/form-data: schema: - $ref: '#/components/schemas/Body_genUpscale' + $ref: "#/components/schemas/Body_genUpscale" required: true responses: default: @@ -9616,46 +9762,46 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/studio-api-error' - '200': + $ref: "#/components/schemas/studio-api-error" + "200": description: Successful Response content: application/json: schema: - $ref: '#/components/schemas/ImageResponse' + $ref: "#/components/schemas/ImageResponse" x-speakeasy-name-override: data - '400': + "400": description: Bad Request content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '401': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "401": description: Unauthorized content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '422': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "422": description: Validation Error content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPValidationError' - - $ref: '#/components/schemas/studio-api-error' - '500': + - $ref: "#/components/schemas/HTTPValidationError" + - $ref: "#/components/schemas/studio-api-error" + "500": description: Internal Server Error content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" x-speakeasy-name-override: upscale x-codeSamples: - lang: typescript @@ -9690,7 
+9836,7 @@ paths: content: multipart/form-data: schema: - $ref: '#/components/schemas/Body_genAudioToText' + $ref: "#/components/schemas/Body_genAudioToText" required: true responses: default: @@ -9698,62 +9844,62 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/studio-api-error' - '200': + $ref: "#/components/schemas/studio-api-error" + "200": description: Successful Response content: application/json: schema: - $ref: '#/components/schemas/TextResponse' + $ref: "#/components/schemas/TextResponse" x-speakeasy-name-override: data - '400': + "400": description: Bad Request content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '401': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "401": description: Unauthorized content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '413': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "413": description: Request Entity Too Large content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '415': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "415": description: Unsupported Media Type content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '422': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "422": description: Validation Error content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPValidationError' - - $ref: '#/components/schemas/studio-api-error' - '500': + - $ref: "#/components/schemas/HTTPValidationError" + - $ref: "#/components/schemas/studio-api-error" + "500": description: Internal Server Error content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" x-speakeasy-name-override: audioToText x-codeSamples: - lang: typescript @@ -9787,7 +9933,7 @@ paths: content: multipart/form-data: schema: - $ref: '#/components/schemas/Body_genSegmentAnything2' + $ref: "#/components/schemas/Body_genSegmentAnything2" required: true responses: default: @@ -9795,46 +9941,46 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/studio-api-error' - '200': + $ref: "#/components/schemas/studio-api-error" + "200": description: Successful Response content: application/json: schema: - $ref: '#/components/schemas/MasksResponse' + $ref: "#/components/schemas/MasksResponse" x-speakeasy-name-override: data - '400': + "400": description: Bad Request content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '401': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "401": description: Unauthorized content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '422': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "422": description: Validation Error content: application/json: schema: oneOf: - - $ref: 
'#/components/schemas/HTTPValidationError' - - $ref: '#/components/schemas/studio-api-error' - '500': + - $ref: "#/components/schemas/HTTPValidationError" + - $ref: "#/components/schemas/studio-api-error" + "500": description: Internal Server Error content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" x-speakeasy-name-override: segmentAnything2 x-codeSamples: - lang: typescript @@ -9868,7 +10014,7 @@ paths: content: application/x-www-form-urlencoded: schema: - $ref: '#/components/schemas/Body_genLLM' + $ref: "#/components/schemas/Body_genLLM" required: true responses: default: @@ -9876,45 +10022,45 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/studio-api-error' - '200': + $ref: "#/components/schemas/studio-api-error" + "200": description: Successful Response content: application/json: schema: - $ref: '#/components/schemas/LLMResponse' - '400': + $ref: "#/components/schemas/LLMResponse" + "400": description: Bad Request content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '401': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "401": description: Unauthorized content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' - '422': + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" + "422": description: Validation Error content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPValidationError' - - $ref: '#/components/schemas/studio-api-error' - '500': + - $ref: "#/components/schemas/HTTPValidationError" + - $ref: "#/components/schemas/studio-api-error" + "500": description: Internal Server Error content: application/json: schema: oneOf: - - $ref: '#/components/schemas/HTTPError' - - $ref: '#/components/schemas/studio-api-error' + - $ref: "#/components/schemas/HTTPError" + - $ref: "#/components/schemas/studio-api-error" x-speakeasy-name-override: llm x-codeSamples: - lang: typescript diff --git a/snippets/api-base-urls-table.mdx b/snippets/api-base-urls-table.mdx new file mode 100644 index 00000000..ad68492e --- /dev/null +++ b/snippets/api-base-urls-table.mdx @@ -0,0 +1,59 @@ +{/_ API Base URLs Table Snippet Usage: Include this in API landing pages for +styled base URL tables Pass props: urls (array of {name, url} objects) _/} + +export const ApiBaseUrlsTable = ({ urls }) => ( +
+  <table>
+    <thead>
+      <tr>
+        <th>Environment</th>
+        <th>URL</th>
+      </tr>
+    </thead>
+    <tbody>
+      {urls.map((item, index) => (
+        <tr key={index}>
+          <td>{item.name}</td>
+          <td>{item.url}</td>
+        </tr>
+      ))}
+    </tbody>
+  </table>
+); diff --git a/snippets/apiSpecs/README.md b/snippets/apiSpecs/README.md new file mode 100644 index 00000000..5ead7ff5 --- /dev/null +++ b/snippets/apiSpecs/README.md @@ -0,0 +1,84 @@ +# ELI5: Generate API Docs in Mintlify + +Follow these super simple steps to turn an OpenAPI spec (YAML/JSON) into +Mintlify docs. + +## 1) Install Mintlify (first time only) + +```bash +npm i -g mintlify +``` + +## 2) Pick your OpenAPI spec and output folder + +- Spec file: for example `ai/worker/api/openapi.yaml` +- Output folder: where the generated MDX pages go, e.g. + `v2/pages/04_gateways/guides-references/api-reference/CLI-HTTP` +- Title: a friendly name shown in nav, e.g. `"CLI HTTP"` + +## 3) Run the generator script + +From the repo root: + +```bash +./v2/scripts/generate-api-docs.sh ai/worker/api/openapi.yaml v2/pages/04_gateways/guides-references/api-reference/CLI-HTTP "CLI HTTP" +``` + +Examples: + +```bash +# AI API example +./v2/scripts/generate-api-docs.sh ai/worker/api/openapi.yaml v2/pages/04_gateways/guides-references/api-reference/AI-API "AI API" + +# CLI HTTP example +./v2/scripts/generate-api-docs.sh ai/worker/api/openapi.yaml v2/pages/04_gateways/guides-references/api-reference/CLI-HTTP "CLI HTTP" +``` + +## 4) What gets created + +- MDX pages inside your chosen output folder +- A navigation snippet for `docs.json` (list of page paths as strings) + +## 5) Add the pages to `docs.json` + +Open [docs.json](docs.json) and include the generated pages under the right +group. Important: each item inside `pages` must be a string path. + +Example: + +```json +{ + "group": "CLI HTTP API", + "pages": [ + "v2/pages/04_gateways/guides-references/api-reference/CLI-HTTP/overview", + "v2/pages/04_gateways/guides-references/api-reference/CLI-HTTP/reference" + ] +} +``` + +## 6) Preview locally + +```bash +mint dev +``` + +Open the local preview and click into the new group to see the generated API +docs. + +## 7) Troubleshooting (ELI5) + +- Error: "Incorrect type. Expected string": make sure every entry in `pages` is + a string path (no objects). +- Pages not showing: double-check the output folder path matches what you put in + `docs.json`. +- Need to regenerate: rerun the script after updating your OpenAPI spec. + +## 8) Optional: Build via Docker or Makefile + +```bash +# Docker build (amd64) +docker buildx build --platform linux/amd64 --load -t livepeer/docs . + +# Makefile build +make all +``` diff --git a/snippets/apiSpecs/cli-http-api.yaml b/snippets/apiSpecs/cli-http-api.yaml new file mode 100644 index 00000000..b6fcf5ed --- /dev/null +++ b/snippets/apiSpecs/cli-http-api.yaml @@ -0,0 +1,432 @@ +openapi: 3.0.3 +info: + title: Livepeer CLI Local HTTP API + description: | + Local control-plane HTTP endpoints exposed by the Livepeer node. + + The CLI HTTP API provides programmatic access to node management functions + including status monitoring, staking operations, orchestrator configuration, + and broadcast settings. 
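+
+    For example, a quick status check against a local node (a minimal
+    sketch, using the default gateway port listed below):
+
+    ```bash
+    # Fetch node status from the local CLI HTTP API of a gateway node
+    curl http://localhost:5935/status
+    ```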
+ + **Default Ports:** + - Gateway: http://localhost:5935 + - Orchestrator: http://localhost:7935 + version: "1.0.0" + contact: + name: Livepeer Documentation + url: https://docs.livepeer.org + +servers: + - url: http://localhost:5935 + description: Gateway CLI API (default) + - url: http://localhost:7935 + description: Orchestrator CLI API + +tags: + - name: Status + description: Node status and information + - name: Staking + description: Token bonding and delegation operations + - name: Orchestrator + description: Orchestrator configuration and management + - name: Gateway + description: Gateway/broadcaster configuration + - name: Ethereum + description: Ethereum operations and token transfers + +paths: + /status: + get: + tags: + - Status + summary: Get node status + description: Display node information, balances, and configuration + operationId: getStatus + responses: + "200": + description: Node status retrieved successfully + content: + application/json: + schema: + $ref: "#/components/schemas/NodeStatus" + + /protocolParameters: + get: + tags: + - Status + summary: View protocol parameters + description: Show protocol state and parameters + operationId: getProtocolParameters + responses: + "200": + description: Protocol parameters retrieved successfully + content: + application/json: + schema: + $ref: "#/components/schemas/ProtocolParameters" + + /registeredOrchestrators: + get: + tags: + - Status + summary: List registered orchestrators + description: Display available orchestrators on the network + operationId: getRegisteredOrchestrators + responses: + "200": + description: Orchestrator list retrieved successfully + content: + application/json: + schema: + type: array + items: + $ref: "#/components/schemas/Orchestrator" + + /bond: + post: + tags: + - Staking + summary: Bond tokens to an orchestrator + description: Delegate LPT tokens to an orchestrator + operationId: bondTokens + requestBody: + required: true + content: + application/json: + schema: + type: object + required: + - amount + - to + properties: + amount: + type: string + description: Amount of LPT to bond (in wei) + example: "1000000000000000000" + to: + type: string + description: Orchestrator address to bond to + example: "0x1234567890abcdef1234567890abcdef12345678" + responses: + "200": + description: Bond successful + content: + application/json: + schema: + $ref: "#/components/schemas/TransactionResponse" + + /unbond: + post: + tags: + - Staking + summary: Unbond tokens + description: Remove delegated tokens (starts unbonding period) + operationId: unbondTokens + requestBody: + required: true + content: + application/json: + schema: + type: object + required: + - amount + properties: + amount: + type: string + description: Amount of LPT to unbond (in wei) + example: "1000000000000000000" + responses: + "200": + description: Unbond initiated successfully + content: + application/json: + schema: + $ref: "#/components/schemas/TransactionResponse" + + /rebond: + post: + tags: + - Staking + + summary: Rebond tokens + description: Rebond tokens in unbonding state + operationId: rebondTokens + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + unbondingLockId: + type: integer + description: ID of the unbonding lock to rebond + example: 0 + responses: + "200": + description: Rebond successful + content: + application/json: + schema: + $ref: "#/components/schemas/TransactionResponse" + + /activateOrchestrator: + post: + tags: + - Orchestrator + summary: Activate 
orchestrator + description: Multi-step process to become an orchestrator + operationId: activateOrchestrator + requestBody: + required: true + content: + application/json: + schema: + type: object + required: + - blockRewardCut + - feeShare + - pricePerUnit + - serviceURI + properties: + blockRewardCut: + type: string + description: Percentage of block rewards to keep (0-100) + example: "10" + feeShare: + type: string + description: + Percentage of fees to share with delegators (0-100) + example: "5" + pricePerUnit: + type: string + description: Price per pixel in wei + example: "1000" + serviceURI: + type: string + description: Service URI for the orchestrator + example: "https://orchestrator.example.com:8935" + responses: + "200": + description: Orchestrator activated successfully + content: + application/json: + schema: + $ref: "#/components/schemas/TransactionResponse" + + /setBroadcastConfig: + post: + tags: + - Gateway + summary: Set broadcast configuration + description: Configure transcoding options and max price + operationId: setBroadcastConfig + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + maxPricePerUnit: + type: string + description: Maximum price per pixel in wei + example: "1000" + pixelsPerUnit: + type: integer + description: Number of pixels per unit + example: 1 + responses: + "200": + description: Broadcast config updated successfully + + /setMaxPriceForCapability: + post: + tags: + - Gateway + summary: Set max price per AI capability + description: Set pricing for specific AI pipelines and models + operationId: setMaxPriceForCapability + requestBody: + required: true + content: + application/json: + schema: + type: object + required: + - capabilities_prices + properties: + capabilities_prices: + type: array + items: + type: object + required: + - pipeline + - price_per_unit + properties: + pipeline: + type: string + description: AI pipeline name + example: "text-to-image" + model_id: + type: string + description: Specific model ID or "default" + example: "stabilityai/sd-turbo" + price_per_unit: + type: integer + description: Maximum price per unit + example: 1000 + pixels_per_unit: + type: integer + description: Pixels per unit + example: 1 + responses: + "200": + description: AI pricing updated successfully + + /reward: + post: + tags: + - Orchestrator + summary: Claim orchestrator rewards + description: Claim orchestrator rewards for current round + operationId: claimReward + responses: + "200": + description: Reward claimed successfully + content: + application/json: + schema: + $ref: "#/components/schemas/TransactionResponse" + + /transferTokens: + post: + tags: + - Ethereum + summary: Transfer LPT tokens + description: Transfer LPT to another address + operationId: transferTokens + requestBody: + required: true + content: + application/json: + schema: + type: object + required: + - to + - amount + properties: + to: + type: string + description: Recipient address + example: "0x1234567890abcdef1234567890abcdef12345678" + amount: + type: string + description: Amount of LPT to transfer (in wei) + example: "1000000000000000000" + responses: + "200": + description: Transfer successful + content: + application/json: + schema: + $ref: "#/components/schemas/TransactionResponse" + + /signMessage: + post: + tags: + - Ethereum + summary: Sign message + description: Sign message with node's private key + operationId: signMessage + requestBody: + required: true + content: + application/json: + schema: + type: object + 
required: + - message + properties: + message: + type: string + description: Message to sign + example: "Hello Livepeer" + responses: + "200": + description: Message signed successfully + content: + application/json: + schema: + type: object + properties: + signature: + type: string + description: Hex-encoded signature + example: "0x..." + +components: + schemas: + NodeStatus: + type: object + properties: + node: + type: object + properties: + version: + type: string + example: "0.7.0" + chainId: + type: integer + example: 42161 + account: + type: object + properties: + address: + type: string + example: "0x..." + ethBalance: + type: string + example: "1000000000000000000" + lptBalance: + type: string + example: "1000000000000000000" + + ProtocolParameters: + type: object + properties: + totalBondedToken: + type: string + totalSupply: + type: string + paused: + type: boolean + currentRound: + type: string + + Orchestrator: + type: object + properties: + address: + type: string + example: "0x..." + serviceURI: + type: string + example: "https://orchestrator.example.com:8935" + pricePerPixel: + type: string + example: "1000" + activationRound: + type: string + + TransactionResponse: + type: object + properties: + txHash: + type: string + description: Transaction hash + example: "0x..." + status: + type: string + description: Transaction status + example: "success" diff --git a/snippets/assets/Livepeer-Logo-Symbol-Light.svg b/snippets/assets/Livepeer-Logo-Symbol-Light.svg new file mode 100644 index 00000000..3ea275c4 --- /dev/null +++ b/snippets/assets/Livepeer-Logo-Symbol-Light.svg @@ -0,0 +1,3 @@ + + + diff --git a/snippets/automationData/blog/ghostBlogData.jsx b/snippets/automationData/blog/ghostBlogData.jsx new file mode 100644 index 00000000..3cf1fd8f --- /dev/null +++ b/snippets/automationData/blog/ghostBlogData.jsx @@ -0,0 +1,191 @@ +export const ghostData = [ + { + title: `A Real-time Update to the Livepeer Network Vision`, + href: `https://blog.livepeer.org/a-real-time-update-to-the-livepeer-network-vision/`, + author: `By Livepeer Team`, + content: `

For the past year, the Livepeer ecosystem has been guided by the Cascade vision: a path to transition from a pure streaming and transcoding infrastructure to one that can provide compute for the future of real-time AI video. The latest Livepeer quarterly report from Messari highlights that this transition is paying off, with network fees up 3x from this time last year and over 72% of fees now driven by AI inference. This is exemplified by the growing set of inspirational examples emerging from Daydream-powered real-time AI and real-time agent avatar generation through Embody and the Agent SPE.

Source: Livepeer Q3 2025 Report by Messari

This shift has been an ecosystem-wide effort, ranging from branding and communications, to productization and go-to-market, to hardware upgrades for orchestrators. It has successfully moved the project under an updated mission and direction; however, it has still left ambiguity about what the Livepeer network itself offers as killer value propositions to new builders outside the existing ecosystem. Is it a GPU cloud? A transcoding infra? An API engine? Now that there are signs of validation and accelerating momentum around an exciting opportunity, it's time to home in on a refined vision for the future of the Livepeer network as a product in its own right.

The market for video is set to massively expand

The concept of live video itself is expanding well beyond a single stream captured from a camera. Entire worlds and scenes are now generated or enhanced in real time with AI assistance, leading to more immersive and interactive experiences than are possible through old-school streaming alone. For a taste of the future, see the following examples:

  1. The future of gaming will be AI-generated video and worlds rendered in real time.

  2. Video streams can be analyzed and their data leveraged programmatically in real time, for instant insight generation and decision making.

  3. Real-time style transfer can enable avatars and agents to participate in the global economy.

Video world models and real-time AI video are merging: both use AI to generate frame-by-frame video output on the fly, with low latency, based on user input and AI inference. This requires a tremendous amount of GPU compute and a first-class low-latency video streaming and compute stack: two areas in which the Livepeer network and community thrive, and two areas to which the many generic GPU inference providers in the market bring no unique skill set, experience, or software advantage.

The big opportunity for the Livepeer network is to be the leading AI Infrastructure For Real-Time Video.
From interactive live streaming to generative world models, Livepeer's open-access, low-latency network of GPUs will be the best compute solution for cutting-edge AI video workflows.

World models are a game-changing category, and Livepeer is well suited to offer a unique, differentiated product here that serves a huge market of diverse use cases. These range from creative entertainment, to gaming, to robotics, to data analysis, to monitoring and security, to synthetic data generation for AGI itself.

It may sound like an ambitious stretch, but Nvidia executives responsible for the category have even projected that, due to the impact on robotics, the economic opportunity for world models could exceed $100 trillion, approximately the size of the entire global economic output!

What does it mean to productize the Livepeer network to succeed as a valuable infrastructure in this category?

From a simplified viewpoint, it needs to deliver on the following:

1. The ability for users to deploy real-time AI workflows to the Livepeer network and request inference on them.

2. Industry-leading latency for inference on real-time AI and world model workflows.

3. Cost-effective scalability: users can pay as they go to scale capacity up and down, and the network automagically delivers the scale required.

Imagine a gaming platform powering world-model-generated games using its unique workflows, which generate game levels or areas in a certain style by combining several real-time models, LLMs, and style transfer mechanisms. Each game it powers has users exploring and creating their own corners of the interactive worlds, based on prompts and gameplay inputs. Every gamer who joins a game represents a new stream of AI video compute, and the Livepeer network is the backing infrastructure that provides the compute for this video world generation, leveraging hundreds or thousands of GPUs concurrently.

For this to be possible, the Livepeer network needs to enable that gaming platform to deploy its game-generation workflow. It needs to offer low latency on the inference that runs this workflow, relative to generic GPU compute clouds. The pricing needs to be competitive versus alternative options in the market for this GPU compute. And the network needs to allow the company to scale up and down the number of GPUs that are live and ready to accept new real-time inference streams, based on the number of users currently live in the games it powers.

All of this is possible on the Livepeer network, and it isn’t far away from where we are now. If we work to build, test, and iterate on the Livepeer network itself towards supporting the latency and scale required for these types of workflows, we’ll be set up to power them.
Now multiply this example gaming company across the many diverse industries and verticals that real-time AI and world models will touch. Each category can have one or more companies competing to leverage this scalable, cost-effective infrastructure for unique go-to-market strategies targeting different segments. And they can all be powered by the Livepeer network's unique value propositions.

Livepeer’s core network is strategically positioned

What are the value propositions that differentiate the Livepeer network from alternative options in the market? I'd argue there are three primary, table-stakes, must-have value propositions if Livepeer is to succeed.

1. Industry-standard, low-latency infrastructure specializing in real-time AI and world model workflows: First, the network needs to let its users deploy custom workflows. Inference on base models alone is not enough and does not represent scaled demand. Users want to take base models, chain them together with other models and pre/post-processors, and create unique, specialized capabilities. When one of these capabilities is defined as a workflow, that workflow is the unit that needs to be deployed as a job on the Livepeer network, and the network needs to be able to run inference on it. Second, for these real-time, interactive use cases, latency matters a lot. Generic GPU clouds don't offer the specialized low-latency video stacks to ingest, process, and serve video with optimal latency, but Livepeer does. And Livepeer needs to benchmark itself against alternative GPU clouds, targeting equal or lower latency for these real-time and world model use cases.

2. Cost-effective scalability: GPU provisioning, reservations, and competing to procure scarce supply create major challenges for AI companies, which often overpay for GPUs that sit idle most of the time in order to guarantee the capacity they need. The Livepeer network's value proposition is that users should be able to "automagically" scale up almost instantly and pay on demand for the compute they use, rather than pre-paying for reservations and letting capacity sit idle. This is enabled by Livepeer tapping otherwise-idle long-tail compute through its open marketplace and its supply-side incentives. The Livepeer network needs to be more cost-effective than alternative GPU clouds within this category, with impacts comparable to the 10x+ cost reduction the network has already demonstrated in live video transcoding.

3. Community-driven, open-source, open-access: The Livepeer project and software stack are open source. Users can control, update, and contribute to the software they are using. They can also be owners in the infrastructure itself through the Livepeer Token, benefiting from the network’s improvements and adoption and creating a network effect. A community that cares about the network’s success and pushes it forward collectively can be a superpower, relative to the uncertain, shaky relationship between builders and centralized platform providers – where builders have a history of getting rugged by access limitations, changes in functionality, or discontinuation of the platform. Anyone can build on the Livepeer network regardless of location, jurisdiction, use case, or central-party control.

The above are primary value propositions that should appeal to nearly all users. And we must work to close the gaps and live up to those value props before we can credibly hope to go to market and attract new vertical-specific companies to build directly on top of the network. Luckily, in addition to all of Livepeer’s streaming users, we have a great real-time AI design partner in Daydream, which is already going to market around creative real-time AI, using the network, and contributing to its development so it lives up to these requirements. While building with this design partner, the ecosystem should be working to productize these promises in a more generic form – setting up benchmarks and testing frameworks, and building mechanisms for scaling up supply ahead of demand – so that it can present this power to the world alongside successful Daydream case studies.

Opportunities to push towards this vision

To truly live up to these value propositions, there are a number of opportunities for the community to focus on in order to close some key gaps. There are many details to come in more technical posts laying out roadmaps and execution frameworks, but at a high level, consider a series of milestones that take the network as a product from technically functional, to production-usable, to extensible, to infinitely scalable:

  1. Network MVP - Measure what matters: Establish key network performance SLAs, measure latency and performance benchmarks, and enhance the low-latency client to support realtime AI workflows above industry-grade standards.
  2. Network as a Product - Self adaptability and scalability: Network delivers against these SLAs and core value props for supported realtime AI workflows. Selection algorithms, failovers and redundancy, and competitive market price discovery established for realtime AI.
  3. Extensibility - Toolkit for community to deploy workflows and provision resources: Workflow deployment and signaling, LPT incentive updates to ensure compute supply for popular AI workflows exceeds demand.
  4. Parallel Scalability: Manage clusters of resources on the network for parallel workflow execution, truly unlocking job types beyond single-GPU inference. 

Many teams within the ecosystem, from the Foundation, to Livepeer Inc, to various SPEs have already started operationalizing around how they’ll be contributing to milestones 1 and 2 to upgrade the network to deliver against these key realtime AI value propositions. 

Conclusion and Livepeer’s opportunity

The market opportunity to be the GPU infrastructure that powers real-time AI and world models is absolutely massive – the compute requirements are tremendous, 1000x those of AI text or images – and real-time interaction with media represents a new platform that will affect all of the above-mentioned industries. The Livepeer network can be the infrastructure that powers it. How we plan to close the needed gaps and achieve this will be the subject of an upcoming post. But when we do prove these value propositions, Livepeer will have a clear path to 100x the demand on the network.

The likely target users for the network are those startups building vertical-specific businesses on top of real-time AI and world-model workflows. The ecosystem should look to enable one (or multiple!) startups in each category building real-time AI platforms that serve gaming, robotics, synthetic data generation, monitoring and analysis, and all the additional relevant categories. The network’s value propositions will hopefully speak for themselves, but in the early stages of this journey, the ecosystem will likely want to use incentives (like investment or credits) to bootstrap these businesses into existence. Each will represent a chance at success, and each will bring more demand and proof.

Ultimately, many users of these platforms may choose to build directly on the network themselves. Similar to how startups begin on platforms like Heroku, Netlify, or Vercel, then build directly on AWS as they scale and need more control and cost savings, and ultimately move to their own datacenters at even greater scale – users of Daydream, or of a real-time agent platform built on Livepeer, may ultimately choose to run their own gateways to realize the cost savings, control, and full feature set that comes from doing so. This is a good thing! It represents even more usage and scale for the network, and more proof that the Livepeer network has product-market fit as infrastructure and can absorb all workflows directly. The businesses built on top will provide their own vertical-specific bundles of features and services that onboard vertical-specific capacity, but they’ll be complemented and enabled by the Livepeer network’s superpowers.

While there’s a lot of work ahead, the Livepeer community has already covered tremendous ground on this mission. We are already powering millions of minutes of real-time AI inference per week, our orchestrators are already upgrading their capacity and procurement mechanisms to provide real-time-AI-capable compute, and Foundation groups are already working to evaluate the network’s incentives and cryptoeconomics to sustainably fund and reward those contributing to this effort. We’re set up well to capture this enormous opportunity!

`, + datePosted: `Nov 13, 2025`, + img: `https://blog.livepeer.org/content/images/2025/11/LP_Blog-Header_Nov25_01_moshed-1.png`, + excerpt: `For the past year, the Livepeer Ecosystem has been guided by the Cascade vision:  a path to transition from a pure streaming and transcoding infrastructure, to an infrastructure that could succeed at providing compute for the future of real-time AI video. The latest Livepeer quarterly report from Messari highlights that this transition is paying off, with network fees up 3x from this time last year, and over 72% of the fees now driven via AI inference. This is exemplified by the growing inspirat`, + readingTime: 9, + }, + { + title: `Livepeer Onchain Builders - Streamplace: Building the Video Backbone of Decentralized Social`, + href: `https://blog.livepeer.org/livepeer-onchain-builders-streamplace-building-the-video-backbone-of-decentralized-social/`, + author: `By Livepeer Team`, + content: `

Welcome to Livepeer Onchain Builders, a new content series spotlighting the Special Purpose Entities (SPEs) funded by the Livepeer onchain treasury. SPEs are working groups funded by the community treasury to work on specific tasks and are accountable to the community for their delivery. These deep dives will explore how each initiative is driving protocol usage, expanding infrastructure, and pushing the boundaries of what’s possible in decentralized video and AI.

Streamplace is an open-source video streaming platform designed to power decentralized social applications with real-time, creator-first infrastructure. It aims to make livestreaming and video hosting as seamless as TikTok or YouTube, but built on open protocols and self-sovereign identity.

What makes it ambitious? Streamplace is not only building full-stack video infra for federated social networks, it's doing so in a way that prioritizes interoperability, scalability, and public goods. From developer SDKs to end-user apps, Streamplace is building an entire ecosystem.

What is an SPE? 

A Special Purpose Entity (SPE) is a focused, community-funded team contributing to the Livepeer ecosystem. SPEs are typically mission-driven groups that operate independently to build infrastructure, applications, or tooling that expand and improve the Livepeer protocol. These teams are funded through proposals to the onchain treasury and are accountable to the community.

SPEs are necessary for the ecosystem because no single team can build every part of a decentralized protocol. SPEs decentralize development, fund public goods, and allow the community to direct resources where they're most needed.

Why do they matter to delegators and stakeholders? Because SPEs grow usage. More usage = more fees = more rewards. Delegators benefit when the protocol succeeds, and SPEs are among the most direct ways to make that happen.

From Aquareum to Streamplace

A clear goal drives the team behind Streamplace: to build the foundational video infrastructure for the next generation of decentralized social platforms. These platforms, such as Farcaster and the AT Protocol, promise user-owned identity and interoperability, but have thus far lacked robust support for live and on-demand video.

Streamplace solves this by providing a full-stack, developer-friendly video layer that anyone can plug into. It's a bold attempt to make decentralized video feel as native and easy as its Web2 counterparts.

Streamplace started as Aquareum, a project with the same mission and team. This evolution into Streamplace is a rebranding, not a restart, building on past momentum with a sharper focus.

Their vision is to give every user the ability to publish, stream, and remix content with the same ease as TikTok or YouTube, but backed by self-sovereign identity and decentralized networks.

Streamplace homepage

The first proposal delivered:

  • A unified Aquareum node: bundling the Livepeer stack with indexing and playback.
  • App releases on iOS, Android, and Web.
  • Native integrations with AT Protocol and Farcaster.
  • Support for C2PA metadata and content provenance.

Now, Streamplace continues that momentum with 100,000 LPT in treasury funding and a clear mandate to scale.

Streamplace Grafana dashboard

Why Streamplace Matters

Video is the heart of online social interaction. Yet decentralized social networks have lagged in providing seamless, user-friendly video experiences. Streamplace addresses this by:

  • Transcoding every livestream through Livepeer, providing decentralized, low-cost processing for global delivery.
  • Powering partner platforms like Skylight Social, a TikTok alternative backed by Mark Cuban that recently hit #1 in Entertainment on the App Store.
  • Making it dead-simple to stream or host video through single-binary nodes that anyone can deploy.
  • Championing public goods: 100% of their code is open source, with a commitment to infrastructure, not monetization lock-in.

Decentralized social, spanning protocols like Farcaster, AT Protocol, and Bluesky, represents a movement toward user-owned networks and open standards. These networks are gaining traction, but video remains a missing layer. That’s where Streamplace comes in.

Video is essential because it's the most engaging, expressive medium for creators and communities. And as these decentralized platforms scale, having real-time, composable video becomes non-negotiable.

Streamplace positions itself as the default video infra layer for this new social stack, and with every stream transcoded through Livepeer, it's also a major driver of protocol usage and visibility.

What Streamplace 2.0 Will Deliver

This new phase of work, funded by the Livepeer treasury, focuses on scale, performance, and ecosystem integration:

Infrastructure Enhancements

  • Expand server capacity to support growing user bases like Skylight.
  • Harden video nodes for reliability under real-world load.
  • Deliver high-quality performance on all platforms: Web, iOS, Android.

Protocol and Developer Growth

  • Deepen native integration with AT Protocol.
  • Build SDKs and NPM packages to embed Streamplace easily into other apps.
  • Ship VOD functionality and new moderation tools.

Community-First Ethos

  • Launch creator monetization models and stream incentive programs.
  • Empower streamers with self-hosted app capabilities ("Twitch, but it's your own app").
  • Maintain full transparency and livestream development.

The Livepeer Angle

Livepeer's decentralized video infrastructure powers every second of video on Streamplace. That means more work for orchestrators, more fees flowing through the protocol, and more incentive for high-quality node operation.

Streamplace strengthens the Livepeer ecosystem in three key ways:

  • Demand generation: Real-world usage at scale means more consistent transcoding work.
  • Protocol visibility: High-impact apps like Skylight drive awareness of Livepeer beyond its native circles.
  • Infrastructure robustness: Streamplace's nodes enhance the distributed capacity of the Livepeer network.

Without Livepeer, a decentralized video stack like Streamplace wouldn’t be possible. And without ambitious apps like Streamplace, Livepeer wouldn’t have the same opportunity to prove its value at scale.

Final Thoughts

Streamplace is a keystone piece of open video infrastructure and a cornerstone in the emerging world of decentralized social media. By fusing creator-first tooling with Livepeer’s scalable infrastructure, it offers a glimpse into what the open internet can become.

As decentralized protocols shift from vision to adoption, the need for native video is urgent. Streamplace, with the support of the Livepeer treasury and a relentless commitment to open-source infrastructure, is meeting that need head-on.

If you're a developer, creator, or community builder, now is the time to get involved.

Do you want to contribute to Streamplace's success? Explore the open roles here.

Interested in building or contributing to the Livepeer ecosystem? Learn more about current and past SPEs, open opportunities, and how to submit your own proposal here.

Follow along, fork the code, or join a stream — the future of social video is open.

Streamplace App

Streamplace Proposal

Aquareum Proposal


Livepeer is a decentralized video infrastructure network for live and on-demand streaming. It has integrated AI Video Compute capabilities (Livepeer AI) by harnessing its massive GPU network and is now building the future of real-time AI video.

Twitter | Discord | Website

`, + datePosted: `Aug 14, 2025`, + img: `https://blog.livepeer.org/content/images/2025/08/Onchain-Builders-Streamplace.jpg`, + excerpt: `Welcome to Livepeer Onchain Builders, a new content series spotlighting the Special Purpose Entities (SPEs) funded by the Livepeer onchain treasury. SPEs are working groups funded by the community treasury to work on specific tasks and are accountable to the community for their delivery. These deep dives will explore how each initiative is driving protocol usage, expanding infrastructure, and pushing the boundaries of what’s possible in decentralized video and AI. + +Streamplace is an open-source `, + readingTime: 5, + }, + { + title: `Builder Story: dotsimulate x Daydream`, + href: `https://blog.livepeer.org/builder-story-dotsimulate-x-daydream/`, + author: `By Livepeer Team`, + content: `

Building StreamDiffusionTD Operator - a Real-Time Generative Video Operator for TouchDesigner, Powered by the Daydream API

Creator:
Lyell Hintz (@dotsimulate)
Operator: StreamDiffusionTD
Backends Supported: Local + Daydream (Livepeer)

[Embedded demo video, 0:34]

Overview

StreamDiffusionTD is a TouchDesigner operator that connects real-time inputs like audio, sensors, and camera feeds to StreamDiffusion, enabling live generative visuals controlled in real time. With the Daydream API, it adds remote inference capabilities on top of the existing local GPU inference and unlocks more flexibility for users.

Built by Lyell Hintz, a technical artist and TouchDesigner developer, the operator is used in live shows, installations, and experimental workflows.

Why It Was Built

Lyell began working on the operator a few hours after StreamDiffusion was released on GitHub. He wanted to use it in TouchDesigner - a powerful tool for real-time interactive content creation.

“TouchDesigner is the only place this could be controlled from… it can hook into everything else.”

From the start, he avoided creating a “black box.” The operator exposes core parameters like prompt, seed, and ControlNet weights, allowing users to adjust values and see results immediately.

Key Features

  • Real-time video generation
  • Prompt and seed morphing
  • Dynamic ControlNet weighting
  • Live input support: audio, sensors, camera
  • Local GPU and Daydream backend options
  • Instant visual feedback in TouchDesigner

[Embedded demo video, 0:26]

Daydream API Integration

StreamDiffusionTD works with the Daydream API, which allows the operator to run on a remote GPU backend. This eliminates the major barrier of requiring a high-end PC with an NVIDIA RTX 4090 to run StreamDiffusion at professional quality, unlocking the flexibility to run it from any location, on any device form factor.

Just drop in your API key and hit “Start Stream.” The backend handles orchestration, model hosting, and frame delivery, so builders can stay focused on their creative and technical workflows.

Setup takes less than a minute, and once installed, the configuration is remembered for future use. Daydream’s API brings new features to StreamDiffusion:

  • Multi-ControlNet: mix different ControlNets for better artistic control
  • IPAdapter: use images as powerful style guides
  • TensorRT: better frame rates for smooth video output
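
To make the developer flow concrete, here’s a minimal sketch of what “drop in your API key and hit Start Stream” could look like from code. The endpoint, field names, and response shape below are illustrative assumptions for this post, not the documented Daydream API:

```javascript
// Hypothetical sketch only: the endpoint and parameters are assumptions,
// not Daydream's published API. Requires Node 18+ for the global fetch.
const apiKey = process.env.DAYDREAM_API_KEY;

async function startStream() {
  // Ask the remote backend to spin up a StreamDiffusion pipeline.
  const res = await fetch("https://api.daydream.example/v1/streams", {
    method: "POST",
    headers: {
      "Authorization": "Bearer " + apiKey,
      "Content-Type": "application/json",
    },
    body: JSON.stringify({
      pipeline: "streamdiffusion",
      prompt: "neon cityscape, volumetric light",
      // Multi-ControlNet: blend several controls with different weights.
      controlnets: [
        { type: "depth", weight: 0.8 },
        { type: "canny", weight: 0.3 },
      ],
    }),
  });
  if (!res.ok) throw new Error("Failed to start stream: " + res.status);
  const stream = await res.json();
  // The operator would then push input frames to, and pull generated
  // frames from, this remote stream instead of a local GPU.
  console.log("Stream " + stream.id + " is live at " + stream.playbackUrl);
}

startStream().catch(console.error);
```

In StreamDiffusionTD itself, all of this is hidden behind the operator’s parameters – the sketch is just to show how thin the integration layer can be.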

Daydream is adding support for more real-time video generation models, and developers can request features, suggest improvements, or build on top of the API itself. It aligns with the values of open tooling and community-led infrastructure.

How Artists Can Use StreamDiffusionTD in TouchDesigner

  • Audio-reactive visuals for concerts
  • Camera-driven generative visuals
  • Real-time visuals for LED walls and stages
  • TouchDesigner automation workflows

Because it's built inside TouchDesigner, the operator can be extended using Python, MIDI, OSC, or any other input TouchDesigner supports.

Current State

The operator is live and ready to use, with active development underway for new features and improved performance. It’s a great time to jump in, explore, and help shape what comes next.

Try it Yourself

Operator Access: patreon.com/dotsimulate
Community and Support: discord.gg/daydreamlive
API Keys can be requested here

`, + datePosted: `Aug 5, 2025`, + img: `https://blog.livepeer.org/content/images/2025/08/DD_Builder-Story_dotsimulate_01.png`, + excerpt: `Building StreamDiffusionTD Operator - a Real-Time Generative Video Operator for TouchDesigner, Powered by the Daydream API + +Creator: Lyell Hintz (@dotsimulate) +Operator: StreamDiffusionTD +Backends Supported: Local + Daydream (Livepeer) + + + + + + + + + + + + + + + + + + + + + + + + +0:00 + +/0:34 + + +1× + + + + + + + + + + + + + + + + + +Overview + +StreamDiffusionTD is a TouchDesigner operator that connects real-time inputs like audio, sensors, and camera feeds to StreamDiffusion, enabling live generative visuals controlled in real time. Wit`, + readingTime: 2, + }, + { + title: `Livepeer Incorporated! (and realtime AI)`, + href: `https://blog.livepeer.org/livepeer-incorporated-and-realtime-ai/`, + author: `By Livepeer Team`, + content: `

Written by Doug Petkanics, Co-founder and CEO at Livepeer Inc

The past 18 months have been an energizing time to be in the Livepeer Ecosystem. An onchain treasury was introduced to fund public goods via community governance, the community has coalesced around Livepeer’s opportunity to be the leading infrastructure for realtime AI video, and fees and usage of the network have been steadily increasing due to this focus. The Livepeer Foundation has recently launched to steward the 10+ entities in the ecosystem that are core contributors to the project, and is unlocking even more funding around the opportunities recommended in the project’s strategic pillars.

With so much core development, marketing, and growth driven by the ecosystem at large, the company that I co-founded and operate, Livepeer Incorporated, has had the opportunity to shift its focus to what we deem the highest-priority area of the project, where we feel uniquely suited to make an outsized impact: executing a high-conviction go-to-market motion in an attempt to dramatically grow demand on the Livepeer network. We, like many in the ecosystem, are fully bought into the realtime AI video vision laid out in Livepeer Cascade, and are solely focused on productization to find product-market fit for the Livepeer network as the leading infrastructure in the coming world of live video AI. Here is a bit about what Livepeer Inc is focused on – and, almost as importantly, what we are not focused on – in the coming 12 months.

Product Market Fit for Realtime AI Video 

As mentioned, the number one priority is to prove that the Livepeer network has product market fit as an infrastructure that runs the latest and greatest in realtime AI video workflows for developers. To do this, we’ll focus on three core things:

  1. Contribute to core network development to ensure Livepeer is an infrastructure that can run realtime AI video workflows.
  2. Build the developer APIs that developers use to run these workflows and build them into applications. This is a natural extension of Livepeer Studio.
  3. Cultivate the leading realtime AI video community. Researchers, builders, and creators interested in this coming category need a home. They will provide the moat that ensures that an open, community-led infrastructure will always be more responsive, cost effective, and full featured than centralized alternatives.

We’re going to provide the full-stack product, engineering, community, and go-to-market motion to validate product-market fit for this opportunity. This will drive significant fees and growth into the Livepeer network. We’re aligned as large LPT token holders and want the network to succeed - which represents a far bigger opportunity for Livepeer Inc than any revenue-related opportunity via SaaS services in the short term. Let’s grow those network fees!

What Livepeer Inc is Not Focused On

While there are many potential products and go-to-markets that could be executed under an ambitious vision of being the world’s open video infrastructure, a single company is more likely to succeed by focusing on only one opportunity at a time. Many alternative demand-generating bets will be better served by other self-motivated actors in the ecosystem - especially as the open-source software around Livepeer and the broader ecosystem have matured to the point of providing reliable access points for different categories of use cases. Regarding Livepeer Inc’s learnings in some of these categories:

  • Transcoding alone has been proven out technically and economically; however, the market hasn’t accepted the standalone infrastructure without significant productization, support, SLAs, and enterprise services around it.
  • Similarly, when bundled with end-to-end streaming, the offering isn’t significantly differentiated in a crowded and consolidating market.
  • Livepeer Studio will continue to support existing users at the enterprise level that pay for these surrounding services, while passing the transcoding jobs through to the Livepeer network, but due to the long sales cycle and slow growth, it will not be actively competing to grow this source of demand. 
  • The ecosystem can support aspiring users of transcoding and streaming via projects like Streamplace, the Frameworks SPE, and their supporting teams. One of the core pillars of the Livepeer Foundation’s GTM recommendations is to tackle being the open video infrastructure for web3 social and decentralized streaming, so the ecosystem will prioritize support. This includes aspiring web3-centric streaming users, who culturally align with the values of the project community, but to date have not shown significant growth nor driven significant fees to the network. There’s an opportunity for these projects to crack this nut and help these users grow, if they deem it to be worth the effort!
  • There are also additional bets that the ecosystem is interested in around the realtime AI mission. These are laid out in the Livepeer Foundation’s GTM Strategy post. Visual avatars for live AI agents are one example; realtime video analysis and understanding are others. These areas do overlap with the broad theme that Livepeer Inc is focused on - running realtime AI models on live video on the Livepeer network. However, as Inc initially pursues creative AI use cases to inspire the broader world in what’s possible, we welcome others in the ecosystem building commercial entities to go after these opportunities. And we will certainly collaborate. If the ecosystem efforts make technical progress but stop short of commercializing and going to market, these are areas where collaborating with Inc to productize for commercial purposes is worth considering.

A Simplified View: Foundation and Inc

While the above contains a lot of details about realtime AI and specific demand generating bets on the Livepeer network, there’s a simplified view:

  • The Livepeer Foundation will steward the Livepeer community, project marketing, and public goods funding to enable recommendations on the project roadmap.
  • Livepeer Inc will focus on driving demand to the network by building the realtime AI products, go to market services, and AI community - initially in the creative realtime AI video space.

If you’re interested in building within this ecosystem, there are lots of opportunities both to contribute to the core development and operations of the project in service of the realtime AI mission, and to develop companies that serve additional markets not currently being focused on. Hopefully the above post gives you a view into some of those opportunities and gaps. Then check out the Livepeer Foundation’s recent forum posts on tactical recommendations, and raise your hand to get involved in the ones that interest you.

`, + datePosted: `Jul 31, 2025`, + img: `https://blog.livepeer.org/content/images/2025/07/e.png`, + excerpt: `Written by Doug Petkanics, Co-founder and CEO at Livepeer Inc + +The past 18 months have been an energizing time to be in the Livepeer Ecosystem. An onchain treasury was introduced to fund public goods via community governance, the community has coalesced around Livepeer’s opportunity to be the leading infrastructure for realtime AI video, and fees and usage of the network have been steadily increasing due to this focus. The Livepeer Foundation has recently launched to steward the 10+ entities in `, + readingTime: 5, + }, +]; diff --git a/snippets/automationData/forum/Hero_Livepeer_Forum.png b/snippets/automationData/forum/Hero_Livepeer_Forum.png new file mode 100644 index 00000000..7805c1bd Binary files /dev/null and b/snippets/automationData/forum/Hero_Livepeer_Forum.png differ diff --git a/snippets/automationData/forum/forumData.jsx b/snippets/automationData/forum/forumData.jsx new file mode 100644 index 00000000..5026ebdd --- /dev/null +++ b/snippets/automationData/forum/forumData.jsx @@ -0,0 +1,38 @@ +export const forumData = [ + { + title: "It's time to ACT! Accumulation & the Treasury Ceiling", + href: "https://forum.livepeer.org/t/3153", + author: "By b3nnn (@b3nnn)", + content: + "

The onchain treasury was designed to provide sustainable public goods funding. It has supported many important and strategic contributions to the Livepeer Ecosystem. The AI SPE, Streamplace, Agent SPE, and Cloud have all received funds and made important contributions. And through our onchain governance, the community has shown time and again its thoughtfulness and care for getting decisions right. Your desire to align decisions with long-term health has made us a shining example of simple but effective governance, and of how people can work together onchain.

The treasury is key to supporting strategic investments to improve UX for stakeholders, effectively manage protocol security, and fund other capital and resource needs for this exciting phase of the project.

As of now, the onchain treasury is not accumulating LPT. It was designed not to accept unlimited funding: it hit the initial value set as the ceiling, and treasury contributions reset to 0% on or around the 31st of March this year. There is a backlog of upcoming, highly strategic initiatives that will need treasury support, and we will all feel better about allocating funds if we have certainty that new funds are coming into the treasury.

I intend to post a LIP to turn on the treasury rewards again at their initial values:

  • treasuryRewardCutRate: 10%

  • treasuryBalanceCeiling: 750000 LPT

The ceiling is already set at 750,000 LPT, so that value would not be updated in the formal proposal.
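
For intuition (assuming the mechanism behaves as it did at launch): at a 10% treasuryRewardCutRate, 10 LPT of every 100 LPT minted as round rewards would flow to the treasury, and contributions would automatically stop again once the 750,000 LPT balance ceiling is reached.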

For what it’s worth, my personal bias is to increase one of these values, but I’m happy to punt that discussion to another day. Having seen the exciting things in the background that will require treasury support in coming weeks, the most pressing item for us as a community is to start getting the treasury repopulated.

I’ll be on the watercooler next week to discuss, and I’m happy to set up office hours to discuss directly if there is support for that. I look forward to proposing this for a community vote. If you have any input on the contribution percentage that goes into my proposal, please share it here.

", + replyCount: 7, + datePosted: "Dec 3, 2025", + }, + { + title: "Pre-proposal: IDOL - Improving Dex / Onchain Liquidity", + href: "https://forum.livepeer.org/t/3151", + author: "By b3nnn (@b3nnn)", + content: + '
TLDR

We propose to address known UX issues - and reduce the cost and friction of participating - by increasing DEX liquidity. Arrakis offers an optimal solution for our specific needs, and we are requesting 250,000 LPT for deployment to a Uniswap v4 pool, which will significantly reduce slippage for ecosystem participants.

Motivation

The Capital Markets Advisory board made improving onchain liquidity a tactical recommendation, specifically citing:

  • Low liquidity levels on our DEX pools (primarily Uniswap on Arbitrum). This creates high slippage when trying to transact with any size, and might deter larger stakeholders or participants from buying LPT

  • The much higher ratio of available liquidity on centralized exchanges compared to DEXs drives participants to rely on centralized platforms, exposing them to the inherent risks associated with centralized providers

  • Further, centralised exchanges often don’t support L2 withdrawals. This results in delayed bridging and withdrawal processing between L1 & L2, impairing overall UX and the efficiency of orchestrators as it relates to capital allocation

In short, improved L2 DEX liquidity is essential for both current and future participants in Livepeer.

Recommended Solution

How to address our challenges is relatively straightforward to describe:

  • Increase the amount of liquidity on targeted DEX pool/s

  • Ensure the solution is executing against this goal as agreed

  • Use funds wisely, ensuring a good balance between what we pay and what we receive

Any solution will require liquidity from the on-chain treasury to start bootstrapping an optimal asset mix. In addition to this liquidity requirement, using a traditional market maker is likely a major expense (in the range of $15-20K per month). While traditional market makers can do a good job in actively managing liquidity, especially on centralised exchanges, they often present new or additional challenges:

  • Market makers typically operate through asset loan agreements, using our capital to actively manage liquidity across venues. While this model provides flexibility and professional management, it can make visibility into how and where assets are deployed more challenging.

  • Compared to centralized venues, on-chain liquidity provision is often less economically attractive for market makers. As a result, they may prioritize other strategies or venues where returns are higher, which can limit incentives to deepen on-chain liquidity.

  • Ensuring that capital is being used effectively by traditional market makers remains challenging, as it requires clear visibility into capital deployment and a deep understanding of the alternative strategies they pursue.

While none of this is insurmountable, it requires significant thought, effort and time to ensure oversight and manage risk.

Arrakis Pro is an ideal solution to address these challenges.

Arrakis specifically addresses each of these challenges because:

  • It is built specifically for managing onchain liquidity on DEXs

  • The assets are stored in a vault controlled by a multisig made up of Livepeer Foundation members. This means the treasury, via the Foundation, can withdraw and return the liquidity at any time.

  • Because it is onchain, and through the features provided in Arrakis pro, we can check and confirm at any time where our assets are and what strategies are being applied.

  • It rebalances positions by setting up ranges / limit orders, no swaps involved. The solution algorithmically minimises price impact given the allocated capital and bootstraps base asset liquidity without causing negative selling pressure.

  • Arrakis leverages sophisticated algorithms to increase capital efficiency for the deployed capital and reduce slippage for traders on the DEX pools.

Arrakis vaults hold ~$170M TVL and the team actively manages the on-chain liquidity for over 100 protocols. Projects such as MakerDAO, Lido, Morpho, Gelato, Redstone, Wormhole, Across, Euler, Usual, Syrup, Venice.ai, Ether.fi, etc. are benefiting from the high capital efficiency and cost effectiveness for DEX liquidity optimization enabled by Arrakis PRO.

For more information regarding Arrakis and Arrakis Pro, feel free to have a look at their docs or join their community:

Arrakis | Twitter | Resources

In addition, the team are present here and will address any questions directly - hello @Arrakis

The Ask

We want to significantly decrease slippage and costs for orchestrators and other participants to interact with the network through onchain liquidity.

We are asking for 250,000 LPT (approx. $1M in USD value) to be held in a multisig controlled by the Livepeer Foundation, to be deployed via an onchain vault with Arrakis as a concentrated pool on Uniswap v4.

Management of concentrated liquidity on Uniswap V4 allows for larger trades with minimal price impact, improving the overall trading experience. Savings to participants are substantial at approx. $1500 in slippage reduction on a $25,000 sale of LPT (estimate based on data below).
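
For scale (our arithmetic on the estimate above, not a guaranteed rate): $1,500 of avoided slippage on a $25,000 sale is 6% of the trade (1,500 / 25,000 = 0.06), so participants transacting at this size would keep meaningfully more of every trade.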

Comparison of current and estimated price impact (after successful ETH liquidity bootstrapping) for buying LPT and ETH across different amounts

Specification for Livepeer
  1. The Arrakis team uses the existing LPT/ETH pool on the 0.3% fee tier for UniswapV4

  2. Arrakis then deploys a dedicated vault managed by the Arrakis Pro smart contract for this LPT/ETH Uniswap pool.

  3. The Livepeer Foundation team establishes a ⅔ multisig for custody of the funds. If the proposal passes, funds are transferred onchain to this multisig account.

  4. Through this Livepeer Foundation multisig, we deposit $1 million worth of $LPT into the Arrakis Pro vault. Transfers in and out of the vault are controlled by the multisig, meaning the funds cannot be deployed or moved elsewhere by Arrakis.

  5. Arrakis Pro will allocate the provided liquidity in a concentrated and fully active market making strategy to facilitate trading on UniswapV4.

  6. The strategy initially operates to bootstrap ETH to establish a 50/50 inventory ratio over the first months. The primary objective is to create price stability by generating deep liquidity and reaching an even inventory over time.
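
To illustrate the bootstrapping step (a simplified reading of the strategy, not a commitment on exact mechanics): the vault starts holding roughly $1M in LPT and places one-sided LPT ranges above the market price; as traders buy LPT from those ranges, the vault accumulates ETH without ever market-selling LPT, trending toward approximately $500k of LPT and $500k of ETH over the first months.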

For the services provided, Arrakis charges the following fees:

Arrakis Asset-under-Management (AUM) fee: 1% per year, waived for the first 6 months

Arrakis performance fee: 50% of trading fees the vault generates
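
As a rough worked example (our numbers, not an Arrakis quote): on the proposed ~$1M deployment, the 1% AUM fee comes to about $10,000 per year once the six-month waiver lapses - roughly $5,000 for the remainder of year one - and if the vault generated, say, $40,000 in trading fees over a year, $20,000 would accrue to the Livepeer DAO and $20,000 to Arrakis.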

FAQ

What are the risks of this model?

  • Deploying funds to DEX pools bears smart contract risk and general market risk (e.g. token exposure, impermanent loss). Arrakis smart contracts have been audited by leading security firms and currently secure +$150M TVL (https://docs.arrakis.finance/text/resources/audits.html)

What happens to the capital required?

  • The capital required is deployed by the Livepeer DAO, via a Foundation controlled multisig, to a self-custodial smart contract vault and can be withdrawn at any point in time. Arrakis does not hold custody, nor control the funds deployed outside of the mandate to manage DEX liquidity on Uniswap V4 for the respective trading pair.

Will this impact the current liquidity on CEXs?

  • Arrakis’s mandate is to gradually improve on-chain markets and provide deeper liquidity for the respective pair over time on DEX markets. CEX markets will not be affected.

How does the Arrakis model differ from standard AMMs (like Uniswap v3)?

  • Arrakis provides a sophisticated on-chain market making service, running dedicated algorithmic market making strategies.

  • Instead of manually deploying funds into the CLAMM pool, Arrakis algorithmically rebalances the position and runs active liquidity management strategies.

Will our liquidity still be actively managed, or will it be passively allocated in a vault?

  • Close to 100% of the liquidity deployed with an Arrakis vault is actively deployed to the Uniswap CLAMM pool and provides liquidity. Small shares of liquidity remain in the vault as token reserves for rebalancing purposes.

How is the strategy for the vault determined — who sets the parameters, and how often are they rebalanced?

  • Arrakis’s quant team fine-tunes the strategies and engages in periodic review cycles, along with 24/7, 365-day monitoring and alerting.

Who controls or can modify the AMM strategy parameters?

  • Arrakis strategies are designed, deployed, and maintained by professional quant traders. The Foundation can be involved in discussions at regular intervals as needed to further align on achieving the stated goals.

Will the community have visibility into performance and strategy updates?

  • The Foundation delegates will receive access to a custom real time analytics dashboard and can share periodic updates to the forum for the community.

What happens to the liquidity if the vault underperforms or becomes unbalanced?

  • Liquidity is actively rebalanced towards a 50:50 ratio by placing one-sided limit maker orders. In adverse market scenarios, strategies will adjust to the relevant market-volatility settings.

How do fees compare to centralized market makers?

  • Centralized market makers work in two models: a) loan & option, or b) a fixed retainer fee. Arrakis works on a profit share of the trading fees earned (50% captured by the Livepeer DAO, 50% retained by Arrakis for the services provided)

How will LP performance be measured?

  • LP performance will be measured by market depth, price impact, slippage improvement, and total volume facilitated.

What happens after funds are returned?

  • It’s important to note that the liquidity in the vault can remain deployed indefinitely, but it can also be returned to the onchain treasury, or to voter control, at any time. As funds will then be held in both ETH and LPT, the community can be involved in discussions about how returned funds are stored or used.

This is a large proportion of the current treasury. What gives?

  • We recognise that this is a large ask relative to the current size and value of the treasury. The size and value of the treasury will be addressed in a separate proposal. As it relates to this proposal, consider that we will reduce slippage costs by approx. 2-3X on every DEX transaction. The ROI on this proposal will be quite substantial.
', + replyCount: 3, + datePosted: "Dec 1, 2025", + }, + { + title: "Transformation SPE Release Notes", + href: "https://forum.livepeer.org/t/3142", + author: "By Mehrdad (@Mehrdad)", + content: + "

Release notes are a way to share work being completed by the Transformation SPE and its various contributors. Dive in and explore what has been happening, and please reach out or reply with any questions - we will happily expand further.

", + replyCount: 2, + datePosted: "Nov 10, 2025", + }, + { + title: "Transcoder Campaign: organic-node.eth", + href: "https://forum.livepeer.org/t/1970", + author: "By Ron (@ron)", + content: + "

Hello fellow video enthusiasts and web3 supporters,

Thanks for taking the time to read my post. My node (organic-node.eth) has been active for about 6 months, and every day has been a great learning experience. It has been highly reliable, with 4 orchestrators across the globe and the possibility to expand further depending on demand. If you are looking to get in touch, please reach out to me on Discord: Organic-Node#9009.

It gives me great pleasure to look at Lenstube videos and think that some of these videos may have been transcoded by my orchestrator. Stakers and delegators enjoy passive income thanks to my low reward and fee cuts, and they help support a robust orchestrator for fairer web3 platforms.

Stake here:
(organic-node.eth)

", + replyCount: 1, + datePosted: "Dec 6, 2022", + }, +]; diff --git a/snippets/automationData/globals/README.md b/snippets/automationData/globals/README.md new file mode 100644 index 00000000..407971f3 --- /dev/null +++ b/snippets/automationData/globals/README.md @@ -0,0 +1,89 @@ +# Livepeer Release Updater + +Three different solutions for updating the Livepeer release version: + +## 1. **n8n Workflow** (livepeer-release-updater.json) CURRENT + +This is the recommended approach for your setup. It: + +- Polls the go-livepeer releases API every 30 minutes +- Uses Redis to track the last known version (prevents duplicate updates) +- Only updates the `LatestVersion` value without touching anything else +- Commits directly to the docs-v2 branch + +## 2. **GitHub Action** (update-livepeer-release.yml) RECOMMENDED + +If someone with admin access can add this to the docs repo's +`.github/workflows/` folder, it will run automatically without needing external +infrastructure like n8n + +## 3. **Node.js Script** (update-livepeer-release.js) + +Can be run manually or via cron job from any server with Node.js installed. + +Since you mentioned you can't get a GitHub token for the livepeer org but can +use the GUI, the n8n workflow is your best bet. You'll need to: + +1. Create a Personal Access Token from your own GitHub account (Settings → + Developer settings → Personal access tokens) +2. Ensure you have write access to the docs repository +3. Import the n8n workflow and configure it with your token + +The workflow specifically: + +- Uses regex to update ONLY the `LatestRelease` value +- Preserves all other content and formatting +- Includes error handling and validation +- Can send notifications when updates occur + +All files include the setup guide with detailed instructions for each approach. + +### 2. 
Workflow YAML (update-livepeer-release.yml)

Docker Setup

+

Your Docker content here...

+ + ), + }, + { + title: "Linux/Mac", + icon: "linux", + iconType: "solid", + content: ( + <> +

Linux/Mac Setup

+

Your Linux/Mac content here...

+ + ), + }, + ]} +/>; +``` + +### Position Options + +Control where the dropdown appears: + +```jsx +// Inline with content (default) + + +// Fixed to top-right corner + + +// Fixed to top-left corner + +``` + +### Custom Positioning with CSS + +You can add custom positioning in your `style.css`: + +```css +/* Custom position for the dropdown */ +.custom-view-dropdown-wrapper.position-custom { + position: fixed; + top: 100px; + right: 50px; + z-index: 1000; +} +``` + +Then use it: + +```jsx + +``` + +## Props + +### `views` (required) + +Array of view objects. Each view object has: + +- `title` (string, required): Display name in dropdown +- `icon` (string, optional): Font Awesome icon name (e.g., 'docker', 'linux', + 'windows') +- `iconType` (string, optional): Font Awesome icon type ('solid', 'regular', + 'brands'). Default: 'solid' +- `content` (JSX, required): The content to display when this view is selected + +### `position` (optional) + +String that controls positioning. Options: + +- `'inline'` (default): Appears inline with content +- `'top-right'`: Fixed to top-right corner +- `'top-left'`: Fixed to top-left corner +- Custom value: Add your own CSS class + +## Styling + +The component uses your Livepeer brand colors: + +- Primary: `#2b9a66` +- Dark: `#18794e` + +You can override styles in your `style.css`: + +```css +/* Change button colors */ +.custom-view-dropdown-button { + background-color: your-color !important; + border-color: your-color !important; +} + +/* Change dropdown menu */ +.custom-view-dropdown-menu { + min-width: 300px !important; +} + +/* Change active item color */ +.custom-view-dropdown-item.active { + background-color: your-color !important; +} +``` + +## Converting from Mintlify View Components + +**Before (Mintlify View):** + +```jsx + + ## Docker Setup + Content here... + + + + ## Linux/Mac Setup + Content here... + +``` + +**After (Custom View Dropdown):** + +```jsx +## Docker Setup Content here..., + }, + { + title: "Linux/Mac", + icon: "linux", + iconType: "solid", + content: <>## Linux/Mac Setup Content here..., + }, + ]} +/> +``` + +## Example + +See `/snippets/examples/custom-view-example.mdx` for a complete working example. diff --git a/snippets/components/buttons.jsx b/snippets/components/buttons.jsx new file mode 100644 index 00000000..61defcc0 --- /dev/null +++ b/snippets/components/buttons.jsx @@ -0,0 +1,87 @@ +export const BasicBtn = () => { + return
; +}; + +export const DownloadButton = ({ + label = "Download", + icon = "download", + downloadLink, + rightIcon = "", + border = false, +}) => { + const [isVisible, setIsVisible] = React.useState(false); + const ref = React.useRef(null); + + React.useEffect(() => { + const observer = new IntersectionObserver( + ([entry]) => { + if (entry.isIntersecting) { + setIsVisible(true); + observer.disconnect(); + } + }, + { threshold: 0.1 }, + ); + + if (ref.current) { + observer.observe(ref.current); + } + + return () => observer.disconnect(); + }, []); + + downloadLink = downloadLink ? downloadLink : "https://Livepeer.org"; + + const handleDownload = () => { + const a = document.createElement("a"); + a.href = downloadLink; + a.download = ""; + document.body.appendChild(a); + a.click(); + document.body.removeChild(a); + }; + + if (!isVisible) { + return ( + + ); + } + + return ( + + + + {rightIcon && ( + + )} + + ); +}; diff --git a/snippets/components/cards.jsx b/snippets/components/cards.jsx new file mode 100644 index 00000000..d77f9acd --- /dev/null +++ b/snippets/components/cards.jsx @@ -0,0 +1,234 @@ +// card layouts + +export const PostCard = ({ + title, + content, + href, + author = "Unknown", + datePosted = null, + replyCount = null, + icon = "book-open", + authorIcon = "user-pen", + dateIcon = "calendar", + cta = "Read More", + img = null, +}) => { + console.log("item", title, content, href, img); + // Show hint if content is likely to overflow (>500 chars as proxy) + const showScrollHint = content && content.length > 500; + + return ( + + {author && ( +
+ + + + {author} +
+ )} + {datePosted && ( +
+ + + + {datePosted} +
+ )} + {/* {replyCount && ( +
+ + + + Replies: {replyCount} +
+ )} */} +
+
{ + const el = e.target; + const atBottom = + el.scrollHeight - el.scrollTop <= el.clientHeight + 10; + const hint = el.nextSibling; + if (hint) hint.style.display = atBottom ? "none" : "block"; + }} + dangerouslySetInnerHTML={{ __html: content }} + /> + {showScrollHint && ( +
+ Scroll for more ↓ +
+ )} + + ); +}; + +export const CardColumnsPostLayout = ({ cols = 2, items = [] }) => { + console.log("items", items); + return ( + + {items.map((props, idx) => ( + + ))} + + ); +}; + +export const BlogCard = ({ + title, + content, + href, + author = "Livepeer Team", + datePosted = null, + excerpt = null, //use if we prefer people to go to the actual blog site + readingTime = null, + icon = "book-open", + authorIcon = "user-pen", + dateIcon = "calendar", + cta = "Read More", + img = null, +}) => { + console.log("item", title, content, href, img); + // Show hint if content is likely to overflow (>500 chars as proxy) + const showScrollHint = content && content.length > 500; + + return ( + + {/* {author && ( +
+ + + + {author} +
+ )} */} + {datePosted && ( +
+ + + + {datePosted} +
+ )} + {readingTime && ( +
+ + + + Read Time: {readingTime} minutes +
+ )} +
+
{ + const el = e.target; + const atBottom = + el.scrollHeight - el.scrollTop <= el.clientHeight + 10; + const hint = el.nextSibling; + if (hint) hint.style.display = atBottom ? "none" : "block"; + }} + dangerouslySetInnerHTML={{ __html: content }} + /> + {showScrollHint && ( +
+ Scroll for more ↓ +
+ )} + + ); +}; + +export const CardBlogDataLayout = ({ items = [] }) => { + console.log("items", items); + return ( +
+ {items.map((props, idx) => ( + + ))} +
+ ); +}; diff --git a/snippets/components/code.jsx b/snippets/components/code.jsx new file mode 100644 index 00000000..c0109249 --- /dev/null +++ b/snippets/components/code.jsx @@ -0,0 +1,183 @@ +export const CustomCodeBlock = ({ + filename, + icon, + language, + highlight, + codeString = "", + placeholderValue = "", + wrap = true, + lines = true, + preNote = "", + postNote = "", + output = "", +}) => { + // Return null if no codeString is provided + if (!codeString || codeString.trim() === "") { + return null; + } + + const renderedCode = codeString.replace(/\{PLACEHOLDER\}/g, placeholderValue); + // const CalloutComponent = callout?.type ? callout.type : Note; + + return ( + <> + {preNote && ( +
+ {preNote} +
+ )} + + {renderedCode} + + {postNote && ( +
+ {postNote} +
+ )} + {output?.codeString && ( + <> + + Expected Output + + } + > + + {output.codeString} + + +
+ + )} + + ); +}; + +/** + * CodeComponent - Simple code display with {PLACEHOLDER} replacement + * + * Props: + * - codeString: string with {PLACEHOLDER} to replace + * - placeholderValue: string value to insert in place of {PLACEHOLDER} + */ +export const CodeComponent = ({ + filename = "", + icon = "terminal", + language = "", + highlight = "", + expandable = false, + wrap = true, + lines = true, + codeString = "", + placeholderValue = "", +}) => { + const renderedCode = codeString.replace(/\{PLACEHOLDER\}/g, placeholderValue); + return ( + + {renderedCode} + + ); +}; + +export const ComplexCodeBlock = ({ + filename, + icon, + language, + highlight, + codeString = "", + placeholderValue = "", + wrap = true, + lines = true, + preNote = null, + postNote = null, +}) => { + // Return null if no codeString is provided + if (!codeString || codeString.trim() === "") { + return null; + } + + const renderedCode = codeString.replace(/\{PLACEHOLDER\}/g, placeholderValue); + + return ( + <> + {preNote && ( +
+ {preNote} +
+ )} + + {renderedCode} + + {postNote && ( +
+ {postNote} +
+ )} + + ); +}; + +export const CodeSection = ({ fields = {} }) => { + return ; +}; + +// export const CodeSection = ({ fields = {} }) => { +// return ; +// }; diff --git a/snippets/components/coingecko.jsx b/snippets/components/coingecko.jsx new file mode 100644 index 00000000..d5aa3732 --- /dev/null +++ b/snippets/components/coingecko.jsx @@ -0,0 +1,251 @@ +import { useState, useEffect } from "react"; + +/** + * CoinGeckoExchanges - Dynamically fetches and displays exchanges that support a coin from CoinGecko + * + * Props: + * - coinId: The CoinGecko coin ID (e.g., "arbitrum", "ethereum", "bitcoin") + */ +export const CoinGeckoExchanges = ({ coinId = "arbitrum" }) => { + const [exchanges, setExchanges] = useState([]); + const [loading, setLoading] = useState(true); + const [error, setError] = useState(null); + const [sortBy, setSortBy] = useState(null); // null (default order), "name", or "type" + const [sortOrder, setSortOrder] = useState("asc"); // "asc" or "desc" + + useEffect(() => { + const fetchExchanges = async () => { + try { + // Fetch first page of tickers from CoinGecko API + const response = await fetch( + `https://api.coingecko.com/api/v3/coins/${coinId}/tickers?depth=true` + ); + + if (response.ok) { + const data = await response.json(); + + // Extract unique exchanges from tickers, preserving API order + const exchangeMap = new Map(); + + data.tickers?.forEach((ticker) => { + if (ticker.market?.name && ticker.trade_url) { + // Only add if not already in map (preserves first occurrence) + if (!exchangeMap.has(ticker.market.name)) { + exchangeMap.set(ticker.market.name, { + name: ticker.market.name, + url: ticker.trade_url, + trustScore: ticker.trust_score || "N/A", + tradingPair: + ticker.base && ticker.target + ? `${ticker.base}/${ticker.target}` + : "N/A", + type: + ticker.market.identifier?.includes("uniswap") || + ticker.market.identifier?.includes("sushiswap") || + ticker.market.identifier?.includes("pancakeswap") || + ticker.market.name?.toLowerCase().includes("swap") || + ticker.market.name?.toLowerCase().includes("dex") + ? "DEX" + : "CEX", + }); + } + } + }); + + // Convert to array, preserving the order from the API + const exchangeList = Array.from(exchangeMap.values()); + + setExchanges(exchangeList); + } else { + throw new Error("Failed to fetch exchange data"); + } + } catch (err) { + setError("Failed to load exchange data"); + console.error("CoinGeckoExchanges error:", err); + } finally { + setLoading(false); + } + }; + + fetchExchanges(); + }, [coinId]); + + if (loading) { + return
diff --git a/snippets/components/coingecko.jsx b/snippets/components/coingecko.jsx
new file mode 100644
index 00000000..d5aa3732
--- /dev/null
+++ b/snippets/components/coingecko.jsx
@@ -0,0 +1,251 @@
+import { useState, useEffect } from "react";
+
+/**
+ * CoinGeckoExchanges - Dynamically fetches and displays the exchanges that
+ * list a coin, using the CoinGecko API
+ *
+ * Props:
+ * - coinId: The CoinGecko coin ID (e.g., "arbitrum", "ethereum", "bitcoin")
+ */
+export const CoinGeckoExchanges = ({ coinId = "arbitrum" }) => {
+  const [exchanges, setExchanges] = useState([]);
+  const [loading, setLoading] = useState(true);
+  const [error, setError] = useState(null);
+  const [sortBy, setSortBy] = useState(null); // null (default order), "name", or "type"
+  const [sortOrder, setSortOrder] = useState("asc"); // "asc" or "desc"
+
+  useEffect(() => {
+    const fetchExchanges = async () => {
+      try {
+        // Fetch first page of tickers from CoinGecko API
+        const response = await fetch(
+          `https://api.coingecko.com/api/v3/coins/${coinId}/tickers?depth=true`
+        );
+
+        if (response.ok) {
+          const data = await response.json();
+
+          // Extract unique exchanges from tickers, preserving API order
+          const exchangeMap = new Map();
+
+          data.tickers?.forEach((ticker) => {
+            if (ticker.market?.name && ticker.trade_url) {
+              // Only add if not already in map (preserves first occurrence)
+              if (!exchangeMap.has(ticker.market.name)) {
+                exchangeMap.set(ticker.market.name, {
+                  name: ticker.market.name,
+                  url: ticker.trade_url,
+                  trustScore: ticker.trust_score || "N/A",
+                  tradingPair:
+                    ticker.base && ticker.target
+                      ? `${ticker.base}/${ticker.target}`
+                      : "N/A",
+                  type:
+                    ticker.market.identifier?.includes("uniswap") ||
+                    ticker.market.identifier?.includes("sushiswap") ||
+                    ticker.market.identifier?.includes("pancakeswap") ||
+                    ticker.market.name?.toLowerCase().includes("swap") ||
+                    ticker.market.name?.toLowerCase().includes("dex")
+                      ? "DEX"
+                      : "CEX",
+                });
+              }
+            }
+          });
+
+          // Convert to array, preserving the order from the API
+          const exchangeList = Array.from(exchangeMap.values());
+
+          setExchanges(exchangeList);
+        } else {
+          throw new Error("Failed to fetch exchange data");
+        }
+      } catch (err) {
+        setError("Failed to load exchange data");
+        console.error("CoinGeckoExchanges error:", err);
+      } finally {
+        setLoading(false);
+      }
+    };
+
+    fetchExchanges();
+  }, [coinId]);
+
+  if (loading) {
+    return <div>Loading exchanges...</div>;
+  }
+
+  if (error) {
+    return <div>Error: {error}</div>;
+  }
+
+  if (exchanges.length === 0) {
+    return <div>No exchanges found for this coin.</div>;
+  }
+
+  // Sort exchanges based on current sort settings
+  const sortedExchanges = sortBy
+    ? [...exchanges].sort((a, b) => {
+        let comparison = 0;
+
+        if (sortBy === "type") {
+          comparison = a.type.localeCompare(b.type);
+        } else if (sortBy === "name") {
+          comparison = a.name.localeCompare(b.name);
+        }
+
+        return sortOrder === "asc" ? comparison : -comparison;
+      })
+    : exchanges; // If no sort selected, use original API order
+
+  const handleSort = (column) => {
+    if (sortBy === column) {
+      // Toggle sort order if clicking the same column
+      setSortOrder(sortOrder === "asc" ? "desc" : "asc");
+    } else {
+      // Set new column and default to ascending
+      setSortBy(column);
+      setSortOrder("asc");
+    }
+  };
+
+  // Convert trust score to color
+  const getTrustScoreColor = (trustScore) => {
+    if (trustScore === "N/A" || trustScore === "yellow") return "#fbbf24"; // yellow
+    if (trustScore === "green") return "#22c55e"; // green
+    if (trustScore === "red") return "#ef4444"; // red
+    return "#fbbf24"; // default yellow
+  };
+
+  // Table markup reconstructed; the original styling attributes were lost in
+  // extraction.
+  return (
+    <div>
+      <table>
+        <thead>
+          <tr>
+            <th
+              onClick={() => handleSort("name")}
+              title="Click to sort by name"
+            >
+              Exchange {sortBy === "name" && (sortOrder === "asc" ? "↑" : "↓")}
+            </th>
+            <th
+              onClick={() => handleSort("type")}
+              title="Click to sort by type"
+            >
+              Type {sortBy === "type" && (sortOrder === "asc" ? "↑" : "↓")}
+            </th>
+            <th>Pair</th>
+            <th>Trust</th>
+            <th>Link</th>
+          </tr>
+        </thead>
+        <tbody>
+          {sortedExchanges.map((exchange, index) => (
+            <tr key={index}>
+              <td>{exchange.name}</td>
+              <td>{exchange.type}</td>
+              <td>{exchange.tradingPair}</td>
+              <td>
+                {/* trust indicator markup lost in extraction; it was colored
+                    via getTrustScoreColor(exchange.trustScore) */}
+              </td>
+              <td>
+                <a href={exchange.url}>Trade →</a>
+              </td>
+            </tr>
+          ))}
+        </tbody>
+      </table>
+    </div>
+  );
+};
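+
+// Usage sketch (illustrative only; any valid CoinGecko coin ID works):
+//
+//   import { CoinGeckoExchanges } from "/snippets/components/coingecko.jsx";
+//
+//   <CoinGeckoExchanges coinId="livepeer" />
+//
+// Renders a sortable table of CEX/DEX listings built from the coin's tickers.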
diff --git a/snippets/components/custom-view-dropdown.jsx b/snippets/components/custom-view-dropdown.jsx
new file mode 100644
index 00000000..c87a7120
--- /dev/null
+++ b/snippets/components/custom-view-dropdown.jsx
@@ -0,0 +1,211 @@
+"use client";
+import React, { useState } from "react";
+
+export const CustomViewDropdown = ({ views, position = "inline" }) => {
+  const [selectedView, setSelectedView] = useState(0);
+  const [isOpen, setIsOpen] = useState(false);
+
+  if (!views || views.length === 0) {
+    return null;
+  }
+
+  // Trigger and menu markup reconstructed; the original styling attributes
+  // and label markup were lost in extraction.
+  return (
+    <>
+      {/* Custom Dropdown - Position controlled by 'position' prop */}
+      <div>
+        <button onClick={() => setIsOpen(!isOpen)}>
+          {/* trigger label markup lost in extraction */}
+        </button>
+
+        {/* Dropdown Menu */}
+        {isOpen && (
+          <>
+            {/* click-away backdrop */}
+            <div onClick={() => setIsOpen(false)} />
+            <div>
+              {views.map((view, index) => (
+                <button
+                  key={index}
+                  onClick={() => {
+                    // handler reconstructed: selects the view, closes the menu
+                    setSelectedView(index);
+                    setIsOpen(false);
+                  }}
+                >
+                  {/* option label markup lost in extraction */}
+                </button>
+              ))}
+            </div>
+          </>
+        )}
+      </div>
+
+      {/* Content Display */}
+      <div>{views[selectedView]?.content}</div>
+    </>
+  );
+};
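+
+// Usage sketch (illustrative only; the `title` key on each view item is an
+// assumption, since the option-label markup was lost above):
+//
+//   import { CustomViewDropdown } from "/snippets/components/custom-view-dropdown.jsx";
+//
+//   <CustomViewDropdown
+//     position="inline"
+//     views={[
+//       { title: "Docker", content: <p>Docker instructions…</p> },
+//       { title: "Linux", content: <p>Linux instructions…</p> },
+//     ]}
+//   />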
diff --git a/snippets/components/divider.jsx b/snippets/components/divider.jsx
new file mode 100644
index 00000000..f99cbe8a
--- /dev/null
+++ b/snippets/components/divider.jsx
@@ -0,0 +1,79 @@
+const LivepeerIcon = ({ ...props }) => {
+  return null; // <svg> markup lost in extraction
+};
+
+const LivepeerIconFlipped = ({ ...props }) => {
+  return null; // flipped <svg> markup lost in extraction
+};
+
+export const CustomDivider = ({ color, middleText }) => {
+  // Fall back to the default when no color prop is passed (the original
+  // hardcoded the default and ignored the prop).
+  const dividerColor = color || "#e5e7eb";
+
+  // Layout reconstructed; the original divider-line styling (which used
+  // dividerColor) was lost in extraction.
+  return (
+    <div>
+      <LivepeerIcon />
+      {middleText && (
+        <>
+          <span>{middleText}</span>
+        </>
+      )}
+      <LivepeerIconFlipped />
+    </div>
+  );
+};
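+
+// Usage sketch (illustrative only, not part of the original commit):
+//
+//   import { CustomDivider } from "/snippets/components/divider.jsx";
+//
+//   <CustomDivider middleText="OR" />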
diff --git a/snippets/components/embed.jsx b/snippets/components/embed.jsx
new file mode 100644
index 00000000..228c49ce
--- /dev/null
+++ b/snippets/components/embed.jsx
@@ -0,0 +1,23 @@
+import { useState, useEffect } from "react";
+
+export const MarkdownEmbed = ({ url }) => {
+  const [content, setContent] = useState("");
+
+  useEffect(() => {
+    fetch(url)
+      .then((res) => res.text())
+      .then(setContent);
+  }, [url]);
+
+  // The element that rendered the fetched content was lost in extraction.
+  return content;
+};
+
+// Same behavior under the alternate name used elsewhere in the docs.
+export const EmbedMarkdown = MarkdownEmbed;
diff --git a/snippets/components/embed.mdx b/snippets/components/embed.mdx
new file mode 100644
index 00000000..d6b6eb76
--- /dev/null
+++ b/snippets/components/embed.mdx
@@ -0,0 +1,53 @@
+export const ExternalEmbed = ({
+  children,
+  repoName,
+  repoUrl,
+  maxHeight = "1000px",
+}) => (
+  <div>
+    {/* header markup reconstructed; original styling lost in extraction */}
+    <div>
+      <a href={repoUrl}>{repoName}</a>
+      <a href={repoUrl}>View on GitHub</a>
+    </div>
+    <div style={{ maxHeight }}>
+      {children}
+    </div>
+  </div>
+);
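+
+{/* Usage sketch (illustrative only; the repo values are assumptions):
+
+   <ExternalEmbed
+     repoName="livepeer/go-livepeer"
+     repoUrl="https://github.com/livepeer/go-livepeer"
+   >
+     ...embedded content...
+   </ExternalEmbed>
+*/}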
diff --git a/snippets/components/external-content.jsx b/snippets/components/external-content.jsx
new file mode 100644
index 00000000..e0f3cadf
--- /dev/null
+++ b/snippets/components/external-content.jsx
@@ -0,0 +1,69 @@
+/**
+ * ExternalContent - A reusable component for displaying external GitHub content
+ * Usage:
+ *   import { ExternalContent } from '/snippets/components/external-content.jsx'
+ *   import MyContent from '/snippets/external/my-content.mdx'
+ *
+ *   <ExternalContent repoName="..." githubUrl="...">
+ *     <MyContent />
+ *   </ExternalContent>
+ */
+
+export const ExternalContent = ({
+  repoName,
+  githubUrl,
+  maxHeight = "1000px",
+  icon = "github",
+  children,
+}) => {
+  // Markup reconstructed; the original styling and the use of `icon` were
+  // lost in extraction.
+  return (
+    <div>
+      <div>
+        <a href={githubUrl}>{repoName}</a>
+        <a href={githubUrl}>View on GitHub</a>
+      </div>
+      <div style={{ maxHeight }}>{children}</div>
+    </div>
+  );
+};
diff --git a/snippets/components/gateways/callouts.jsx b/snippets/components/gateways/callouts.jsx
new file mode 100644
index 00000000..c30e7def
--- /dev/null
+++ b/snippets/components/gateways/callouts.jsx
@@ -0,0 +1,118 @@
+// Callout wrapper tags in this file were lost in extraction; the Warning,
+// Note, and Info tags below are reconstructed from the component names.
+// Link targets marked "…" were also lost.
+const GatewayOffChainWarning = () => {
+  return (
+    <Warning>
+      You will need to{" "}
+      <a href="…">run your own Orchestrator node</a>{" "}
+      to test an off-chain (local) Gateway:
+      <ul>
+        <li>
+          See <a href="…">…</a> to test a local Gateway without a GPU.
+        </li>
+        <li>
+          See <a href="…">…</a> to set up and run an Orchestrator.
+        </li>
+      </ul>
+    </Warning>
+  );
+};
+
+const GatewayOnChainWarning = () => {
+  return (
+    <Warning>
+      You will need to{" "}
+      <a href="…">fund an Ethereum wallet</a>{" "}
+      account on Arbitrum One to run an on-chain Gateway.
+      <br />
+      See <a href="…">Fund Your Gateway</a>.
+    </Warning>
+  );
+};
+const GatewayOnChainTTestnetNote = () => {
+  return (
+    <Note>
+      While Livepeer contracts are deployed to the Arbitrum Testnet, there are
+      currently no freely available Orchestrator services on this chain.
+      <br />
+      <strong>
+        If you would like to use the Arbitrum Testnet for your Gateway, you
+        will need to run your own Orchestrator node.
+      </strong>
+      <br />
+      There are conversations underway to enable this in the future. Follow &
+      contribute to the discussion on{" "}
+      <a href="…">Discord</a> and the{" "}
+      <a href="…">Forum</a>.
+    </Note>
+  );
+};
+const OrchAddrNote = () => {
+  return (
+    <Note>
+      Replace <code>{"…" /* original placeholder text lost in extraction */}</code>{" "}
+      with your locally running orchestrator address.
+    </Note>
+  );
+};
+
+const TestVideoDownload = ({ children }) => {
+  return (
+    <Info>
+      You need a video file called <code>test-video.mp4</code> on your
+      machine!{" "}
+      {children}
+    </Info>
+  );
+};
+
+const FfmpegWarning = () => {
+  return (
+    <Warning>
+      <strong>IMPORTANT! Do not install with sudo!</strong>
+      <br />
+      Livepeer uses a custom build of FFmpeg that must be installed in a
+      specific location.
+    </Warning>
+  );
+};
+
+export {
+  GatewayOffChainWarning,
+  GatewayOnChainWarning,
+  GatewayOnChainTTestnetNote,
+  OrchAddrNote,
+  TestVideoDownload,
+  FfmpegWarning,
+};
diff --git a/snippets/components/gateways/quickstartTabs.jsx b/snippets/components/gateways/quickstartTabs.jsx
new file mode 100644
index 00000000..a5e75af3
--- /dev/null
+++ b/snippets/components/gateways/quickstartTabs.jsx
@@ -0,0 +1,64 @@
+// NOTE: the step data must be imported in the page this component is used in
+// (or defined in that same page), e.g.:
+// import {
+//   dockerOffChainQuickstart,
+//   dockerOnChainQuickstart,
+//   linuxOffChainQuickstart,
+//   linuxOnChainQuickstart,
+//   windowsOffChainQuickstart,
+//   windowsOnChainQuickstart,
+// } from "/snippets/data/gateways.jsx";
+
+// Tab wrappers reconstructed; the original tab titles were lost in extraction.
+export const QuickStartTabs = ({ offchainSteps, onchainSteps }) => {
+  return (
+    <Tabs>
+      <Tab title="…">{offchainSteps}</Tab>
+      <Tab title="…">{onchainSteps}</Tab>
+    </Tabs>
+  );
+};
+
+// This inherits imports from the page it is called in, but cannot use imports
+// made in this file (a Mintlify snippet limitation).
+export const QuickStartSteps = ({ dataSource }) => {
+  const { installStep, configureStep, runStep, connectStep, testStep } =
+    dataSource;
+  return (
+    <Steps>
+      {installStep}
+      {configureStep}
+      {runStep}
+      {connectStep}
+      {testStep}
+    </Steps>
+  );
+};
diff --git a/snippets/components/groupedItems/GroupedResponseField.jsx b/snippets/components/groupedItems/GroupedResponseField.jsx
new file mode 100644
index 00000000..e69de29b
diff --git a/snippets/components/icons.jsx b/snippets/components/icons.jsx
new file mode 100644
index 00000000..3d0b97fc
--- /dev/null
+++ b/snippets/components/icons.jsx
@@ -0,0 +1,32 @@
+export const LivepeerSVG = ({ size = 24, ...props }) => {
+  return null; // <svg> markup (sized via `size`) lost in extraction
+};
+
+export const LivepeerIcon = ({ ...props }) => {
+  return null; // <svg> markup lost in extraction
+};
+
+export const LivepeerIconFlipped = ({ ...props }) => {
+  return null; // flipped <svg> markup lost in extraction
+};
diff --git a/snippets/components/image.jsx b/snippets/components/image.jsx
new file mode 100644
index 00000000..ef7e5689
--- /dev/null
+++ b/snippets/components/image.jsx
@@ -0,0 +1,30 @@
+export const Image = ({ src, alt, caption, icon, hint, fullwidth = true }) => {
+  icon = icon ? icon : "arrow-turn-down-right";
+  // Frame markup reconstructed; how icon, hint, and fullwidth were applied
+  // was lost in extraction.
+  return (
+    <Frame caption={caption}>
+      <img src={src} alt={alt} />
+    </Frame>
+  );
+};
+
+export const LinkImage = ({ src, alt, caption, icon, hint, href }) => {
+  icon = icon ? icon : "arrow-turn-down-right";
+  return (
+    <a href={href}>
+      <Frame caption={caption}>
+        <img src={src} alt={alt} />
+      </Frame>
+    </a>
+  );
+};
+
+// Commented example (markup lost in extraction):
+// Livepeer Community GIF
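+
+// Usage sketch (illustrative only; the src, alt, and href values are
+// assumptions):
+//
+//   import { Image, LinkImage } from "/snippets/components/image.jsx";
+//
+//   <Image src="/images/example.png" alt="Example" caption="Example output" />
+//   <LinkImage src="/images/example.png" alt="Example" href="/v2/pages" />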
diff --git a/snippets/components/image.tsx b/snippets/components/image.tsx
new file mode 100644
index 00000000..a593e98c
--- /dev/null
+++ b/snippets/components/image.tsx
@@ -0,0 +1,31 @@
+// Generic type arguments reconstructed; the originals were lost in extraction.
+export type imageProps = Record<string, unknown>;
+export const Image = ({ src, alt, caption, icon, hint, fullwidth = true }) => {
+  icon = icon ? icon : "arrow-turn-down-right";
+  return (
+    <Frame caption={caption}>
+      <img src={src} alt={alt} />
+    </Frame>
+  );
+};
+
+export const LinkImage = ({ src, alt, caption, icon, hint, href }) => {
+  icon = icon ? icon : "arrow-turn-down-right";
+  return (
+    <a href={href}>
+      <Frame caption={caption}>
+        <img src={src} alt={alt} />
+      </Frame>
+    </a>
+  );
+};
+
+// Commented example (markup lost in extraction):
+// Livepeer Community GIF
diff --git a/snippets/components/index.jsx b/snippets/components/index.jsx
new file mode 100644
index 00000000..61024a71
--- /dev/null
+++ b/snippets/components/index.jsx
@@ -0,0 +1,4 @@
+export {
+  GatewayOffChainWarning,
+  GatewayOnChainWarning,
+} from "./gateways/warnings.jsx";
diff --git a/snippets/components/layouts/ListSteps.jsx b/snippets/components/layouts/ListSteps.jsx
new file mode 100644
index 00000000..7d502592
--- /dev/null
+++ b/snippets/components/layouts/ListSteps.jsx
@@ -0,0 +1,14 @@
+// listItems = array of objects with props and children; stepsConfig = object
+// with props for the Steps wrapper
+
+export const ListSteps = ({ listItems = [], stepsConfig = {} }) => {
+  return (
+    <Steps {...stepsConfig}>
+      {listItems.map(({ title, icon, children, ...props }, idx) => (
+        <Step key={idx} title={title} icon={icon} {...props}>
+          {children}
+        </Step>
+      ))}
+    </Steps>
+  );
+};
diff --git a/snippets/components/links.jsx b/snippets/components/links.jsx
new file mode 100644
index 00000000..33e15df7
--- /dev/null
+++ b/snippets/components/links.jsx
@@ -0,0 +1,186 @@
+export const CustomCallout = ({
+  children,
+  icon = "lightbulb",
+  color = "#2d9a67",
+  iconSize = 16,
+  textSize = "0.875rem",
+  textColor,
+}) => {
+  // Default textColor to match the icon color if not specified
+  const resolvedTextColor = textColor || color;
+  // Convert hex to rgba for proper opacity
+  const hexToRgba = (hex, alpha) => {
+    const r = parseInt(hex.slice(1, 3), 16);
+    const g = parseInt(hex.slice(3, 5), 16);
+    const b = parseInt(hex.slice(5, 7), 16);
+    return `rgba(${r}, ${g}, ${b}, ${alpha})`;
+  };
+
+  // Layout reconstructed; the original styling (which used hexToRgba,
+  // iconSize, and textSize) was lost in extraction.
+  return (
+    <div style={{ backgroundColor: hexToRgba(color, 0.1) }}>
+      <div>
+        <Icon icon={icon} size={iconSize} color={color} />
+      </div>
+      <div style={{ color: resolvedTextColor, fontSize: textSize }}>
+        {children}
+      </div>
+    </div>
+  );
+};
+
+// Blinking animation styles lost in extraction.
+export const BlinkingIcon = ({
+  icon = "terminal",
+  size = 16,
+  color = "#2d9a67",
+}) => {
+  return (
+    <span>
+      <Icon icon={icon} size={size} color={color} />
+    </span>
+  );
+};
+
+// Alias for backwards compatibility
+export const BlinkingTerminal = BlinkingIcon;
+
+export const DoubleIconLink = ({
+  label = "",
+  href = "#",
+  text = "",
+  iconLeft = "github",
+  iconRight = "arrow-up-right",
+}) => {
+  // Markup reconstructed; the original styling was lost in extraction.
+  return (
+    <a href={href}>
+      {text && <span>{text}</span>}
+      <Icon icon={iconLeft} />
+      <span>{label}</span>
+      <Icon icon={iconRight} />
+    </a>
+  );
+};

{text}

+ + + {label} + +
+ ); +}; + +export const GotoCard = ({ label, relativePath, icon, text, cta = "" }) => { + icon = icon ? icon : "arrow-turn-down-right"; + return ( + + {text} + + ); +}; + +export const TipWithArrow = ({ + children, + icon = "lightbulb", + arrowIcon = "arrow-up-right", + color = "#2d9a67", + iconSize = 16, + arrowSize = 16, +}) => { + // Convert hex to rgba for proper opacity + const hexToRgba = (hex, alpha) => { + const r = parseInt(hex.slice(1, 3), 16); + const g = parseInt(hex.slice(3, 5), 16); + const b = parseInt(hex.slice(5, 7), 16); + return `rgba(${r}, ${g}, ${b}, ${alpha})`; + }; + + return ( +
+  // Layout reconstructed; the original styling was lost in extraction.
+  return (
+    <div style={{ backgroundColor: hexToRgba(color, 0.1) }}>
+      <div>
+        <Icon icon={icon} size={iconSize} color={color} />
+      </div>
+      <div>{children}</div>
+      <div>
+        <Icon icon={arrowIcon} size={arrowSize} color={color} />
+      </div>
+    </div>
+  );
+};
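+
+// Usage sketch (illustrative only; labels and paths are assumptions):
+//
+//   import { CustomCallout, GotoLink } from "/snippets/components/links.jsx";
+//
+//   <CustomCallout icon="lightbulb">Keep your node running.</CustomCallout>
+//   <GotoLink label="Gateway guide" relativePath="/v2/pages/04_gateways" text="Next:" />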
diff --git a/snippets/components/lists.jsx b/snippets/components/lists.jsx
new file mode 100644
index 00000000..0571ad60
--- /dev/null
+++ b/snippets/components/lists.jsx
@@ -0,0 +1,62 @@
+// Lists
+import { GotoLink } from "./links";
+
+// Placeholder stubs, not implemented yet
+export const BasicList = ({ listItems: array }) => {
+  return <></>;
+};
+
+export const IconList = ({ listItems: array }) => {
+  return <></>;
+};
+
+export const StepList = ({ listItems }) => {
+  return (
+    <Steps>
+      {listItems.map(({ title, icon, content }, idx) => (
+        <Step key={idx} title={title} icon={icon}>
+          {content}
+        </Step>
+      ))}
+    </Steps>
+  );
+};
+
+export const StepLinkList = ({ listItems }) => {
+  return (
+    <Steps>
+      {listItems.map(({ title, icon, content, link }, idx) => (
+        <Step key={idx} title={title} icon={icon}>
+          {/* GotoLink wiring reconstructed; the original props were lost in
+              extraction */}
+          <GotoLink label={title} relativePath={link} text={content} />
+        </Step>
+      ))}
+    </Steps>
+  );
+};
+
+export const UpdateList = ({ listItems: array }) => {
+  // Hardcoded example item; the original wrapper markup was lost in
+  // extraction.
+  return (
+    <Update>
+      Learn what Livepeer is and how it can benefit you
+      [About Livepeer](../../01_about/about-home/)
+    </Update>
+  );
+};
+
+export const UpdateLinkList = ({ listItems: array }) => {
+  // Item markup reconstructed; the original link text and styling were lost
+  // in extraction.
+  return (
+    <>
+      {array.map(({ title, icon, content, link }, idx) => (
+        <Update key={idx} label={title}>
+          {content}
+          <a href={link}>…</a>
+        </Update>
+      ))}
+    </>
+  );
+};
diff --git a/snippets/components/release.jsx b/snippets/components/release.jsx
new file mode 100644
index 00000000..b349d4d9
--- /dev/null
+++ b/snippets/components/release.jsx
@@ -0,0 +1,66 @@
+import { useState, useEffect } from "react";
+
+/**
+ * LatestRelease - Fetches and displays the latest release version from GitHub
+ * Usage:
+ *   import { LatestRelease, LatestReleaseUrl } from '/snippets/components/release.jsx'
+ *
+ *   Latest version: <LatestRelease />
+ *   <LatestReleaseUrl>Download here</LatestReleaseUrl>
+ */
+
+// TODO: remove this component
+export const LatestRelease = ({
+  repo = "livepeer/go-livepeer",
+  fallback = "latest",
+}) => {
+  const [version, setVersion] = useState(fallback);
+  const [loading, setLoading] = useState(true);
+
+  useEffect(() => {
+    fetch(`https://api.github.com/repos/${repo}/releases/latest`)
+      .then((res) => res.json())
+      .then((data) => {
+        if (data.tag_name) {
+          setVersion(data.tag_name);
+        }
+        setLoading(false);
+      })
+      .catch(() => {
+        setLoading(false);
+      });
+  }, [repo]);
+
+  return version;
+};
+
+export const LatestReleaseUrl = ({
+  repo = "livepeer/go-livepeer",
+  asset = "livepeer-linux-amd64.tar.gz",
+  children,
+}) => {
+  const [url, setUrl] = useState(`https://github.com/${repo}/releases/latest`);
+
+  useEffect(() => {
+    fetch(`https://api.github.com/repos/${repo}/releases/latest`)
+      .then((res) => res.json())
+      .then((data) => {
+        if (data.tag_name) {
+          setUrl(
+            `https://github.com/${repo}/releases/download/${data.tag_name}/${asset}`,
+          );
+        }
+      })
+      .catch(() => {});
+  }, [repo, asset]);
+
+  // Anchor markup reconstructed; the original attributes were lost in
+  // extraction.
+  return <a href={url}>{children || url}</a>;
+};
+
+export const LatestVersion = ({ version }) => {
+  return <>{version}</>;
+};
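+
+// Composed usage sketch (illustrative only; the values shown are the
+// component defaults):
+//
+//   Latest version: <LatestRelease repo="livepeer/go-livepeer" />
+//   <LatestReleaseUrl repo="livepeer/go-livepeer" asset="livepeer-linux-amd64.tar.gz">
+//     Download the latest Linux build
+//   </LatestReleaseUrl>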
diff --git a/snippets/components/responseField.jsx b/snippets/components/responseField.jsx
new file mode 100644
index 00000000..977d108b
--- /dev/null
+++ b/snippets/components/responseField.jsx
@@ -0,0 +1,114 @@
+/**
+ * CustomResponseField - ResponseField wrapper that hides the bottom divider
+ *
+ * Usage:
+ *   <CustomResponseField name="..." type="...">
+ *     Description text
+ *   </CustomResponseField>
+ *
+ * Props:
+ * - All ResponseField props are supported (name, type, default, required, post, etc.)
+ */
+
+const ValueResponseField = ({ description, post = null, ...props }) => {
+  const uniqueId = `custom-rf-${Math.random().toString(36).substring(2, 11)}`;
+
+  // Badge markup reconstructed; the original styling was lost in extraction.
+  const value = post
+    ? [
+        <span key={`${uniqueId}-value`}>
+          value: {post[0]}
+        </span>,
+      ]
+    : null;
+
+  // How `value` was wired into ResponseField was lost in extraction.
+  return (
+    <div id={uniqueId}>
+      <ResponseField {...props} post={value}>
+        {typeof description === "function" ? description() : description}
+      </ResponseField>
+    </div>
+  );
+};
+
+// Unused sketch; the element tags around "Description" were lost in
+// extraction.
+const expandableCode = () => {
+  return (
+    <Expandable>
+      <ResponseField name="…" type="…">
+        Description
+      </ResponseField>
+    </Expandable>
+  );
+};
+
+const CustomResponseField = ({ description, ...props }) => {
+  const uniqueId = `custom-rf-${Math.random().toString(36).substring(2, 11)}`;
+
+  // The divider-hiding markup (scoped to uniqueId) was lost in extraction.
+  return (
+    <div id={uniqueId}>
+      <ResponseField {...props}>{description}</ResponseField>
+    </div>
+  );
+};
+
+const ResponseFieldExpandable = ({ fields = {}, ...props }) => {
+  const fieldsArray = Array.isArray(fields) ? fields : Object.values(fields);
+  return (
+    <Expandable {...props}>
+      {fieldsArray.map((field, index) => (
+        <ValueResponseField key={index} {...field} />
+      ))}
+    </Expandable>
+  );
+};
+
+const ResponseFieldAccordion = ({ fields = {}, ...props }) => {
+  const fieldsArray = Array.isArray(fields) ? fields : Object.values(fields);
+  return (
+    <Accordion {...props}>
+      {fieldsArray.map((field, index) => (
+        <ValueResponseField key={index} {...field} />
+      ))}
+    </Accordion>
+  );
+};
+
+// Not working yet.
+const ResponseFieldGroup = ({
+  component = "accordion",
+  fields = {},
+  ...props
+}) => {
+  const fieldsArray = Array.isArray(fields) ? fields : Object.values(fields);
+  const componentMap = {
+    expandable: Expandable,
+    accordion: Accordion,
+  };
+  const Component = componentMap[component];
+  return (
+    <Component {...props}>
+      {fieldsArray.map((field, index) => (
+        <ValueResponseField key={index} {...field} />
+      ))}
+    </Component>
+  );
+};
+
+export {
+  ValueResponseField,
+  CustomResponseField,
+  ResponseFieldExpandable,
+  ResponseFieldAccordion,
+  ResponseFieldGroup,
+};
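+
+// Usage sketch (illustrative only; the field entries are assumptions):
+//
+//   import { ResponseFieldExpandable } from "/snippets/components/responseField.jsx";
+//
+//   <ResponseFieldExpandable
+//     fields={[
+//       { name: "status", type: "string", description: "Node status" },
+//       { name: "uptime", type: "number", description: "Seconds since start" },
+//     ]}
+//   />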
diff --git a/snippets/components/steps.jsx b/snippets/components/steps.jsx
new file mode 100644
index 00000000..bf86f7b3
--- /dev/null
+++ b/snippets/components/steps.jsx
@@ -0,0 +1,38 @@
+// Custom Steps component with styling support
+
+export const StyledSteps = ({
+  children,
+  iconColor = "#18794e",
+  titleColor = "#2b9a66",
+  lineColor = "#2b9a66",
+  iconSize = "24px",
+}) => {
+  const stepsId = `styled-steps-${Math.random().toString(36).substr(2, 9)}`;
+
+  return (
+    <>
+      <style>
+        {/* scoped CSS (applying iconColor, titleColor, lineColor, and
+            iconSize to the stepsId class) lost in extraction */}
+      </style>
+      <div className={stepsId}>{children}</div>
+    </>
+  );
+};
+
+export const StyledStep = ({ title, icon, titleSize = "h3", children }) => {
+  return (
+    <Step title={title} icon={icon} titleSize={titleSize}>
+      {children}
+    </Step>
+  );
+};
diff --git a/snippets/components/stuff.js b/snippets/components/stuff.js
new file mode 100644
index 00000000..918dc856
--- /dev/null
+++ b/snippets/components/stuff.js
@@ -0,0 +1,9 @@
+// export const embedUrl = url.replace('watch?v=', 'embed/')
+
+// import { Video } from '/snippets/video.jsx'
+// (commented example markup lost in extraction)
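+
+// Sketch of the idea in the first commented line above (assumes a YouTube
+// watch URL; not part of the original commit):
+//
+//   const url = "https://www.youtube.com/watch?v=abc123";
+//   const embedUrl = url.replace("watch?v=", "embed/");
+//   // embedUrl === "https://www.youtube.com/embed/abc123"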