diff --git a/.github/pr-screenshots/version-component/lsu-version-devnet.png b/.github/pr-screenshots/version-component/lsu-version-devnet.png new file mode 100644 index 00000000..c42ca23f Binary files /dev/null and b/.github/pr-screenshots/version-component/lsu-version-devnet.png differ diff --git a/.github/pr-screenshots/version-component/lsu-version-mainnet-testnet.png b/.github/pr-screenshots/version-component/lsu-version-mainnet-testnet.png new file mode 100644 index 00000000..177c569a Binary files /dev/null and b/.github/pr-screenshots/version-component/lsu-version-mainnet-testnet.png differ diff --git a/config/repo-version-config.json b/config/repo-version-config.json index f57656cf..bc340474 100644 --- a/config/repo-version-config.json +++ b/config/repo-version-config.json @@ -1,189 +1,290 @@ { - "versions": { + "versions": { + "mainnet": { + "name": "MainNet", + "advanced": { + "minProtocolVersion": "6", + "migrationId": "4", + "darVersions": [ + { + "name": "splice-amulet", + "version": "0.1.14" + }, + { + "name": "splice-wallet", + "version": "0.1.14" + }, + { + "name": "splice-dso-governance", + "version": "0.1.20" + } + ], + "releaseUrl": "https://github.com/canton-network/splice/releases/tag/0.5.18" + }, + "endpoint": "scan.sv-1.global.canton.network.sync.global", + "substitutions": { + "splice_cluster": "main", + "da_hostname": "global.canton.network.digitalasset.com", + "gsf_sv_url": "https://sv.sv-1.global.canton.network.sync.global", + "generic_sv_url": "https://sv.sv-1.global.canton.network.YOUR_SV_SPONSOR", + "gsf_scan_url": "https://scan.sv-1.global.canton.network.sync.global", + "generic_scan_url": "https://scan.sv-1.global.canton.network.YOUR_SV_SPONSOR", + "gsf_sequencer_url": "https://sequencer-MIGRATION_ID.sv-1.global.canton.network.sync.global", + "version": "0.5.18", + "version_literal": "0.5.18", + "chart_version_literal": "0.5.18", + "chart_version_set": "export CHART_VERSION=0.5.18", + "image_tag_set": "export IMAGE_TAG=0.5.18", + 
"image_tag_set_plain": "export IMAGE_TAG=0.5.18", + "bundle_download_link": { + "label": "Download Bundle", + "href": "https://github.com/digital-asset/decentralized-canton-sync/releases/download/v0.5.18/0.5.18_splice-node.tar.gz" + }, + "openapi_download_link": { + "label": "Download OpenAPI specs", + "href": "https://github.com/digital-asset/decentralized-canton-sync/releases/download/v0.5.18/0.5.18_openapi.tar.gz" + }, + "helm_repo_prefix": "oci://ghcr.io/digital-asset/decentralized-canton-sync/helm", + "docker_repo_prefix": "ghcr.io/digital-asset/decentralized-canton-sync/docker" + } + }, + "testnet": { + "name": "TestNet", + "advanced": { + "minProtocolVersion": "6", + "migrationId": "1", + "darVersions": [ + { + "name": "splice-amulet", + "version": "0.1.15" + }, + { + "name": "splice-wallet", + "version": "0.1.15" + }, + { + "name": "splice-dso-governance", + "version": "0.1.21" + } + ], + "releaseUrl": "https://github.com/canton-network/splice/releases/tag/0.5.18" + }, + "endpoint": "scan.sv-1.test.global.canton.network.sync.global", + "substitutions": { + "splice_cluster": "test", + "da_hostname": "test.global.canton.network.digitalasset.com", + "gsf_sv_url": "https://sv.sv-1.test.global.canton.network.sync.global", + "generic_sv_url": "https://sv.sv-1.test.global.canton.network.YOUR_SV_SPONSOR", + "gsf_scan_url": "https://scan.sv-1.test.global.canton.network.sync.global", + "generic_scan_url": "https://scan.sv-1.test.global.canton.network.YOUR_SV_SPONSOR", + "gsf_sequencer_url": "https://sequencer-MIGRATION_ID.sv-1.test.global.canton.network.sync.global", + "version": "0.5.18", + "version_literal": "0.5.18", + "chart_version_literal": "0.5.18", + "chart_version_set": "export CHART_VERSION=0.5.18", + "image_tag_set": "export IMAGE_TAG=0.5.18", + "image_tag_set_plain": "export IMAGE_TAG=0.5.18", + "bundle_download_link": { + "label": "Download Bundle", + "href": 
"https://github.com/digital-asset/decentralized-canton-sync/releases/download/v0.5.18/0.5.18_splice-node.tar.gz" + }, + "openapi_download_link": { + "label": "Download OpenAPI specs", + "href": "https://github.com/digital-asset/decentralized-canton-sync/releases/download/v0.5.18/0.5.18_openapi.tar.gz" + }, + "helm_repo_prefix": "oci://ghcr.io/digital-asset/decentralized-canton-sync/helm", + "docker_repo_prefix": "ghcr.io/digital-asset/decentralized-canton-sync/docker" + } + }, + "devnet": { + "name": "DevNet", + "advanced": { + "minProtocolVersion": "6", + "migrationId": "1", + "darVersions": [ + { + "name": "splice-amulet", + "version": "0.1.15" + }, + { + "name": "splice-wallet", + "version": "0.1.15" + }, + { + "name": "splice-dso-governance", + "version": "0.1.21" + } + ], + "releaseUrl": "https://github.com/canton-network/splice/releases/tag/0.6.3" + }, + "endpoint": "scan.sv-1.dev.global.canton.network.sync.global", + "substitutions": { + "splice_cluster": "dev", + "da_hostname": "dev.global.canton.network.digitalasset.com", + "gsf_sv_url": "https://sv.sv-1.dev.global.canton.network.sync.global", + "generic_sv_url": "https://sv.sv-1.dev.global.canton.network.YOUR_SV_SPONSOR", + "gsf_scan_url": "https://scan.sv-1.dev.global.canton.network.sync.global", + "generic_scan_url": "https://scan.sv-1.dev.global.canton.network.YOUR_SV_SPONSOR", + "gsf_sequencer_url": "https://sequencer-MIGRATION_ID.sv-1.dev.global.canton.network.sync.global", + "version": "0.6.3", + "version_literal": "0.6.3", + "chart_version_literal": "0.6.3", + "chart_version_set": "export CHART_VERSION=0.6.3", + "image_tag_set": "export IMAGE_TAG=0.6.3", + "image_tag_set_plain": "export IMAGE_TAG=0.6.3", + "bundle_download_link": { + "label": "Download Bundle", + "href": "https://github.com/digital-asset/decentralized-canton-sync/releases/download/v0.6.3/0.6.3_splice-node.tar.gz" + }, + "openapi_download_link": { + "label": "Download OpenAPI specs", + "href": 
"https://github.com/digital-asset/decentralized-canton-sync/releases/download/v0.6.3/0.6.3_openapi.tar.gz" + }, + "helm_repo_prefix": "oci://ghcr.io/digital-asset/decentralized-canton-sync/helm", + "docker_repo_prefix": "ghcr.io/digital-asset/decentralized-canton-sync/docker" + } + } + }, + "repositories": { + "splice": { + "url": "", + "versionMapping": { "mainnet": { - "name": "MainNet", - "advanced": { - "minProtocolVersion": "6", - "migrationId": "2", - "darVersions": [ - { "name": "splice-amulet", "version": "0.1.14" }, - { "name": "splice-wallet", "version": "0.1.14" }, - { "name": "splice-dso-governance", "version": "0.1.20" } - ], - "releaseUrl": "https://github.com/hyperledger-labs/splice/releases/tag/v0.5.10" - }, - "endpoint": "scan.sv-1.global.canton.network.sync.global" + "branch": "main", + "externalVersion": "0.5.18", + "folderPathRepo": "splice-wallet-kernel" + }, + "devnet": { + "branch": "main", + "externalVersion": "0.6.3", + "folderPathRepo": "splice-wallet-kernel" }, "testnet": { - "name": "TestNet", - "advanced": { - "minProtocolVersion": "6", - "migrationId": "3", - "darVersions": [ - { "name": "splice-amulet", "version": "0.1.15" }, - { "name": "splice-wallet", "version": "0.1.15" }, - { "name": "splice-dso-governance", "version": "0.1.21" } - ], - "releaseUrl": "https://github.com/hyperledger-labs/splice/releases/tag/v0.5.11" - }, - "endpoint": "scan.sv-1.test.global.canton.network.sync.global" - }, - "devnet": - { - "name": "DevNet", - "advanced": { - "minProtocolVersion": "6", - "migrationId": "4", - "darVersions": [ - { "name": "splice-amulet", "version": "0.1.15" }, - { "name": "splice-wallet", "version": "0.1.15" }, - { "name": "splice-dso-governance", "version": "0.1.21" } - ], - "releaseUrl": "https://github.com/hyperledger-labs/splice/releases/tag/v0.5.12" - }, - "endpoint": "scan.sv-1.dev.global.canton.network.sync.global" - } + "branch": "main", + "externalVersion": "0.5.18", + "folderPathRepo": "splice-wallet-kernel" + } + } + }, 
+ "damlSdk": { + "url": "", + "versionMapping": { + "mainnet": { + "branch": "", + "externalVersion": "3.4.11", + "folderPathRepo": "" + }, + "devnet": { + "branch": "", + "externalVersion": "3.4.11", + "folderPathRepo": "" + }, + "testnet": { + "branch": "", + "externalVersion": "3.4.11", + "folderPathRepo": "" + } + } + }, + "pqs": { + "url": "", + "versionMapping": { + "mainnet": { + "branch": "", + "externalVersion": "3.4.1", + "folderPathRepo": "" + }, + "devnet": { + "branch": "", + "externalVersion": "3.4.1", + "folderPathRepo": "" + }, + "testnet": { + "branch": "", + "externalVersion": "3.4.1", + "folderPathRepo": "" + } + } }, - "repositories": { - "splice": { - "url": "", - "versionMapping": { - "mainnet": { - "branch": "main", - "externalVersion": "0.5.10", - "folderPathRepo": "splice-wallet-kernel" - }, - "devnet": { - "branch": "main", - "externalVersion": "0.5.12", - "folderPathRepo": "splice-wallet-kernel" - }, - "testnet": { - "branch": "main", - "externalVersion": "0.5.11", - "folderPathRepo": "splice-wallet-kernel" - } - } - }, - "damlSdk": { - "url": "", - "versionMapping": { - "mainnet": { - "branch": "", - "externalVersion": "3.4.11", - "folderPathRepo": "" - }, - "devnet": { - "branch": "", - "externalVersion": "3.4.11", - "folderPathRepo": "" - }, - "testnet": { - "branch": "", - "externalVersion": "3.4.11", - "folderPathRepo": "" - } - } - }, - "pqs": { - "url": "", - "versionMapping": { - "mainnet": { - "branch": "", - "externalVersion": "3.4.1", - "folderPathRepo": "" - }, - "devnet": { - "branch": "", - "externalVersion": "3.4.1", - "folderPathRepo": "" - }, - "testnet": { - "branch": "", - "externalVersion": "3.4.1", - "folderPathRepo": "" - } - } - }, - "tokenStandard": { - "url": "", - "versionMapping": { - "mainnet": { - "branch": "", - "externalVersion": "1.0.0", - "folderPathRepo": "" - }, - "devnet": { - "branch": "", - "externalVersion": "1.0.0", - "folderPathRepo": "" - }, - "testnet": { - "branch": "", - "externalVersion": 
"1.0.0", - "folderPathRepo": "" - } - } - }, - "walletSdk": { - "url": "", - "versionMapping": { - "mainnet": { - "branch": "", - "externalVersion": "1.1.0", - "folderPathRepo": "" - }, - "devnet": { - "branch": "", - "externalVersion": "1.1.0", - "folderPathRepo": "" - }, - "testnet": { - "branch": "", - "externalVersion": "1.1.0", - "folderPathRepo": "" - } - } - }, - "dappSdk": { - "url": "", - "versionMapping": { - "mainnet": { - "branch": "", - "externalVersion": "1.1.0", - "folderPathRepo": "" - }, - "devnet": { - "branch": "", - "externalVersion": "1.1.0", - "folderPathRepo": "" - }, - "testnet": { - "branch": "", - "externalVersion": "1.1.0", - "folderPathRepo": "" - } - } - }, - "walletGateway": { - "url": "", - "versionMapping": { - "mainnet": { - "branch": "", - "externalVersion": "1.2.0", - "folderPathRepo": "" - }, - "devnet": { - "branch": "", - "externalVersion": "1.2.0", - "folderPathRepo": "" - }, - "testnet": { - "branch": "", - "externalVersion": "1.2.0", - "folderPathRepo": "" - } - } + "tokenStandard": { + "url": "", + "versionMapping": { + "mainnet": { + "branch": "", + "externalVersion": "1.0.0", + "folderPathRepo": "" + }, + "devnet": { + "branch": "", + "externalVersion": "1.0.0", + "folderPathRepo": "" + }, + "testnet": { + "branch": "", + "externalVersion": "1.0.0", + "folderPathRepo": "" + } + } + }, + "walletSdk": { + "url": "", + "versionMapping": { + "mainnet": { + "branch": "", + "externalVersion": "1.1.0", + "folderPathRepo": "" + }, + "devnet": { + "branch": "", + "externalVersion": "1.1.0", + "folderPathRepo": "" + }, + "testnet": { + "branch": "", + "externalVersion": "1.1.0", + "folderPathRepo": "" + } + } + }, + "dappSdk": { + "url": "", + "versionMapping": { + "mainnet": { + "branch": "", + "externalVersion": "1.1.0", + "folderPathRepo": "" + }, + "devnet": { + "branch": "", + "externalVersion": "1.1.0", + "folderPathRepo": "" + }, + "testnet": { + "branch": "", + "externalVersion": "1.1.0", + "folderPathRepo": "" + } + } + }, 
+ "walletGateway": { + "url": "", + "versionMapping": { + "mainnet": { + "branch": "", + "externalVersion": "1.2.0", + "folderPathRepo": "" + }, + "devnet": { + "branch": "", + "externalVersion": "1.2.0", + "folderPathRepo": "" + }, + "testnet": { + "branch": "", + "externalVersion": "1.2.0", + "folderPathRepo": "" } + } } + } } diff --git a/docs-main/appdev/deep-dives/token-standard.mdx b/docs-main/appdev/deep-dives/token-standard.mdx index ec2c2c81..e684ca14 100644 --- a/docs-main/appdev/deep-dives/token-standard.mdx +++ b/docs-main/appdev/deep-dives/token-standard.mdx @@ -3,6 +3,9 @@ title: "Token Standard" description: "The Canton Network Token Standard APIs and Daml interfaces" --- +import { NetworkVariables } from '/snippets/components/version.mdx'; +import { networkData } from '/snippets/generated/version-dashboard-data.mdx'; + {/* COPIED_START source="splice:docs/src/app_dev/token_standard/index.rst" hash="f20ad600" */} @@ -38,13 +41,17 @@ We recommend wallet providers to implement a UTXO management strategy that: ### Setting up MergeDelegations + + Assuming you are a wallet provider that runs a validator node for your users, you can set up `MergeDelegation` contracts for your users as follows. -1. Extract the latest version of the `splice-util-token-standard-wallet.dar` file from the release bundle ([Download Bundle](https://github.com/digital-asset/decentralized-canton-sync/releases/download/v0.6.3/0.6.3_splice-node.tar.gz)). +1. Extract the latest version of the `splice-util-token-standard-wallet.dar` file from the release bundle (|bundle_download_link|). 2. Upload the extracted `.dar` file to your validator node. 3. Adjust your user onboarding procedure such that the users signs the creation of a `MergeDelegationProposal` contract (see docs). 4. Accept the `MergeDelegationProposal` contracts by exercising their `Accept` choice using your wallet provider's party. 
+ + ### Using MergeDelegations We recommend to use the `MergeDelegation` contracts in a batched fashion as follows. diff --git a/docs-main/appdev/modules/m7-canton-coin-preapprovals.mdx b/docs-main/appdev/modules/m7-canton-coin-preapprovals.mdx index bf9b4607..85e06045 100644 --- a/docs-main/appdev/modules/m7-canton-coin-preapprovals.mdx +++ b/docs-main/appdev/modules/m7-canton-coin-preapprovals.mdx @@ -15,7 +15,13 @@ Contrary to other assets like Eth or Bitcoin, Canton Coin requires a party to ex Parties that are ok with accepting incoming Canton Coin transfers from any sender, can setup a `TransferPreapproval`. This allows any party to send Canton Coin to the party that setup the `TransferPreapproval`. Note that this only applies to transfers of Canton Coin but not to other assets. Other assets may provide their own variant of a preapproval which needs to be setup separately or they may require approval of each incoming transfer individually. -To ensure that the super validators don't have to store and serve `TransferPreapprovals` contracts for parties that are no longer active or malicious parties cannot spam them, a preapproval has a limited lifetime until it expires and a fee must be burned proportional to the lifetime when creating the preapproval. The fee is controlled by the super validators through the `transferPreapprovalFee` parameter. The current value can be observed in CC Scan at [https://scan.sv-1.unknown_cluster.global.canton.network.sync.global/dso](https://scan.sv-1.unknown_cluster.global.canton.network.sync.global/dso) and defaults to \$1/year. +To ensure that the super validators don't have to store and serve `TransferPreapprovals` contracts for parties that are no longer active or malicious parties cannot spam them, a preapproval has a limited lifetime until it expires and a fee must be burned proportional to the lifetime when creating the preapproval. The fee is controlled by the super validators through the `transferPreapprovalFee` parameter. 
The current value can be observed in CC Scan; select the right network: + +- DevNet: [https://scan.sv-1.dev.global.canton.network.sync.global/dso](https://scan.sv-1.dev.global.canton.network.sync.global/dso) +- TestNet: [https://scan.sv-1.test.global.canton.network.sync.global/dso](https://scan.sv-1.test.global.canton.network.sync.global/dso) +- MainNet: [https://scan.sv-1.global.canton.network.sync.global/dso](https://scan.sv-1.global.canton.network.sync.global/dso) + +The current value defaults to \$1/year. Each preapproval has two parties: The `receiver` party that approves incoming transfers and the `provider` party. The provider party is responsible for paying the fee and renewing the preapproval when it gets close to its expiry date. In return, the `provider` party will be the app provider on all incoming transfers that use this preapproval and get the app rewards for it. The `provider` party must not necessarily be hosted on the same node as the `receiver` party although that is the most common setup in practice. @@ -54,4 +60,4 @@ If you are working through APIs instead, in particular for external parties, the Lastly, the legacy external signing APIs for non-standard Canton Coin transfers on the validator `/v0/admin/external-party/transfer-preapproval/prepare-send` and `/v0/admin/external-party/transfer-preapproval/submit-send` can also be used. Refer to the API docs for details. 
-{/* COPIED_END */} \ No newline at end of file +{/* COPIED_END */} diff --git a/docs-main/global-synchronizer/canton-console/console-overview.mdx b/docs-main/global-synchronizer/canton-console/console-overview.mdx index b6bf253f..eba54150 100644 --- a/docs-main/global-synchronizer/canton-console/console-overview.mdx +++ b/docs-main/global-synchronizer/canton-console/console-overview.mdx @@ -7,6 +7,9 @@ import ExternalSpliceMainSpliceRstCodeDocsSrcDeploymentConsoleAccessNone40 from import ExternalSpliceMainSpliceRstCodeDocsSrcDeploymentConsoleAccessBash166 from "/snippets/external/splice/main/splice-rst-code-docs-src-deployment-console-access-bash-166.mdx"; import ExternalSpliceMainSpliceRstCodeDocsSrcDeploymentConsoleAccessBash174 from "/snippets/external/splice/main/splice-rst-code-docs-src-deployment-console-access-bash-174.mdx"; +import { NetworkVariables } from '/snippets/components/version.mdx'; +import { networkData } from '/snippets/generated/version-dashboard-data.mdx'; + {/* COPIED_START source="splice:docs/src/deployment/console_access.rst" hash="bccf3653" */} @@ -46,53 +49,53 @@ Welcome to Canton! 4. Run the docker command - > ```bash - > docker run -it --rm --network host -v $(pwd)/console.conf:/app/app.conf /canton:0.6.3 --console - > ``` - > - -> - >
- > - > Important - > - > - - > - > If you run the participant using the docker compose setup the docker command must be run with the docker network used by the participant. Adjust the configuration to connect to the participant container: - > - > ``` - > canton { - > remote-participants { - > participant { - > admin-api { - > port = 5002 - > address = participant - > } - > ledger-api { - > port = 5001 - > address = participant - > } - > token = "" - > } - > } - > features.enable-preview-commands = yes - > features.enable-testing-commands = yes - > features.enable-repair-commands = yes - > } - > ``` - > - > Running docker with the default network (`splice-validator`): - > - > ```bash - > ``` - > - > docker run -it --rm --network splice-validator -v \$(pwd)/console.conf:/app/app.conf /canton:0.6.3 --console - > - >
+ + +```bash +docker run -it --rm --network host -v $(pwd)/console.conf:/app/app.conf |docker_repo_prefix|/canton:|version_literal| --console +``` + + + + +If you run the participant using the docker compose setup the docker command must be run with the docker network used by the participant. Adjust the configuration to connect to the participant container: + +```text +canton { + remote-participants { + participant { + admin-api { + port = 5002 + address = participant + } + ledger-api { + port = 5001 + address = participant + } + token = "" + } + } + features.enable-preview-commands = yes + features.enable-testing-commands = yes + features.enable-repair-commands = yes +} +``` + +Running docker with the default network (`splice-validator`): + + + +```bash +docker run -it --rm --network splice-validator -v \$(pwd)/console.conf:/app/app.conf |docker_repo_prefix|/canton:|version_literal| --console +``` + + + ## Sequencer console + + 1. Ensure you can access the sequencer's ports 5008 and 5009 2. Add the configuration to a local file `console.conf` @@ -120,11 +123,15 @@ Welcome to Canton! 3. Run the docker command > ```bash - > docker run -it --rm --network host -v $(pwd)/console.conf:/app/app.conf /canton:0.6.3 --console + > docker run -it --rm --network host -v $(pwd)/console.conf:/app/app.conf |docker_repo_prefix|/canton:|version_literal| --console > ``` + + ## Mediator console + + 1. Ensure you can access the mediator's port 5007 2. Add the configuration to a local file `console.conf` @@ -148,9 +155,11 @@ Welcome to Canton! 3. Run the docker command > ```bash - > docker run -it --rm --network host -v $(pwd)/console.conf:/app/app.conf /canton:0.6.3 --console + > docker run -it --rm --network host -v $(pwd)/console.conf:/app/app.conf |docker_repo_prefix|/canton:|version_literal| --console > ``` + + ## Access in a K8s cluster In a K8s cluster you can use a debug pod to access the console directly from the cluster. 
diff --git a/docs-main/global-synchronizer/deployment/kubernetes-deployment.mdx b/docs-main/global-synchronizer/deployment/kubernetes-deployment.mdx index de00bbdb..352851b7 100644 --- a/docs-main/global-synchronizer/deployment/kubernetes-deployment.mdx +++ b/docs-main/global-synchronizer/deployment/kubernetes-deployment.mdx @@ -24,6 +24,9 @@ import ExternalSpliceMainSpliceRstCodeDocsSrcSvOperatorSvHelmYaml914 from "/snip import ExternalSpliceMainSpliceRstCodeDocsSrcSvOperatorSvHelmBash978 from "/snippets/external/splice/main/splice-rst-code-docs-src-sv-operator-sv-helm-bash-978.mdx"; import ExternalSpliceMainSpliceRstCodeDocsSrcSvOperatorSvHelmNone1145 from "/snippets/external/splice/main/splice-rst-code-docs-src-sv-operator-sv-helm-none-1145.mdx"; +import { NetworkVariables } from '/snippets/components/version.mdx'; +import { networkData } from '/snippets/generated/version-dashboard-data.mdx'; + {/* COPIED_START source="splice:docs/src/sv_operator/sv_helm.rst" hash="37dc325e" */} @@ -36,6 +39,8 @@ This section describes deploying a Super Validator (SV) node in kubernetes using ## Requirements + + 1) A running Kubernetes cluster in which you have administrator access to create and manage namespaces. 2) A development workstation with the following: @@ -45,10 +50,10 @@ This section describes deploying a Super Validator (SV) node in kubernetes using 3) Your cluster needs a static egress IP. After acquiring that, propose to the other SVs to add it to the IP allowlist. 
-4) Please download the release artifacts containing the sample Helm value files, from here: [Download Bundle](https://github.com/digital-asset/decentralized-canton-sync/releases/download/v0.6.3/0.6.3_splice-node.tar.gz), and extract the bundle: +4) Please download the release artifacts containing the sample Helm value files, from here: |bundle_download_link|, and extract the bundle: ```bash -tar xzvf 0.6.3_splice-node.tar.gz +tar xzvf |version|_splice-node.tar.gz ``` 5) Please inquire the migration id and serial id of the global synchronizer on your target network. The migration ID is frozen at the value after the last major upgrade and is only used for `migration.id` in the helm chart values. The serial ID is 0 for the initial synchronizer deployment and is incremented by 1 for each logical synchronizer upgrade. The serial ID is used for helm release names, DNS entries, database names, and deployment naming. @@ -58,6 +63,8 @@ export MIGRATION_ID=0 export SERIAL_ID=0 ``` + + ## Generating an SV identity SV operators are identified by a human-readable name and an EC public key. This identification is stable across deployments of the Global Synchronizer. You are, for example, expected to reuse your SV name and public key between (test-)network resets. @@ -245,7 +252,9 @@ Every SV node also deploys a CometBFT node. This node must be configured to join ### Generating your CometBFT node keys -To generate the node config you use the CometBFT docker image provided through Github Container Registry (). + + +To generate the node config you use the CometBFT docker image provided through Github Container Registry (|docker_repo_prefix|). 
Use the following shell commands to generate the proper keys: @@ -254,9 +263,9 @@ Use the following shell commands to generate the proper keys: mkdir cometbft cd cometbft # Init the node -docker run --rm -v "$(pwd):/init" /cometbft:0.6.3 init --home /init +docker run --rm -v "$(pwd):/init" |docker_repo_prefix|/cometbft:|version| init --home /init # Read the node id and keep a note of it for the deployment -docker run --rm -v "$(pwd):/init" /cometbft:0.6.3 show-node-id --home /init +docker run --rm -v "$(pwd):/init" |docker_repo_prefix|/cometbft:|version| show-node-id --home /init ``` Please keep a note of the node ID printed out above. @@ -268,6 +277,8 @@ In addition, please retain some of the configuration files generated, as follows Any other files can be ignored. + + ### Configuring your CometBFT node keys The CometBFT node is configured with a secret, based on the output from Generating the CometBFT node identity The secret is created as follows, with the `node_key.json` and `priv_validator_key.json` files representing the files generated as part of the node identity: @@ -353,15 +364,19 @@ All apps support reading the Postgres password from a Kubernetes secret. 
Current ### Postgres in the Cluster + + If you wish to run the Postgres instances as pods in your cluster, you can use the `splice-postgres` Helm chart to install them: ```bash -helm install sequencer-pg /splice-postgres -n sv --version ${CHART_VERSION} -f splice-node/examples/sv-helm/postgres-values-sequencer.yaml --wait -helm install mediator-pg /splice-postgres -n sv --version ${CHART_VERSION} -f splice-node/examples/sv-helm/postgres-values-mediator.yaml --wait -helm install participant-pg /splice-postgres -n sv --version ${CHART_VERSION} -f splice-node/examples/sv-helm/postgres-values-participant.yaml --wait -helm install apps-pg /splice-postgres -n sv --version ${CHART_VERSION} -f splice-node/examples/sv-helm/postgres-values-apps.yaml --wait +helm install sequencer-pg |helm_repo_prefix|/splice-postgres -n sv --version ${CHART_VERSION} -f splice-node/examples/sv-helm/postgres-values-sequencer.yaml --wait +helm install mediator-pg |helm_repo_prefix|/splice-postgres -n sv --version ${CHART_VERSION} -f splice-node/examples/sv-helm/postgres-values-mediator.yaml --wait +helm install participant-pg |helm_repo_prefix|/splice-postgres -n sv --version ${CHART_VERSION} -f splice-node/examples/sv-helm/postgres-values-participant.yaml --wait +helm install apps-pg |helm_repo_prefix|/splice-postgres -n sv --version ${CHART_VERSION} -f splice-node/examples/sv-helm/postgres-values-apps.yaml --wait ``` + + ### Cloud-Hosted Postgres If you wish to use cloud-hosted Postgres instances, please configure and initialize each of them as follows: @@ -376,16 +391,18 @@ Note that the default Helm values files used below assume that the Postgres inst ### Configuring the Helm Charts + + To install the Helm charts needed to start an SV node connected to the cluster, you will need to meet a few preconditions. 
The first is that there needs to be an environment variable defined to refer to the version of the Helm charts necessary to connect to this environment: ```bash -export CHART_VERSION=0.6.3 +|chart_version_set| ``` An SV node includes a CometBFT node so you also need to configure that. Please modify the file `splice-node/examples/sv-helm/cometbft-values.yaml` as follows: -- Replace all instances of `TARGET_CLUSTER` with unknown_cluster, per the cluster to which you are connecting. -- Replace all instances of `TARGET_HOSTNAME` with unknown_cluster.global.canton.network.digitalasset.com, per the cluster to which you are connecting. +- Replace all instances of `TARGET_CLUSTER` with |splice_cluster|, per the cluster to which you are connecting. +- Replace all instances of `TARGET_HOSTNAME` with |da_hostname|, per the cluster to which you are connecting. - Replace all instances of `SERIAL_ID` with the serial ID of the global synchronizer on your target cluster. Note that `SERIAL_ID` is also used within port numbers in URLs here! - Replace `YOUR_SV_NAME` with the name you chose when creating the SV identity (this must be an exact match of the string for your SV to be approved to onboard) - Replace `YOUR_COMETBFT_NODE_ID` with the id obtained when generating the config for the CometBFT node @@ -428,7 +445,7 @@ An SV node includes a validator app so you also need to configure that. Please m Additionally, please modify the file `splice-node/examples/sv-helm/sv-validator-values.yaml` as follows: -- Replace all instances of `TARGET_HOSTNAME` with unknown_cluster.global.canton.network.digitalasset.com, per the cluster to which you are connecting. +- Replace all instances of `TARGET_HOSTNAME` with |da_hostname|, per the cluster to which you are connecting. - Replace all instances of `MIGRATION_ID` with the migration ID of the global synchronizer on your target cluster. - Replace all instances of `SERIAL_ID` with the serial ID of the global synchronizer on your target cluster. 
@@ -438,7 +455,7 @@ The private and public key for your SV are defined in a K8s secret. If you haven For configuring your sv app, please modify the file `splice-node/examples/sv-helm/sv-values.yaml` as follows: -- Replace all instances of `TARGET_HOSTNAME` with unknown_cluster.global.canton.network.digitalasset.com, per the cluster to which you are connecting. +- Replace all instances of `TARGET_HOSTNAME` with |da_hostname|, per the cluster to which you are connecting. - Replace all instances of `MIGRATION_ID` with the migration ID of the global synchronizer on your target cluster. - Replace all instances of `SERIAL_ID` with the serial ID of the global synchronizer on your target cluster. - If you want to configure the audience for the SV app backend API, replace `OIDC_AUTHORITY_SV_AUDIENCE` in the `auth.audience` entry with audience for the SV app backend API. e.g. `https://sv.example.com/api`. @@ -465,11 +482,11 @@ migration: Please modify the file `splice-node/examples/sv-helm/info-values.yaml` as follows: -- Replace `TARGET_CLUSTER` with unknown_cluster -- Replace `MD5_HASH_OF_ALLOWED_IP_RANGES` with the MD5 hash of the `allowed-ip-ranges.json` file corresponding to the unknown_cluster network. -- Replace `MD5_HASH_OF_APPROVED_SV_IDENTITIES` with the MD5 hash of the `approved-sv-id-values.yaml` file corresponding to the unknown_cluster network. +- Replace `TARGET_CLUSTER` with |splice_cluster| +- Replace `MD5_HASH_OF_ALLOWED_IP_RANGES` with the MD5 hash of the `allowed-ip-ranges.json` file corresponding to the |splice_cluster| network. +- Replace `MD5_HASH_OF_APPROVED_SV_IDENTITIES` with the MD5 hash of the `approved-sv-id-values.yaml` file corresponding to the |splice_cluster| network. - Replace `MIGRATION_ID` with the migration ID of the global synchronizer on your target cluster. -- Replace all instances of `CHAIN_ID_SUFFIX` with the chain ID suffix of the unknown_cluster network. 
+- Replace all instances of `CHAIN_ID_SUFFIX` with the chain ID suffix of the |splice_cluster| network. - Uncomment `staging` synchronizer and `legacy` synchronizer sections if you are using them. - Replace `STAGING_SYNCHRONIZER_MIGRATION_ID` with the migration ID of the staging synchronizer on your target cluster. - Replace `STAGING_SYNCHRONIZER_VERSION` with the version of the staging synchronizer on your target cluster. @@ -483,15 +500,19 @@ The [configs repo](https://github.com/global-synchronizer-foundation/configs) co These environment variables will be used below. + + ### Installing the Helm Charts + + With these files in place, you can execute the following helm commands in sequence. It's generally a good idea to wait until each deployment reaches a stable state prior to moving on to the next step. Install the Canton and CometBFT components: ```bash -helm install global-domain-${SERIAL_ID}-cometbft /splice-cometbft -n sv --version ${CHART_VERSION} -f splice-node/examples/sv-helm/cometbft-values.yaml --wait -helm install global-domain-${SERIAL_ID} /splice-global-domain -n sv --version ${CHART_VERSION} -f splice-node/examples/sv-helm/global-domain-values.yaml --wait +helm install global-domain-${SERIAL_ID}-cometbft |helm_repo_prefix|/splice-cometbft -n sv --version ${CHART_VERSION} -f splice-node/examples/sv-helm/cometbft-values.yaml --wait +helm install global-domain-${SERIAL_ID} |helm_repo_prefix|/splice-global-domain -n sv --version ${CHART_VERSION} -f splice-node/examples/sv-helm/global-domain-values.yaml --wait ``` Note that we use the serial ID when naming Canton synchronizer components. This is to support operating multiple instances of these components side by side as part of a logical synchronizer upgrade. @@ -499,21 +520,21 @@ Note that we use the serial ID when naming Canton synchronizer components. 
This Install the participant: ```bash -helm install participant /splice-participant -n sv --version ${CHART_VERSION} -f splice-node/examples/sv-helm/participant-values.yaml --wait +helm install participant |helm_repo_prefix|/splice-participant -n sv --version ${CHART_VERSION} -f splice-node/examples/sv-helm/participant-values.yaml --wait ``` Install the SV node apps: ```bash -helm install sv /splice-sv-node -n sv --version ${CHART_VERSION} -f splice-node/examples/sv-helm/sv-values.yaml -f ${SV_IDENTITIES_FILE} -f ${UI_CONFIG_VALUES_FILE} --wait -helm install scan /splice-scan -n sv --version ${CHART_VERSION} -f splice-node/examples/sv-helm/scan-values.yaml -f ${UI_CONFIG_VALUES_FILE} --wait -helm install validator /splice-validator -n sv --version ${CHART_VERSION} -f splice-node/examples/sv-helm/validator-values.yaml -f splice-node/examples/sv-helm/sv-validator-values.yaml -f ${UI_CONFIG_VALUES_FILE} --wait +helm install sv |helm_repo_prefix|/splice-sv-node -n sv --version ${CHART_VERSION} -f splice-node/examples/sv-helm/sv-values.yaml -f ${SV_IDENTITIES_FILE} -f ${UI_CONFIG_VALUES_FILE} --wait +helm install scan |helm_repo_prefix|/splice-scan -n sv --version ${CHART_VERSION} -f splice-node/examples/sv-helm/scan-values.yaml -f ${UI_CONFIG_VALUES_FILE} --wait +helm install validator |helm_repo_prefix|/splice-validator -n sv --version ${CHART_VERSION} -f splice-node/examples/sv-helm/validator-values.yaml -f splice-node/examples/sv-helm/sv-validator-values.yaml -f ${UI_CONFIG_VALUES_FILE} --wait ``` Install the INFO app, which is used to provide information about the SV node and its configuration: ```bash -helm install info /splice-info -n sv --version ${CHART_VERSION} -f splice-node/examples/sv-helm/info-values.yaml +helm install info |helm_repo_prefix|/splice-info -n sv --version ${CHART_VERSION} -f splice-node/examples/sv-helm/info-values.yaml ``` Once everything is running, you should be able to inspect the state of the cluster and observe pods running in the new 
namespace. A typical query might look as follows: @@ -522,6 +543,8 @@ Once everything is running, you should be able to inspect the state of the clust Note also that `Pod` restarts may happen during bringup, particularly if all helm charts are deployed at the same time. The `splice-sv-node` cannot start until `participant` is running and `participant` cannot start until `postgres` is running. + + ## SV Network Diagram SV Network Diagram @@ -595,6 +618,8 @@ In order to install the reference charts, the following must be satisfied in you ### Installation Instructions + + Create a `cluster-ingress` namespace: @@ -630,9 +655,11 @@ The http gateway terminates tls using the secret that you configured above, and using: ```bash -helm install cluster-ingress-sv /splice-cluster-ingress-runbook -n sv --version ${CHART_VERSION} -f splice-node/examples/sv-helm/sv-cluster-ingress-values.yaml +helm install cluster-ingress-sv |helm_repo_prefix|/splice-cluster-ingress-runbook -n sv --version ${CHART_VERSION} -f splice-node/examples/sv-helm/sv-cluster-ingress-values.yaml ``` + + ## Configuring the Cluster Egress Below is a complete list of destinations for outbound traffic from the Super Validator node. This list is useful for an SV that wishes to limit egress to only allow the minimum necessary outbound traffic. `S` will be used a shorthand for `SERIAL_ID`. The tables below are wide - you might need to scroll vertically to see the rightmost columns. 
diff --git a/docs-main/global-synchronizer/deployment/onboarding-process.mdx b/docs-main/global-synchronizer/deployment/onboarding-process.mdx index 1db28892..94a804d6 100644 --- a/docs-main/global-synchronizer/deployment/onboarding-process.mdx +++ b/docs-main/global-synchronizer/deployment/onboarding-process.mdx @@ -6,6 +6,9 @@ description: "How to onboard your validator on DevNet, TestNet, and MainNet via import ExternalSpliceMainSpliceRstCodeDocsSrcValidatorOperatorValidatorOnboardingBash107 from "/snippets/external/splice/main/splice-rst-code-docs-src-validator-operator-validator-onboarding-bash-107.mdx"; import ExternalSpliceMainSpliceRstCodeDocsSrcValidatorOperatorValidatorOnboardingBash136 from "/snippets/external/splice/main/splice-rst-code-docs-src-validator-operator-validator-onboarding-bash-136.mdx"; +import { NetworkVariables } from '/snippets/components/version.mdx'; +import { networkData } from '/snippets/generated/version-dashboard-data.mdx'; + {/* COPIED_START source="splice:docs/src/validator_operator/validator_onboarding.rst" hash="a1a3ab0c" */} @@ -42,6 +45,8 @@ Onboarding a Validator involves the following steps (for each network you want t ## Validating that your IP has been approved + + To validate that the SVs have added you to their respective IP allowlists, you can query their Scan URLs. Note that this must be run from the same egress IP from which you want to deploy your validator, e.g., from the VM that you want to run your docker compose setup on, or from within your Kubernetes cluster. First, please confirm that your egress IP in the terminal in which you are running the command is indeed the one you provided for whitelisting by running: @@ -57,7 +62,7 @@ Note that the following snippet requires installing [jq](https://jqlang.org/). 
```bash (set -o pipefail CURL='curl -fsS -m 5 --connect-timeout 5' -for url in $($CURL https://scan.sv-1.unknown_cluster.global.canton.network.sync.global/api/scan/v0/scans | jq -r '.scans[].scans[].publicUrl'); do +for url in $($CURL |gsf_scan_url|/api/scan/v0/scans | jq -r '.scans[].scans[].publicUrl'); do echo -n "$url: " $CURL "$url"/api/scan/version | jq -r '.version' done) @@ -71,7 +76,7 @@ Apart from connectivity to Scan, your validator must also be able to connect to ```bash (set -o pipefail -for url in $(curl -fsS -m 5 --connect-timeout 5 https://scan.sv-1.unknown_cluster.global.canton.network.sync.global/api/scan/v0/dso-sequencers | jq -r '.domainSequencers[].sequencers[].url | sub("https://"; "")'); do +for url in $(curl -fsS -m 5 --connect-timeout 5 |gsf_scan_url|/api/scan/v0/dso-sequencers | jq -r '.domainSequencers[].sequencers[].url | sub("https://"; "")'); do echo -n "$url: " grpcurl --max-time 10 "$url":443 grpc.health.v1.Health/Check done) @@ -83,6 +88,8 @@ Sequencers that are functional and have whitelisted your IP correctly will retur The default configuration for both of these requires access to at least 2/3 of the SVs for each of scans and sequencers. You may, at your option and own risk, configure connection to a single trusted scan and sequencer as described under validator helm chart configuration, at the cost of losing BFT integrity guarantees. 
+ + ## Stay Connected To stay connected with other validator operators, there is a shared slack channel and a few mailing lists: diff --git a/docs-main/global-synchronizer/deployment/required-network-parameters.mdx b/docs-main/global-synchronizer/deployment/required-network-parameters.mdx index b77d8063..a65c644a 100644 --- a/docs-main/global-synchronizer/deployment/required-network-parameters.mdx +++ b/docs-main/global-synchronizer/deployment/required-network-parameters.mdx @@ -3,6 +3,9 @@ title: "Required Network Parameters" description: "Parameters required to initialise a validator node and connect to the network" --- +import { NetworkVariables } from '/snippets/components/version.mdx'; +import { networkData } from '/snippets/generated/version-dashboard-data.mdx'; + {/* COPIED_START source="splice:docs/src/validator_operator/required_network_parameters.rst" hash="236187bd" */} @@ -11,19 +14,21 @@ This section was copied from existing reviewed documentation. Reviewers: Skip this section. Remove markers after final approval. + + To initialize your validator node, you need the following parameters that define the network you're onboarding to and the secret required for doing so. - **MIGRATION_ID** — The current migration id of the network (dev/test/mainnet) you are trying to connect to. This value is frozen and must not be changed from the last value. You can find this on [https://sync.global/sv-network/](https://sync.global/sv-network/). -- **SPONSOR_SV_URL** — The URL of the SV app of your SV sponsor. This should be of the form [https://sv.sv-1.unknown_cluster.global.canton.network.YOUR_SV_SPONSOR](https://sv.sv-1.unknown_cluster.global.canton.network.YOUR_SV_SPONSOR), e.g., if the Global Synchronizer Foundation is your sponsor use [https://sv.sv-1.unknown_cluster.global.canton.network.sync.global](https://sv.sv-1.unknown_cluster.global.canton.network.sync.global). +- **SPONSOR_SV_URL** — The URL of the SV app of your SV sponsor. 
This should be of the form [|generic_sv_url|](|generic_sv_url|), e.g., if the Global Synchronizer Foundation is your sponsor use [|gsf_sv_url|](|gsf_sv_url|). ONBOARDING_SECRET The onboarding secret provided by your sponsor. If you don't already have one, ask your sponsor. Note that onboarding secrets are one-time use and expire after 48 hours. If you don't join before it expires, you need to request a new secret from your SV sponsor. - +
-**DevNet-only** + -On DevNet, you can obtain an onboarding secret automatically by calling the following endpoint on any SV (replace `SPONSOR_SV_URL` with the SV app URL defined above): +You can obtain an onboarding secret automatically by calling the following endpoint on any SV (replace `SPONSOR_SV_URL` with the SV app URL defined above): ```bash curl -X POST SPONSOR_SV_URL/api/sv/v0/devnet/onboard/validator/prepare @@ -35,5 +40,8 @@ Note that this self-served secret is only valid for 1 hour. +
+ +
-{/* COPIED_END */} \ No newline at end of file +{/* COPIED_END */} diff --git a/docs-main/global-synchronizer/deployment/sv-network-resets.mdx b/docs-main/global-synchronizer/deployment/sv-network-resets.mdx index 9660cd72..463172e6 100644 --- a/docs-main/global-synchronizer/deployment/sv-network-resets.mdx +++ b/docs-main/global-synchronizer/deployment/sv-network-resets.mdx @@ -7,6 +7,9 @@ import ExternalSpliceMainSpliceRstCodeDocsSrcSvOperatorSvNetworkResetsBash75 fro import ExternalSpliceMainSpliceRstCodeDocsSrcSvOperatorSvNetworkResetsBash81 from "/snippets/external/splice/main/splice-rst-code-docs-src-sv-operator-sv-network-resets-bash-81.mdx"; import ExternalSpliceMainSpliceRstCodeDocsSrcSvOperatorSvNetworkResetsBash90 from "/snippets/external/splice/main/splice-rst-code-docs-src-sv-operator-sv-network-resets-bash-90.mdx"; +import { NetworkVariables } from '/snippets/components/version.mdx'; +import { networkData } from '/snippets/generated/version-dashboard-data.mdx'; + {/* COPIED_START source="splice:docs/src/sv_operator/sv_network_resets.rst" hash="1e423aba" */} @@ -15,6 +18,8 @@ This section was copied from existing reviewed documentation. Reviewers: Skip this section. Remove markers after final approval. + + DevNet and TestNet get reset roughly every 3 months with the resets spread out such that they never happen at the same time on DevNet and TestNet. The exact time is announced in the `#supervalidator-operations` channel run by the [Global Synchronizer Foundation](https://sync.global/). A reset requires a full redeployment of your node and loses any data you had on the node. Your node will not be functional until you complete the reset. Wait for the bootstrapping SV-1 to announce that they completed redeployment of their node before attempting to redeploy your node. @@ -22,7 +27,7 @@ A reset requires a full redeployment of your node and loses any data you had on To complete the reset, go through the following steps: 1. 
Backup information to be preserved across the reset - 1. Take a backup of the DSO configuration (replace YOUR_SCAN_URL with your own scan e.g. [https://scan.sv-1.unknown_cluster.global.canton.network.sync.global](https://scan.sv-1.unknown_cluster.global.canton.network.sync.global)): + 1. Take a backup of the DSO configuration (replace YOUR_SCAN_URL with your own scan e.g. [|gsf_scan_url|](|gsf_scan_url|)): curl -sSL --fail-with-body https://YOUR_SCAN_URL/api/scan/v0/dso > backup.json @@ -85,4 +90,7 @@ To complete the reset, go through the following steps: 4. Update your auto-sweeping configuration, as party ids change as part of the reset. + + + {/* COPIED_END */} \ No newline at end of file diff --git a/docs-main/global-synchronizer/deployment/synchronizer-traffic.mdx b/docs-main/global-synchronizer/deployment/synchronizer-traffic.mdx index d67a85b1..71a0a1df 100644 --- a/docs-main/global-synchronizer/deployment/synchronizer-traffic.mdx +++ b/docs-main/global-synchronizer/deployment/synchronizer-traffic.mdx @@ -5,6 +5,9 @@ description: "Traffic accounting, fees, and monitoring on the Global Synchronize import ExternalSpliceMainSpliceRstCodeDocsSrcDeploymentTrafficJson94 from "/snippets/external/splice/main/splice-rst-code-docs-src-deployment-traffic-json-94.mdx"; +import { NetworkVariables } from '/snippets/components/version.mdx'; +import { networkData } from '/snippets/generated/version-dashboard-data.mdx'; + {/* COPIED_START source="splice:docs/src/deployment/traffic.rst" hash="0e110a7b" */} @@ -44,10 +47,12 @@ Traffic accounting is "by participant"; all parties hosted on the same participa ## Traffic parameters + + The current synchronizer traffic parameters are recorded on the global `AmuletRules` contract and can be obtained from Scan. 
You can obtain them via the Scan UI or by querying the Scan API using, for example, this command (requires installing [jq](https://jqlang.org/)): ```bash -curl -X POST --header "Content-Type: application/json" -d "{}" https://scan.sv-1.unknown_cluster.global.canton.network.sync.global/api/scan/v0/amulet-rules | jq ".amulet_rules_update.contract.payload.configSchedule.initialValue.decentralizedSynchronizer.fees" +curl -X POST --header "Content-Type: application/json" -d "{}" |gsf_scan_url|/api/scan/v0/amulet-rules | jq ".amulet_rules_update.contract.payload.configSchedule.initialValue.decentralizedSynchronizer.fees" ``` Above command will return a JSON object similar to the following: @@ -61,7 +66,7 @@ This represents an encoded instance of the `SynchronizerFeesConfig` Daml data ty - `extraTrafficPrice`: the price of extra traffic beyond the free tier, denominated in USD per MB. The price is charged in `CC` as per the current USD exchange rate. The exchange rate is determined by SVs via median voting and recorded on current `OpenMiningRound` contracts obtainable from Scan. 
For querying the current CC price in USD as per the currently open mining round, you can check the Scan UI or use the following command (requires installing [jq](https://jqlang.org/)): ```bash - curl -X POST --header "Content-Type: application/json" -d "{\"cached_open_mining_round_contract_ids\":[], \"cached_issuing_round_contract_ids\":[]}" https://scan.sv-1.unknown_cluster.global.canton.network.sync.global/api/scan/v0/open-and-issuing-mining-rounds | jq ".open_mining_rounds | values[] | .contract.payload | {round, amuletPrice}" + curl -X POST --header "Content-Type: application/json" -d "{\"cached_open_mining_round_contract_ids\":[], \"cached_issuing_round_contract_ids\":[]}" |gsf_scan_url|/api/scan/v0/open-and-issuing-mining-rounds | jq ".open_mining_rounds | values[] | .contract.payload | {round, amuletPrice}" ``` - `readVsWriteScalingFactor`: specifies the weight of additional traffic balance subtractions (from a sender's balance) for delivering a synchronizer message to each of its recipients. Delivering messages incurs actual costs for the SVs, even if this cost is much smaller than the cost of ordering and persisting messages. The `readVsWriteScalingFactor` is specified in basis points (parts per 10,000), i.e., a value of 1 means that for each 1000 bytes that need to be delivered to a recipient, 0.1 bytes of traffic will be charged. So for example: At a factor of 4, a 1 MB message with 10 recipients will draw `1,000,000 * (1 + 10 * 0.004) = 1,040,000` bytes from the sending participant's traffic balance. @@ -70,6 +75,8 @@ This represents an encoded instance of the `SynchronizerFeesConfig` Daml data ty Like all parts of the `AmuletRulesConfig`, the `SynchronizerFeesConfig` is set by SVs via on-ledger voting, as part of DSO governance. The SV operations docs contain pointers for determining good values for some of these parameters. 
+ + ## How traffic balance interacts with transaction submission Here's an example of traffic accounting in practice for submitting a transaction: diff --git a/docs-main/global-synchronizer/deployment/validator-docker-compose.mdx b/docs-main/global-synchronizer/deployment/validator-docker-compose.mdx index 9fe4d6e5..48b6fa47 100644 --- a/docs-main/global-synchronizer/deployment/validator-docker-compose.mdx +++ b/docs-main/global-synchronizer/deployment/validator-docker-compose.mdx @@ -9,6 +9,9 @@ import ExternalSpliceMainSpliceRstCodeDocsSrcValidatorOperatorValidatorComposeYa import ExternalSpliceMainSpliceRstCodeDocsSrcValidatorOperatorValidatorComposeBash123 from "/snippets/external/splice/main/splice-rst-code-docs-src-validator-operator-validator-compose-bash-123.mdx"; import ExternalSpliceMainSpliceRstCodeDocsSrcValidatorOperatorValidatorComposeBash262 from "/snippets/external/splice/main/splice-rst-code-docs-src-validator-operator-validator-compose-bash-262.mdx"; +import { NetworkVariables } from '/snippets/components/version.mdx'; +import { networkData } from '/snippets/generated/version-dashboard-data.mdx'; + {/* COPIED_START source="splice:docs/src/validator_operator/validator_compose.rst" hash="074ae45b" */} @@ -31,6 +34,8 @@ This deployment is useful for: ## Requirements + + 1) A linux/MacOS machine with the following: 1. [docker compose](https://docs.docker.com/compose/install/) - at least version 2.26.0 or newer @@ -44,14 +49,16 @@ To validate that the dependencies are set up correctly, run the following comman 2) Your machine should either be connected to a VPN that is whitelisted on the network (contact your sponsor SV to obtain access), or have a static egress IP address. In the latter case, please provide that IP address to your sponsor SV to add it to the firewall rules. 
-3) Please download the release artifacts containing the docker-compose files, from here: [Download Bundle](https://github.com/digital-asset/decentralized-canton-sync/releases/download/v0.6.3/0.6.3_splice-node.tar.gz), and extract the bundle: +3) Please download the release artifacts containing the docker-compose files, from here: |bundle_download_link|, and extract the bundle: ```bash -tar xzvf 0.6.3_splice-node.tar.gz +tar xzvf |version|_splice-node.tar.gz ``` Additional parameters describing your own setup as opposed to the connection to the network are described below. + + ### HTTP Proxy configuration If you need to use an HTTP forward proxy for egress in your environment, you need to set `https.proxyHost` and `https.proxyPort` in `JAVA_TOOL_OPTIONS` in `splice-node/docker-compose/validator/compose.yaml` to use the HTTP proxy for outgoing connections. You need to do this for both the validator and the participant services: @@ -95,11 +102,13 @@ services: ## Deployment + + 1) Change to the `docker-compose` directory inside the extracted bundle: -2) Export the current version to an environment variable: export IMAGE_TAG=0.6.3 +2) Export the current version to an environment variable: |image_tag_set| 3) Run the following command to start the validator node, and wait for it to become ready (could take a few minutes): > ```bash @@ -113,6 +122,8 @@ services: Note that the validator may be stopped with the command `./stop.sh` and restarted again with the same `start.sh` command as above. Its data will be retained between invocations. In subseqent invocations, the secret itself may be left empty, but the `-o` is still mandatory, so a `-o ""` argument should be provided. 
+ + ## Logging into the wallet UI diff --git a/docs-main/global-synchronizer/deployment/validator-kubernetes.mdx b/docs-main/global-synchronizer/deployment/validator-kubernetes.mdx index 1c0db4a4..99bd4ff4 100644 --- a/docs-main/global-synchronizer/deployment/validator-kubernetes.mdx +++ b/docs-main/global-synchronizer/deployment/validator-kubernetes.mdx @@ -20,6 +20,9 @@ import ExternalSpliceMainSpliceRstCodeDocsSrcValidatorOperatorValidatorHelmBash6 import ExternalSpliceMainSpliceRstCodeDocsSrcValidatorOperatorValidatorHelmYaml771 from "/snippets/external/splice/main/splice-rst-code-docs-src-validator-operator-validator-helm-yaml-771.mdx"; import ExternalSpliceMainSpliceRstCodeDocsSrcValidatorOperatorValidatorHelmYaml797 from "/snippets/external/splice/main/splice-rst-code-docs-src-validator-operator-validator-helm-yaml-797.mdx"; +import { NetworkVariables } from '/snippets/components/version.mdx'; +import { networkData } from '/snippets/generated/version-dashboard-data.mdx'; + {/* COPIED_START source="splice:docs/src/validator_operator/validator_helm.rst" hash="b0bc851d" */} @@ -32,6 +35,8 @@ This section describes how to deploy a standalone validator node in Kubernetes u ## Requirements + + 1) A running Kubernetes cluster in which you have administrator access to create and manage namespaces. 2) A development workstation with the following: @@ -41,16 +46,18 @@ This section describes how to deploy a standalone validator node in Kubernetes u 3) Your cluster needs a static egress IP. After acquiring that, provide it to your SV sponsor who will propose adding it to the IP allowlist to the other SVs. 
-4) Please download the release artifacts containing the sample Helm value files, from here: [Download Bundle](https://github.com/digital-asset/decentralized-canton-sync/releases/download/v0.6.3/0.6.3_splice-node.tar.gz), and extract the bundle: +4) Please download the release artifacts containing the sample Helm value files, from here: |bundle_download_link|, and extract the bundle: ```bash -tar xzvf 0.6.3_splice-node.tar.gz +tar xzvf |version|_splice-node.tar.gz ``` -- **TRUSTED_SCAN_URL** — The scan URL of an SV that you trust and that is reachable by your validator, often your SV sponsor. This should be of the form [https://scan.sv-1.unknown_cluster.global.canton.network.YOUR_SV_SPONSOR](https://scan.sv-1.unknown_cluster.global.canton.network.YOUR_SV_SPONSOR), e.g., for the Global Synchronizer Foundation SV it is [https://scan.sv-1.unknown_cluster.global.canton.network.sync.global](https://scan.sv-1.unknown_cluster.global.canton.network.sync.global). +- **TRUSTED_SCAN_URL** — The scan URL of an SV that you trust and that is reachable by your validator, often your SV sponsor. This should be of the form [|generic_scan_url|](|generic_scan_url|), e.g., for the Global Synchronizer Foundation SV it is [|gsf_scan_url|](|gsf_scan_url|). Additional parameters describing your own setup as opposed to the connection to the network are described below. + + ## Validator Network Diagram Validator Network Diagram @@ -259,10 +266,12 @@ When running without authentication, the username of the validator administrator ### Configuring the Helm Charts + + To install the Helm charts needed to start a Validator node connected to the cluster, you will need to meet a few preconditions. 
The first is that there needs to be an environment variable defined to refer to the version of the Helm charts necessary to connect to this environment: ```bash -export CHART_VERSION=0.6.3 +|chart_version_set| ``` Please modify the file `splice-node/examples/sv-helm/participant-values.yaml` as follows: @@ -317,7 +326,7 @@ scanClient: # scanAddress: "TRUSTED_SCAN_URL" # replace with the trusted scan url ``` -For your selected `scanClient` type, replace `TRUSTED_SCAN_URL` with a URL of a Scan you host or trust that is reachable by your Validator. For example, the GSF scan URL, [https://scan.sv-1.unknown_cluster.global.canton.network.sync.global](https://scan.sv-1.unknown_cluster.global.canton.network.sync.global). For `bft-custom` and `bft` modes of `scanClient`, you can specify more than one scan seed URL by separating them with commas. +For your selected `scanClient` type, replace `TRUSTED_SCAN_URL` with a URL of a Scan you host or trust that is reachable by your Validator. For example, the GSF scan URL, [|gsf_scan_url|](|gsf_scan_url|). For `bft-custom` and `bft` modes of `scanClient`, you can specify more than one scan seed URL by separating them with commas. - If you want to configure the audience for the Validator app backend API, replace `OIDC_AUTHORITY_VALIDATOR_AUDIENCE` in the `auth.audience` entry with audience for the Validator app backend API. e.g. `https://validator.example.com/api`. - If you want to configure the audience for the Ledger API, set the `audience` field in the `splice-app-validator-ledger-api-auth` k8s secret with the audience for the Ledger API. e.g. `https://ledger_api.example.com`. 
@@ -368,14 +377,18 @@ Additionally, please modify the file `splice-node/examples/sv-helm/standalone-va Finally, please download the UI config values file from [https://github.com/global-synchronizer-foundation/configs/blob/main/configs/ui-config-values.yaml](https://github.com/global-synchronizer-foundation/configs/blob/main/configs/ui-config-values.yaml) and add the values from it to your `standalone-validator-values.yaml`. + + ### Installing the Helm Charts + + With these files in place, you can execute the following helm commands in sequence. It's generally a good idea to wait until each deployment reaches a stable state prior to moving on to the next step. ```bash -helm install postgres /splice-postgres -n validator --version ${CHART_VERSION} -f splice-node/examples/sv-helm/postgres-values-validator-participant.yaml --wait -helm install participant /splice-participant -n validator --version ${CHART_VERSION} -f splice-node/examples/sv-helm/participant-values.yaml -f splice-node/examples/sv-helm/standalone-participant-values.yaml --wait -helm install validator /splice-validator -n validator --version ${CHART_VERSION} -f splice-node/examples/sv-helm/validator-values.yaml -f splice-node/examples/sv-helm/standalone-validator-values.yaml --wait +helm install postgres |helm_repo_prefix|/splice-postgres -n validator --version ${CHART_VERSION} -f splice-node/examples/sv-helm/postgres-values-validator-participant.yaml --wait +helm install participant |helm_repo_prefix|/splice-participant -n validator --version ${CHART_VERSION} -f splice-node/examples/sv-helm/participant-values.yaml -f splice-node/examples/sv-helm/standalone-participant-values.yaml --wait +helm install validator |helm_repo_prefix|/splice-validator -n validator --version ${CHART_VERSION} -f splice-node/examples/sv-helm/validator-values.yaml -f splice-node/examples/sv-helm/standalone-validator-values.yaml --wait ``` Once this is running, you should be able to inspect the state of the cluster and observe 
pods running in the new namespace. A typical query might look as follows: @@ -384,6 +397,8 @@ Once this is running, you should be able to inspect the state of the cluster and Note also that `Pod` restarts may happen during bringup, particularly if all helm charts are deployed at the same time. For example, the `participant` cannot start until `postgres` is running. + + ## Configuring the Cluster Ingress The following routes should be configured in your cluster ingress controller. @@ -421,6 +436,8 @@ In order to install the reference charts, the following must be satisfied in you ### Installation Instructions + + Create a `cluster-ingress` namespace: @@ -453,9 +470,11 @@ This gateway terminates tls using the secret that you configured above, and expo using: ```bash -helm install cluster-ingress-validator /splice-cluster-ingress-runbook -n validator --version ${CHART_VERSION} -f splice-node/examples/sv-helm/validator-cluster-ingress-values.yaml +helm install cluster-ingress-validator |helm_repo_prefix|/splice-cluster-ingress-runbook -n validator --version ${CHART_VERSION} -f splice-node/examples/sv-helm/validator-cluster-ingress-values.yaml ``` + + ## Logging into the wallet UI After you deploy your ingress, open your browser at [https://wallet.validator.YOUR_HOSTNAME](https://wallet.validator.YOUR_HOSTNAME) and login using the credentials for the user that you configured as `validatorWalletUser` earlier. Once logged in one should see the transactions page. 
diff --git a/docs-main/global-synchronizer/production-operations/logical-synchronizer-upgrade.mdx b/docs-main/global-synchronizer/production-operations/logical-synchronizer-upgrade.mdx index b4117ca7..cabf2398 100644 --- a/docs-main/global-synchronizer/production-operations/logical-synchronizer-upgrade.mdx +++ b/docs-main/global-synchronizer/production-operations/logical-synchronizer-upgrade.mdx @@ -3,6 +3,9 @@ title: "Logical Synchronizer Upgrades" description: "Upgrade the protocol version of a Global Synchronizer with very limited network downtime through Logical Synchronizer Upgrades (LSU)" --- +import { Version, VersionOption } from '/snippets/components/version.mdx'; +import { networkData } from '/snippets/generated/version-dashboard-data.mdx'; + {/* COPIED_START source="splice:docs/src/sv_operator/sv_logical_synchronizer_upgrade.rst" hash="0cf8ccb8" */} @@ -101,13 +104,32 @@ Concretely, the procedure is as follows: submitted. The actual timestamps will be chosen through coordination with all SVs. The timestamps are applied through an environment variable on the successor sequencer: -``` + + + +```yaml - name: ADDITIONAL_CONFIG_SEQUENCER_LSU_SEQUENCING_BOUNDS value: | canton.sequencers.sequencer.parameters.parameters.lsu-repair.lsu-sequencing-bounds-override.lower-bound-sequencing-time-exclusive=LOWER_BOUND_SEQUENCING_TIME_EXCLUSIVE canton.sequencers.sequencer.parameters.parameters.lsu-repair.lsu-sequencing-bounds-override.lower-bound-sequencing-time-exclusive=UPGRADE_TIME ``` + + + +```yaml +- name: ADDITIONAL_CONFIG_SEQUENCER_LSU_SEQUENCING_BOUNDS + value: | + canton.sequencers.sequencer.parameters.lsu-repair.lsu-sequencing-bounds-override.lower-bound-sequencing-time-exclusive=LOWER_BOUND_SEQUENCING_TIME_EXCLUSIVE + canton.sequencers.sequencer.parameters.lsu-repair.lsu-sequencing-bounds-override.upgrade-time=UPGRADE_TIME +``` + + + + 3. Super validators wait until ingestion completed. 4. 
Super validators configure their SV app to transfer
diff --git a/docs-main/global-synchronizer/production-operations/splice-metrics-overview.mdx b/docs-main/global-synchronizer/production-operations/splice-metrics-overview.mdx index 8f3108e6..86ba0e16 100644 --- a/docs-main/global-synchronizer/production-operations/splice-metrics-overview.mdx +++ b/docs-main/global-synchronizer/production-operations/splice-metrics-overview.mdx @@ -3,6 +3,9 @@ title: "Splice Metrics Overview" description: "Prometheus metrics exposed by Canton Network components and how to scrape them" --- +import { NetworkVariables } from '/snippets/components/version.mdx'; +import { networkData } from '/snippets/generated/version-dashboard-data.mdx'; + {/* COPIED_START source="splice:docs/src/deployment/observability/metrics.rst" hash="88d7cba8" */} @@ -73,11 +76,16 @@ This trigger is disabled by default. As per the information in Adding ad-hoc con ## Grafana Dashboards -The release bundle ([Download Bundle](https://github.com/digital-asset/decentralized-canton-sync/releases/download/v0.6.3/0.6.3_splice-node.tar.gz)) contains a set of Grafana dashboards that are built based on the metrics above. These dashboards can be imported into a Grafana instance. The dashboards are built assuming a K8s deployment, and may need to be modified for other deployment types. The dashboards can be found under the `grafana-dashboards` folder in the release bundle. + + +The release bundle (|bundle_download_link|) contains a set of Grafana dashboards that are built based on the metrics above. These dashboards can be imported into a Grafana instance. The dashboards are built assuming a K8s deployment, and may need to be modified for other deployment types. The dashboards can be found under the `grafana-dashboards` folder in the release bundle. The dashboards are built using queries specific for Prometheus native histograms. 
+ + + {/* COPIED_END */} \ No newline at end of file diff --git a/docs-main/global-synchronizer/production-operations/validator-disaster-recovery.mdx b/docs-main/global-synchronizer/production-operations/validator-disaster-recovery.mdx index 46478c20..5cefbe05 100644 --- a/docs-main/global-synchronizer/production-operations/validator-disaster-recovery.mdx +++ b/docs-main/global-synchronizer/production-operations/validator-disaster-recovery.mdx @@ -12,6 +12,9 @@ import ExternalSpliceMainSpliceRstCodeDocsSrcValidatorOperatorValidatorDisasterR import ExternalSpliceMainSpliceRstCodeDocsSrcValidatorOperatorValidatorDisasterRecoveryNone373 from "/snippets/external/splice/main/splice-rst-code-docs-src-validator-operator-validator-disaster-recovery-none-373.mdx"; import ExternalSpliceMainSpliceRstCodeDocsSrcValidatorOperatorValidatorDisasterRecoveryNone405 from "/snippets/external/splice/main/splice-rst-code-docs-src-validator-operator-validator-disaster-recovery-none-405.mdx"; +import { NetworkVariables } from '/snippets/components/version.mdx'; +import { networkData } from '/snippets/generated/version-dashboard-data.mdx'; + {/* COPIED_START source="splice:docs/src/validator_operator/validator_disaster_recovery.rst" hash="48572fc5" */} @@ -163,6 +166,8 @@ In some cases you might want to force the migration attempt for a set of parties #### Troubleshooting failed ACS imports + + If you still observe issues, in particular you observe `ACS_COMMITMENT_MISMATCH` warnings in your participant logs, something has likely gone wrong while importing the active contracts of at least one of the parties hosted on your node. Another common symptom (in case the validator party is affected) is that your your validator initialization fails with a `Unknown secret` error and your validator logs contain a `ValidatorLicense not found` message. To address a failed `ACS` import, you can usually: 1. First make sure all parties are hosted on the same node. 
The most common case is that either the parties are still on the old node with the old participant ID or they have been migrated to the new node. You can check by opening a Canton console to any participant on the network (i.e., you can also ask another validator or SV operator for this information) and running the following query where \ is the part after the `::` in, for example, your validator party ID. @@ -191,7 +196,7 @@ If you still observe issues, in particular you observe `ACS_COMMITMENT_MISMATCH` Run from a regular shell (same working directory like the one you started your Canton console from): ```bash - curl -sSL --fail-with-body 'https://scan.sv-1.unknown_cluster.global.canton.network.sync.global/api/scan/v0/acs/YOUR_PARTY_ID' -H 'Content-Type: application/json' | jq -r .acs_snapshot | base64 -d > acs_snapshot + curl -sSL --fail-with-body '|gsf_scan_url|/api/scan/v0/acs/YOUR_PARTY_ID' -H 'Content-Type: application/json' | jq -r .acs_snapshot | base64 -d > acs_snapshot ``` From the Canton console: @@ -204,6 +209,8 @@ If you still observe issues, in particular you observe `ACS_COMMITMENT_MISMATCH` 3. If the previous step failed or you chose not to attempt it, you can retry the migration procedure with a fresh participant. If your parties are still on the original node that you took identities backup from, you can use your existing backup. If your parties have been migrated to the new node already, take a new identities dump from the new node. If the new node is in a state where you cannot take a fresh dump, use the old dump but edit the `id` field to the participant ID of the new node. You can obtain the `id` in the correct format by, for example, running `participant.id.toProtoPrimitive` in a Canton console to the participant. 
You can now take down the node to which you originally tried to restore and try the restore procedure again with your adjusted dump on a fresh node with a different participant ID prefix (i.e., a different `newParticipantIdentifier` / `<new_participant_id>` depending on your deployment model). + + #### Troubleshooting rejected topology snapshots In rare cases, the re-onboarding process may fail at the `ImportTopologySnapshot` step because an `OwnerToKeyMapping` for the old participant ID has an insufficient number of signatures in the topology snapshot. This only affects validators that were originally onboarded on Splice 0.4.1 or earlier, which used a Canton version that did not require the mapped keys to co-sign `OwnerToKeyMapping` transactions. You can identify this issue by looking for the following messages in your participant logs: @@ -232,6 +239,8 @@ To work around this, follow these steps: ## Recover the Coin balance of an external party + + For a party relying on external signing, a similar procedure can be used to recover its coin balance in case the validator originally hosting it becomes unusable for whatever reason. @@ -266,7 +275,7 @@ We can now query CC Scan to get the active contract set (ACS) for a party and wr ```bash // Make sure to adjust YOUR_VALID_FROM to the time you got from the previous query and YOUR_PARY_ID -curl -sSL --fail-with-body 'https://scan.sv-1.unknown_cluster.global.canton.network.sync.global/api/scan/v0/acs/YOUR_PARTY_ID?record_time=YOUR_VALID_FROM' -H 'Content-Type: application/json' | jq -r .acs_snapshot | base64 -d > acs_snapshot +curl -sSL --fail-with-body '|gsf_scan_url|/api/scan/v0/acs/YOUR_PARTY_ID?record_time=YOUR_VALID_FROM' -H 'Content-Type: application/json' | jq -r .acs_snapshot | base64 -d > acs_snapshot ``` Lastly, we can import the ACS: @@ -275,6 +284,8 @@ Lastly, we can import the ACS: The party is now hosted on the node and can participat in transactions. 
The last step is to set up the necessary contracts
- > - > Important - > - > - - > - > If you run the participant using the docker compose setup the docker command must be run with the docker network used by the participant. Adjust the configuration to connect to the participant container: - > - > ``` - > canton { - > remote-participants { - > participant { - > admin-api { - > port = 5002 - > address = participant - > } - > ledger-api { - > port = 5001 - > address = participant - > } - > token = "" - > } - > } - > features.enable-preview-commands = yes - > features.enable-testing-commands = yes - > features.enable-repair-commands = yes - > } - > ``` - > - > Running docker with the default network (`splice-validator`): - > - > ```bash - > ``` - > - > docker run -it --rm --network splice-validator -v \$(pwd)/console.conf:/app/app.conf /canton:0.6.3 --console - > - >
+ + +```bash +docker run -it --rm --network host -v $(pwd)/console.conf:/app/app.conf |docker_repo_prefix|/canton:|version_literal| --console +``` + + + + +If you run the participant using the docker compose setup the docker command must be run with the docker network used by the participant. Adjust the configuration to connect to the participant container: + +```text +canton { + remote-participants { + participant { + admin-api { + port = 5002 + address = participant + } + ledger-api { + port = 5001 + address = participant + } + token = "" + } + } + features.enable-preview-commands = yes + features.enable-testing-commands = yes + features.enable-repair-commands = yes +} +``` + +Running docker with the default network (`splice-validator`): + + + +```bash +docker run -it --rm --network splice-validator -v \$(pwd)/console.conf:/app/app.conf |docker_repo_prefix|/canton:|version_literal| --console +``` + + + ## Sequencer console + + 1. Ensure you can access the sequencer's ports 5008 and 5009 2. Add the configuration to a local file `console.conf` @@ -120,11 +123,15 @@ Welcome to Canton! 3. Run the docker command > ```bash - > docker run -it --rm --network host -v $(pwd)/console.conf:/app/app.conf /canton:0.6.3 --console + > docker run -it --rm --network host -v $(pwd)/console.conf:/app/app.conf |docker_repo_prefix|/canton:|version_literal| --console > ``` + + ## Mediator console + + 1. Ensure you can access the mediator's port 5007 2. Add the configuration to a local file `console.conf` @@ -148,9 +155,11 @@ Welcome to Canton! 3. Run the docker command > ```bash - > docker run -it --rm --network host -v $(pwd)/console.conf:/app/app.conf /canton:0.6.3 --console + > docker run -it --rm --network host -v $(pwd)/console.conf:/app/app.conf |docker_repo_prefix|/canton:|version_literal| --console > ``` + + ## Access in a K8s cluster In a K8s cluster you can use a debug pod to access the console directly from the cluster. 
diff --git a/docs-main/global-synchronizer/understand/local-testing.mdx b/docs-main/global-synchronizer/understand/local-testing.mdx index 55d835e3..9bebaee2 100644 --- a/docs-main/global-synchronizer/understand/local-testing.mdx +++ b/docs-main/global-synchronizer/understand/local-testing.mdx @@ -3,6 +3,9 @@ title: "Local Testing" description: "Docker-Compose based deployment of a local Canton Network for development and testing" --- +import { NetworkVariables } from '/snippets/components/version.mdx'; +import { networkData } from '/snippets/generated/version-dashboard-data.mdx'; + {/* COPIED_START source="splice:docs/src/app_dev/testing/localnet.rst" hash="a3270359" */} @@ -21,10 +24,12 @@ Designed primarily for development and testing, LocalNet is not intended for pro ## Setup -1. Download the release artifacts from the [Download Bundle](https://github.com/digital-asset/decentralized-canton-sync/releases/download/v0.6.3/0.6.3_splice-node.tar.gz) link, and extract the bundle: + + +1. Download the release artifacts from the |bundle_download_link| link, and extract the bundle: > ```bash - > tar xzvf 0.6.3_splice-node.tar.gz + > tar xzvf |version|_splice-node.tar.gz > ``` The extracted docker compose files defining LocalNet are located in `splice-node/docker-compose/localnet`. @@ -38,7 +43,7 @@ Designed primarily for development and testing, LocalNet is not intended for pro > ```bash > export LOCALNET_DIR=$PWD/splice-node/docker-compose/localnet - > export IMAGE_TAG=0.6.3 + > |image_tag_set_plain| > ``` 3. See `use-localnet` for the commands to start, stop, inspect, and administrate the LocalNet nodes. @@ -53,6 +58,8 @@ Optional: use the following additional environment variables to configure: Resource constraints for containers can be configured via: - **LOCALNET_DIR/resource-constraints.yaml** + + ## Exposed Ports The following section details the ports used by various services. The default database port is **DB_PORT=5432**. 
diff --git a/docs-main/overview/reference/splice-wallet-reference.mdx b/docs-main/overview/reference/splice-wallet-reference.mdx index c468f32d..0e15cc16 100644 --- a/docs-main/overview/reference/splice-wallet-reference.mdx +++ b/docs-main/overview/reference/splice-wallet-reference.mdx @@ -42,7 +42,13 @@ Contrary to other assets like Eth or Bitcoin, Canton Coin requires a party to ex Parties that are ok with accepting incoming Canton Coin transfers from any sender, can setup a `TransferPreapproval`. This allows any party to send Canton Coin to the party that setup the `TransferPreapproval`. Note that this only applies to transfers of Canton Coin but not to other assets. Other assets may provide their own variant of a preapproval which needs to be setup separately or they may require approval of each incoming transfer individually. -To ensure that the super validators don't have to store and serve `TransferPreapprovals` contracts for parties that are no longer active or malicious parties cannot spam them, a preapproval has a limited lifetime until it expires and a fee must be burned proportional to the lifetime when creating the preapproval. The fee is controlled by the super validators through the `transferPreapprovalFee` parameter. The current value can be observed in CC Scan at /dso and defaults to \$1/year. +To ensure that the super validators don't have to store and serve `TransferPreapprovals` contracts for parties that are no longer active or malicious parties cannot spam them, a preapproval has a limited lifetime until it expires and a fee must be burned proportional to the lifetime when creating the preapproval. The fee is controlled by the super validators through the `transferPreapprovalFee` parameter. 
The current value can be observed in CC Scan; select the right network: + +- DevNet: [https://scan.sv-1.dev.global.canton.network.sync.global/dso](https://scan.sv-1.dev.global.canton.network.sync.global/dso) +- TestNet: [https://scan.sv-1.test.global.canton.network.sync.global/dso](https://scan.sv-1.test.global.canton.network.sync.global/dso) +- MainNet: [https://scan.sv-1.global.canton.network.sync.global/dso](https://scan.sv-1.global.canton.network.sync.global/dso) + +The current value defaults to \$1/year. Each preapproval has two parties: The `receiver` party that approves incoming transfers and the `provider` party. The provider party is responsible for paying the fee and renewing the preapproval when it gets close to its expiry date. In return, the `provider` party will be the app provider on all incoming transfers that use this preapproval and get the app rewards for it. The `provider` party must not necessarily be hosted on the same node as the `receiver` party although that is the most common setup in practice. 
diff --git a/docs-main/sdks-tools/api-reference/splice-daml-apis.mdx b/docs-main/sdks-tools/api-reference/splice-daml-apis.mdx index 2de4efbc..26d93a0a 100644 --- a/docs-main/sdks-tools/api-reference/splice-daml-apis.mdx +++ b/docs-main/sdks-tools/api-reference/splice-daml-apis.mdx @@ -3,6 +3,9 @@ title: "Splice Daml APIs" description: "The Daml choice-based APIs exposed by the Splice DARs" --- +import { NetworkVariables } from '/snippets/components/version.mdx'; +import { networkData } from '/snippets/generated/version-dashboard-data.mdx'; + {/* COPIED_START source="splice:docs/src/app_dev/daml_api/index.rst" hash="812c336f" */} @@ -63,11 +66,13 @@ Earning featured app rewards for direct transfers of non-CC tokens to your walle ### How to use the WalletUserProxy to earn featured app rewards + + Assuming you are a wallet provider that runs a validator node for your users, you can use the `WalletUserProxy` template to get credit for the activity of your wallet users as follows. 1. Apply for a featured app right for your wallet provider party, as explained on `how_to_become_a_featured_application`. -2. Extract the latest version of the `splice-util-featured-app-proxies.dar` file from the release bundle ([Download Bundle](https://github.com/digital-asset/decentralized-canton-sync/releases/download/v0.6.3/0.6.3_splice-node.tar.gz)). +2. Extract the latest version of the `splice-util-featured-app-proxies.dar` file from the release bundle (|bundle_download_link|). 3. Upload the extracted `.dar` file to your validator node. @@ -83,6 +88,8 @@ Assuming you are a wallet provider that runs a validator node for your users, yo > > See this [Daml test script](https://github.com/canton-network/splice/blob/main/daml/splice-util-featured-app-proxies-test/daml/Splice/Util/FeaturedApp/IntegrationTests/TestWalletUserProxy.daml#L47) for a complete example of how to construct the choice. 
+ + ## Additional Splice Daml APIs The app provider of an asset registry is not necessarily the same as the party controlling the minting and burning of tokens. A typical example are tokens that are bridged from another network. The following API targets that use-case; and thus enables to decouple the upgrade cycles of an asset registry from the ones of the bridging app. diff --git a/docs-main/sdks-tools/api-reference/splice-http-apis.mdx b/docs-main/sdks-tools/api-reference/splice-http-apis.mdx index ad8e492c..fe526377 100644 --- a/docs-main/sdks-tools/api-reference/splice-http-apis.mdx +++ b/docs-main/sdks-tools/api-reference/splice-http-apis.mdx @@ -3,6 +3,9 @@ title: "Splice HTTP APIs" description: "The HTTP REST APIs exposed by the Scan, Validator, and SV applications" --- +import { NetworkVariables } from '/snippets/components/version.mdx'; +import { networkData } from '/snippets/generated/version-dashboard-data.mdx'; + {/* COPIED_START source="splice:docs/src/app_dev/overview/splice_app_apis.rst" hash="10312958" */} @@ -34,7 +37,11 @@ Some of the Splice apps also define additional HTTP APIs that are considered int ## OpenAPI Conventions -The HTTP APIs of Splice apps are documented using [OpenAPI specifications](https://www.openapis.org/). You can download the OpenAPI specification for Splice's applications here: [Download OpenAPI specs](https://github.com/digital-asset/decentralized-canton-sync/releases/download/v0.6.3/0.6.3_openapi.tar.gz). + + +The HTTP APIs of Splice apps are documented using [OpenAPI specifications](https://www.openapis.org/). You can download the OpenAPI specification for Splice's applications here: |openapi_download_link|. 
+ + ### API Stability diff --git a/docs-main/sdks-tools/api-reference/splice-scan-bulk-data-api.mdx b/docs-main/sdks-tools/api-reference/splice-scan-bulk-data-api.mdx index 29135e7d..3f9c0fba 100644 --- a/docs-main/sdks-tools/api-reference/splice-scan-bulk-data-api.mdx +++ b/docs-main/sdks-tools/api-reference/splice-scan-bulk-data-api.mdx @@ -3,6 +3,9 @@ title: "Scan Bulk Data API" description: "Full update history and ACS snapshots from the SV participant node" --- +import { NetworkVariables } from '/snippets/components/version.mdx'; +import { networkData } from '/snippets/generated/version-dashboard-data.mdx'; + {/* COPIED_START source="splice:docs/src/app_dev/scan_api/scan_bulk_data_api.rst" hash="2753bd48" */} @@ -32,6 +35,8 @@ The Bulk Data Scan API provides access to the update history and ACS snapshots a ## Open API Specification + + The `scan_openapi` describes the Scan API in detail. The below table provides a quick overview of the endpoints that the Scan Bulk Data API consists of: | Endpoint | Description | @@ -45,11 +50,13 @@ If you would rather read the yaml Open API specification file directly, this can Example URLs for accessing the Scan Bulk Data API are: -- [https://scan.sv-1.unknown_cluster.global.canton.network.sync.global/api/scan/v2/updates](https://scan.sv-1.unknown_cluster.global.canton.network.sync.global/api/scan/v2/updates) -- [https://scan.sv-1.unknown_cluster.global.canton.network.sync.global/api/scan/v0/state/acs/snapshot-timestamp](https://scan.sv-1.unknown_cluster.global.canton.network.sync.global/api/scan/v0/state/acs/snapshot-timestamp) +- [|gsf_scan_url|/api/scan/v2/updates](|gsf_scan_url|/api/scan/v2/updates) +- [|gsf_scan_url|/api/scan/v0/state/acs/snapshot-timestamp](|gsf_scan_url|/api/scan/v0/state/acs/snapshot-timestamp) Please note the `api/scan` prefix in the URLs, which is the base path for the Scan API. 
+ + ### Updates An update can be one of two things: @@ -337,12 +344,14 @@ The ACS snapshots are periodically taken and stored in the Scan App. This endpoi #### GET /v0/state/acs/snapshot-timestamp + + The /v0/state/acs/snapshot-timestamp endpoint returns the timestamp of the most recent snapshot before the given date, for the given `migration_id`. Specify `migration_id = 0` for the beginning of the network. The returned timestamp corresponds to the record time of the last transaction in the snapshot. An example request to get the timestamp of the most recent snapshot before a given date is shown below: ```bash -curl https://scan.sv-1.unknown_cluster.global.canton.network.sync.global/api/scan/v0/state/acs/snapshot-timestamp\?before\="2025-02-12T00:00:00.000000Z"\&migration_id\=4 +curl |gsf_scan_url|/api/scan/v0/state/acs/snapshot-timestamp\?before\="2025-02-12T00:00:00.000000Z"\&migration_id\=4 ``` The response returns the timestamp of the most recent snapshot before the given date: @@ -353,6 +362,8 @@ The response returns the timestamp of the most recent snapshot before the given } ``` + + #### POST /v0/state/acs The /v0/state/acs endpoint returns the ACS in creation date ascending order, paged, for a given migration id and record time. Post an `AcsRequest` with a `migration_id`, `record_time` and `page_size` to get a page of contracts. An optional `templates` field filters the ACS by a set of `template_id`s. 
diff --git a/docs-main/sdks-tools/api-reference/splice-scan-gs-connectivity-api.mdx b/docs-main/sdks-tools/api-reference/splice-scan-gs-connectivity-api.mdx index c6a4394c..e75a2822 100644 --- a/docs-main/sdks-tools/api-reference/splice-scan-gs-connectivity-api.mdx +++ b/docs-main/sdks-tools/api-reference/splice-scan-gs-connectivity-api.mdx @@ -3,6 +3,9 @@ title: "Scan Global Synchronizer Connectivity API" description: "Discover Scan and sequencer endpoints across all SVs" --- +import { NetworkVariables } from '/snippets/components/version.mdx'; +import { networkData } from '/snippets/generated/version-dashboard-data.mdx'; + {/* COPIED_START source="splice:docs/src/app_dev/scan_api/scan_global_synchronizer_connectivity_api.rst" hash="05c0a1f9" */} @@ -15,7 +18,9 @@ Splice network applications and validators need to be able to connect to multipl ## Listing all SV Scans -Every Scan can list all approved SV scans connected to the network. For example, query from /v0/scans from [https://scan.sv-1.unknown_cluster.global.canton.network.sync.global](https://scan.sv-1.unknown_cluster.global.canton.network.sync.global), and the response will be something like + + +Every Scan can list all approved SV scans connected to the network. For example, query from /v0/scans from [|gsf_scan_url|](|gsf_scan_url|), and the response will be something like ```json { @@ -41,6 +46,8 @@ Every Scan can list all approved SV scans connected to the network. For example, `scans` is a list of synchronizer IDs, each with an associated list of SVs and their Scan base URLs. In this case, `Global-Synchronizer-Foundation` matches the originally-used Scan. Take any of these `publicUrl`s and query `/api/scan/v0/scans`, and the same set will be returned. + + ## Listing all SV Sequencers Likewise, Canton sequencers for all approved SVs are published by every Scan. 
For example, query from /v0/dso-sequencers, and the result will be something like diff --git a/docs-main/snippets/components/version.mdx b/docs-main/snippets/components/version.mdx new file mode 100644 index 00000000..b0292801 --- /dev/null +++ b/docs-main/snippets/components/version.mdx @@ -0,0 +1,291 @@ +export const VersionOption = ({ children }) => <>{children}; + +export const NetworkVariables = ({ + children, + title = 'Network-specific values', + description, + defaultNetwork = 'devnet', + networkData = {}, +}) => { + const deploymentNetworkOrder = ['devnet', 'testnet', 'mainnet']; + const tokenPattern = /\|([A-Za-z0-9_]+)\|/g; + const tokenTextValue = (value) => { + if (value && typeof value === 'object' && 'href' in value) { + return value.href; + } + return value ?? null; + }; + const replacementForToken = (token, substitutions) => tokenTextValue(substitutions?.[token]); + const replaceStringProp = (value, substitutions) => { + if (typeof value !== 'string') { + return value; + } + return value.replace(tokenPattern, (match, token) => replacementForToken(token, substitutions) ?? match); + }; + const replaceTextNode = (value, substitutions, keyPrefix = 'token') => { + if (typeof value !== 'string') { + return value; + } + + const pieces = []; + let lastIndex = 0; + let match; + tokenPattern.lastIndex = 0; + + while ((match = tokenPattern.exec(value)) !== null) { + const [rawToken, token] = match; + const replacement = substitutions?.[token]; + if (!replacement) { + continue; + } + + if (match.index > lastIndex) { + pieces.push(value.slice(lastIndex, match.index)); + } + + if (replacement && typeof replacement === 'object' && replacement.href) { + const linkLabel = replacement.label ?? replacement.href; + const networkLabel = activeData?.versions?.splice + ? `${activeData.name ?? activeNetwork} ${activeData.versions.splice}` + : activeData?.name ?? activeNetwork; + pieces.push( + + {networkLabel ? 
`${linkLabel} (${networkLabel})` : linkLabel} + + ); + } else { + pieces.push(String(replacement)); + } + lastIndex = match.index + rawToken.length; + } + + if (!pieces.length) { + return value; + } + + if (lastIndex < value.length) { + pieces.push(value.slice(lastIndex)); + } + + return pieces; + }; + const replaceNode = (node, substitutions, keyPrefix = 'node') => { + if (typeof node === 'string') { + return replaceTextNode(node, substitutions, keyPrefix); + } + + if (Array.isArray(node)) { + return node.map((child, index) => replaceNode(child, substitutions, `${keyPrefix}-${index}`)); + } + + if (!React.isValidElement(node)) { + return node; + } + + const networkOnly = node.props?.['data-network-only']; + if (typeof networkOnly === 'string') { + const allowedNetworks = networkOnly.split(',').map((network) => network.trim()).filter(Boolean); + if (!allowedNetworks.includes(activeNetwork)) { + return null; + } + } + + const props = {}; + for (const [name, value] of Object.entries(node.props ?? {})) { + if (name === 'children') { + continue; + } + props[name] = typeof value === 'string' ? replaceStringProp(value, substitutions) : value; + } + + return React.cloneElement( + node, + props, + React.Children.map(node.props.children, (child, index) => + replaceNode(child, substitutions, `${keyPrefix}-${index}`) + ) + ); + }; + const options = deploymentNetworkOrder.filter((networkKey) => networkData[networkKey]?.substitutions); + const defaultIndex = Math.max(0, options.indexOf(defaultNetwork)); + const [activeIndex, setActiveIndex] = React.useState(defaultIndex); + const activeNetwork = options[activeIndex] ?? options[0]; + const activeData = networkData[activeNetwork]; + const panelRef = React.useRef(null); + + React.useEffect(() => { + if (!activeData?.substitutions || !panelRef.current) { + return; + } + + const replaceTokenText = (value) => + value.replace(/\|([A-Za-z0-9_]+)\|/g, (match, token) => + replacementForToken(token, activeData.substitutions) ?? 
match + ); + + const replaceRenderedCode = () => { + for (const codeRoot of panelRef.current.querySelectorAll('pre, code')) { + if (codeRoot.tagName === 'PRE' && codeRoot.querySelector('code')) { + continue; + } + codeRoot.textContent = replaceTokenText(codeRoot.textContent ?? ''); + } + }; + + let animationFrame; + const scheduleReplaceRenderedCode = () => { + if (animationFrame) { + window.cancelAnimationFrame(animationFrame); + } + animationFrame = window.requestAnimationFrame(() => { + animationFrame = null; + replaceRenderedCode(); + }); + }; + const observer = new MutationObserver(scheduleReplaceRenderedCode); + const timeouts = [100, 500, 1000].map((delay) => window.setTimeout(replaceRenderedCode, delay)); + + replaceRenderedCode(); + observer.observe(panelRef.current, { childList: true, characterData: true, subtree: true }); + + return () => { + observer.disconnect(); + if (animationFrame) { + window.cancelAnimationFrame(animationFrame); + } + for (const timeout of timeouts) { + window.clearTimeout(timeout); + } + }; + }, [activeNetwork, activeData]); + + if (!activeData) { + return null; + } + + const label = (networkKey) => { + const network = networkData[networkKey]; + const version = network?.versions?.splice; + return version ? `${network?.name ?? networkKey} (${version})` : network?.name ?? networkKey; + }; + + return ( +
+ {(title || description) && ( +
+ {title &&
{title}
} + {description &&
{description}
} +
+ )} +
+ {options.map((networkKey, index) => { + const isActive = index === activeIndex; + return ( + + ); + })} +
+
+ {React.Children.map(children, (child, index) => { + if (typeof child === 'string' && !child.trim()) { + return null; + } + const content = replaceNode(child, activeData.substitutions, `network-${activeNetwork}-${index}`); + if (content === null || content === false) { + return null; + } + return ( +
+ {content} +
+ ); + })} +
+
+ ); +}; + +export const Version = ({ children, title, description, defaultLabel, networkData = {} }) => { + const networkOrder = ['mainnet', 'testnet', 'devnet']; + const networksForVersion = (version) => { + return networkOrder.filter((networkKey) => networkData[networkKey]?.versions?.splice === version); + }; + const networkNames = (networkKeys) => { + return networkKeys.map((networkKey) => networkData[networkKey]?.name ?? networkKey).join(' / '); + }; + const labelForVersion = (version) => { + const networks = networksForVersion(version); + return networks.length ? `${networkNames(networks)} (${version})` : `CN ${version}`; + }; + const labelWithVersion = (label, version) => { + return version ? `${label} (${version})` : label; + }; + const optionLabel = (option) => option.props.label + ? labelWithVersion(option.props.label, option.props.version) + : labelForVersion(option.props.version); + const options = React.Children.toArray(children).filter((child) => child?.props?.label || child?.props?.version); + const defaultIndex = Math.max( + 0, + options.findIndex((child) => optionLabel(child) === defaultLabel), + ); + const [activeIndex, setActiveIndex] = React.useState(defaultIndex); + const activeOption = options[activeIndex] ?? options[0]; + + if (!options.length) { + return null; + } + + return ( +
+ {(title || description) && ( +
+ {title &&
{title}
} + {description &&
{description}
} +
+ )} +
+ {options.map((option, index) => { + const isActive = index === activeIndex; + const label = optionLabel(option); + return ( + + ); + })} +
+
+ {activeOption} +
+
+ ); +}; diff --git a/docs-main/snippets/generated/version-dashboard-data.mdx b/docs-main/snippets/generated/version-dashboard-data.mdx index 143667d4..be9e2138 100644 --- a/docs-main/snippets/generated/version-dashboard-data.mdx +++ b/docs-main/snippets/generated/version-dashboard-data.mdx @@ -4,7 +4,7 @@ export const networkData = { description: 'Production network for live applications. Data is permanent and never reset. Upgrades follow DevNet and TestNet validation.', color: '#22c55e', versions: { - splice: '0.5.10', + splice: '0.5.18', damlSdk: '3.4.11', pqs: '3.4.1', tokenStandard: '1.0.0', @@ -14,22 +14,47 @@ export const networkData = { }, advanced: { minProtocolVersion: '6', - migrationId: '2', + migrationId: '4', darVersions: [ { name: 'splice-amulet', version: '0.1.14' }, { name: 'splice-wallet', version: '0.1.14' }, { name: 'splice-dso-governance', version: '0.1.20' }, ], - releaseUrl: 'https://github.com/hyperledger-labs/splice/releases/tag/v0.5.10', + releaseUrl: 'https://github.com/canton-network/splice/releases/tag/0.5.18', }, endpoint: 'scan.sv-1.global.canton.network.sync.global', + substitutions: { + splice_cluster: 'main', + da_hostname: 'global.canton.network.digitalasset.com', + gsf_sv_url: 'https://sv.sv-1.global.canton.network.sync.global', + generic_sv_url: 'https://sv.sv-1.global.canton.network.YOUR_SV_SPONSOR', + gsf_scan_url: 'https://scan.sv-1.global.canton.network.sync.global', + generic_scan_url: 'https://scan.sv-1.global.canton.network.YOUR_SV_SPONSOR', + gsf_sequencer_url: 'https://sequencer-MIGRATION_ID.sv-1.global.canton.network.sync.global', + version: '0.5.18', + version_literal: '0.5.18', + chart_version_literal: '0.5.18', + chart_version_set: 'export CHART_VERSION=0.5.18', + image_tag_set: 'export IMAGE_TAG=0.5.18', + image_tag_set_plain: 'export IMAGE_TAG=0.5.18', + bundle_download_link: { + label: 'Download Bundle', + href: 
'https://github.com/digital-asset/decentralized-canton-sync/releases/download/v0.5.18/0.5.18_splice-node.tar.gz', + }, + openapi_download_link: { + label: 'Download OpenAPI specs', + href: 'https://github.com/digital-asset/decentralized-canton-sync/releases/download/v0.5.18/0.5.18_openapi.tar.gz', + }, + helm_repo_prefix: 'oci://ghcr.io/digital-asset/decentralized-canton-sync/helm', + docker_repo_prefix: 'ghcr.io/digital-asset/decentralized-canton-sync/docker', + }, }, testnet: { name: 'TestNet', description: 'Pre-production environment for final validation. Requires MainNet approval to join. May be reset periodically.', color: '#eab308', versions: { - splice: '0.5.11', + splice: '0.5.18', damlSdk: '3.4.11', pqs: '3.4.1', tokenStandard: '1.0.0', @@ -39,22 +64,47 @@ export const networkData = { }, advanced: { minProtocolVersion: '6', - migrationId: '3', + migrationId: '1', darVersions: [ { name: 'splice-amulet', version: '0.1.15' }, { name: 'splice-wallet', version: '0.1.15' }, { name: 'splice-dso-governance', version: '0.1.21' }, ], - releaseUrl: 'https://github.com/hyperledger-labs/splice/releases/tag/v0.5.11', + releaseUrl: 'https://github.com/canton-network/splice/releases/tag/0.5.18', }, endpoint: 'scan.sv-1.test.global.canton.network.sync.global', + substitutions: { + splice_cluster: 'test', + da_hostname: 'test.global.canton.network.digitalasset.com', + gsf_sv_url: 'https://sv.sv-1.test.global.canton.network.sync.global', + generic_sv_url: 'https://sv.sv-1.test.global.canton.network.YOUR_SV_SPONSOR', + gsf_scan_url: 'https://scan.sv-1.test.global.canton.network.sync.global', + generic_scan_url: 'https://scan.sv-1.test.global.canton.network.YOUR_SV_SPONSOR', + gsf_sequencer_url: 'https://sequencer-MIGRATION_ID.sv-1.test.global.canton.network.sync.global', + version: '0.5.18', + version_literal: '0.5.18', + chart_version_literal: '0.5.18', + chart_version_set: 'export CHART_VERSION=0.5.18', + image_tag_set: 'export IMAGE_TAG=0.5.18', + image_tag_set_plain: 
'export IMAGE_TAG=0.5.18', + bundle_download_link: { + label: 'Download Bundle', + href: 'https://github.com/digital-asset/decentralized-canton-sync/releases/download/v0.5.18/0.5.18_splice-node.tar.gz', + }, + openapi_download_link: { + label: 'Download OpenAPI specs', + href: 'https://github.com/digital-asset/decentralized-canton-sync/releases/download/v0.5.18/0.5.18_openapi.tar.gz', + }, + helm_repo_prefix: 'oci://ghcr.io/digital-asset/decentralized-canton-sync/helm', + docker_repo_prefix: 'ghcr.io/digital-asset/decentralized-canton-sync/docker', + }, }, devnet: { name: 'DevNet', description: 'Development environment with latest features. Open to any validator (IP allowlist required). Reset every 3 months. Best for testing upgrades.', color: '#a78bfa', versions: { - splice: '0.5.12', + splice: '0.6.3', damlSdk: '3.4.11', pqs: '3.4.1', tokenStandard: '1.0.0', @@ -64,14 +114,39 @@ export const networkData = { }, advanced: { minProtocolVersion: '6', - migrationId: '4', + migrationId: '1', darVersions: [ { name: 'splice-amulet', version: '0.1.15' }, { name: 'splice-wallet', version: '0.1.15' }, { name: 'splice-dso-governance', version: '0.1.21' }, ], - releaseUrl: 'https://github.com/hyperledger-labs/splice/releases/tag/v0.5.12', + releaseUrl: 'https://github.com/canton-network/splice/releases/tag/0.6.3', }, endpoint: 'scan.sv-1.dev.global.canton.network.sync.global', + substitutions: { + splice_cluster: 'dev', + da_hostname: 'dev.global.canton.network.digitalasset.com', + gsf_sv_url: 'https://sv.sv-1.dev.global.canton.network.sync.global', + generic_sv_url: 'https://sv.sv-1.dev.global.canton.network.YOUR_SV_SPONSOR', + gsf_scan_url: 'https://scan.sv-1.dev.global.canton.network.sync.global', + generic_scan_url: 'https://scan.sv-1.dev.global.canton.network.YOUR_SV_SPONSOR', + gsf_sequencer_url: 'https://sequencer-MIGRATION_ID.sv-1.dev.global.canton.network.sync.global', + version: '0.6.3', + version_literal: '0.6.3', + chart_version_literal: '0.6.3', + 
chart_version_set: 'export CHART_VERSION=0.6.3', + image_tag_set: 'export IMAGE_TAG=0.6.3', + image_tag_set_plain: 'export IMAGE_TAG=0.6.3', + bundle_download_link: { + label: 'Download Bundle', + href: 'https://github.com/digital-asset/decentralized-canton-sync/releases/download/v0.6.3/0.6.3_splice-node.tar.gz', + }, + openapi_download_link: { + label: 'Download OpenAPI specs', + href: 'https://github.com/digital-asset/decentralized-canton-sync/releases/download/v0.6.3/0.6.3_openapi.tar.gz', + }, + helm_repo_prefix: 'oci://ghcr.io/digital-asset/decentralized-canton-sync/helm', + docker_repo_prefix: 'ghcr.io/digital-asset/decentralized-canton-sync/docker', + }, }, } diff --git a/scripts/helpers/updateVersionDashboardData.js b/scripts/helpers/updateVersionDashboardData.js index bb1d7bc6..53dd9809 100644 --- a/scripts/helpers/updateVersionDashboardData.js +++ b/scripts/helpers/updateVersionDashboardData.js @@ -93,6 +93,7 @@ function buildNetworkData(repoConfig, metaConfig) { network.versions = versions; network.advanced = repoVersion.advanced; network.endpoint = repoVersion.endpoint; + network.substitutions = repoVersion.substitutions || {}; networkData[networkKey] = network; } @@ -137,6 +138,7 @@ function generateMDX(networkData) { lines.push(` },`); lines.push(` endpoint: ${formatValue(network.endpoint, 2)},`); + lines.push(` substitutions: ${formatValue(network.substitutions, 2)},`); lines.push(` },`); }