diff --git a/.changeset/dirty-gifts-grin.md b/.changeset/dirty-gifts-grin.md new file mode 100644 index 00000000000..88cb0b7ace4 --- /dev/null +++ b/.changeset/dirty-gifts-grin.md @@ -0,0 +1,5 @@ +--- +'@hyperlane-xyz/sdk': minor +--- + +Include entire RPC array for chainMetadataToViemChain diff --git a/.changeset/eleven-cows-dance.md b/.changeset/eleven-cows-dance.md new file mode 100644 index 00000000000..adc16422bd2 --- /dev/null +++ b/.changeset/eleven-cows-dance.md @@ -0,0 +1,5 @@ +--- +'@hyperlane-xyz/sdk': minor +--- + +Added ZKSync specific deployment logic and artifact related utils diff --git a/.changeset/famous-taxis-dream.md b/.changeset/famous-taxis-dream.md new file mode 100644 index 00000000000..5471470bf31 --- /dev/null +++ b/.changeset/famous-taxis-dream.md @@ -0,0 +1,5 @@ +--- +'@hyperlane-xyz/core': minor +--- + +Refactor ZKsync artifact generation and validation logic diff --git a/.changeset/tame-sheep-retire.md b/.changeset/tame-sheep-retire.md new file mode 100644 index 00000000000..0b0b65b0f86 --- /dev/null +++ b/.changeset/tame-sheep-retire.md @@ -0,0 +1,5 @@ +--- +'@hyperlane-xyz/sdk': minor +--- + +Adds the proxyAdmin.owner to the Checker ownerOverrides such that it checks proxyAdmin.owner instead of always using the top-level owner diff --git a/.github/workflows/simapp-docker.yml b/.github/workflows/simapp-docker.yml new file mode 100644 index 00000000000..a44c0e2718c --- /dev/null +++ b/.github/workflows/simapp-docker.yml @@ -0,0 +1,62 @@ +name: Build and Push Cosmos Simapp Image to GCR +on: + workflow_dispatch: + inputs: + hyperlane_cosmos_branch: + description: 'Branch, ref, or tag to build' + default: 'v1.0.0-beta0' + +concurrency: + group: build-push-cosmos-simapp-${{ github.ref }} + cancel-in-progress: true + +jobs: + check-env: + runs-on: ubuntu-latest + # assign output from step to job output + outputs: + gcloud-service-key: ${{ steps.gcloud-service-key.outputs.defined }} + steps: + - id: gcloud-service-key + # assign 
GCLOUD_SERVICE_KEY to env for access in conditional + env: + GCLOUD_SERVICE_KEY: ${{ secrets.GCLOUD_SERVICE_KEY }} + if: "${{ env.GCLOUD_SERVICE_KEY != '' }}" + # runs if GCLOUD_SERVICE_KEY is defined, so we set the output to true + run: echo "defined=true" >> $GITHUB_OUTPUT + + build-and-push-to-gcr: + runs-on: ubuntu-latest + permissions: + contents: read + id-token: write + + # uses check-env to determine if secrets.GCLOUD_SERVICE_KEY is defined + needs: [check-env] + if: needs.check-env.outputs.gcloud-service-key == 'true' + + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha || github.sha }} + submodules: recursive + - name: Set up Depot CLI + uses: depot/setup-action@v1 + - name: Login to GCR + uses: docker/login-action@v3 + with: + registry: gcr.io + username: _json_key + password: ${{ secrets.GCLOUD_SERVICE_KEY }} + - name: Build and push + uses: depot/build-push-action@v1 + with: + project: 3cpjhx94qv + context: ./typescript/cosmos-sdk + file: ./typescript/cosmos-sdk/Dockerfile + push: true + tags: | + gcr.io/abacus-labs-dev/hyperlane-cosmos-simapp:${{ github.event.inputs.hyperlane_cosmos_branch || 'v1.0.0-beta0' }} + build-args: | + BRANCH_NAME=${{ github.event.inputs.hyperlane_cosmos_branch || 'v1.0.0-beta0' }} + platforms: linux/amd64,linux/arm64 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index a53caaa6bcb..e1a2d553764 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -172,6 +172,23 @@ jobs: echo "CLI E2E tests failed" exit 1 + cosmos-sdk-e2e: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha || github.sha }} + submodules: recursive + fetch-depth: 0 + + - name: yarn-build + uses: ./.github/actions/yarn-build-with-cache + with: + ref: ${{ github.event.pull_request.head.sha || github.sha }} + + - name: Cosmos SDK e2e tests + run: yarn --cwd typescript/cosmos-sdk test:e2e + agent-configs: runs-on: 
ubuntu-latest strategy: @@ -384,7 +401,6 @@ jobs: MAINNET3_ETHEREUM_RPC_URLS: ${{ secrets.MAINNET3_ETHEREUM_RPC_URLS }} TESTNET4_SEPOLIA_RPC_URLS: ${{ secrets.TESTNET4_SEPOLIA_RPC_URLS }} - timeout-minutes: 10 strategy: fail-fast: false diff --git a/rust/main/Cargo.lock b/rust/main/Cargo.lock index 4123f312e8f..1b4422a28a2 100644 --- a/rust/main/Cargo.lock +++ b/rust/main/Cargo.lock @@ -521,6 +521,438 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +[[package]] +name = "aws-config" +version = "1.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b96342ea8948ab9bef3e6234ea97fc32e2d8a88d8fb6a084e52267317f94b6b" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-sdk-sso", + "aws-sdk-ssooidc", + "aws-sdk-sts", + "aws-smithy-async", + "aws-smithy-http 0.60.12", + "aws-smithy-json 0.60.7", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "fastrand", + "hex 0.4.3", + "http 0.2.12", + "hyper 0.14.30", + "ring 0.17.8", + "time", + "tokio", + "tracing", + "zeroize", +] + +[[package]] +name = "aws-credential-types" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4471bef4c22a06d2c7a1b6492493d3fdf24a805323109d6874f9c94d5906ac14" +dependencies = [ + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "zeroize", +] + +[[package]] +name = "aws-runtime" +version = "1.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0aff45ffe35196e593ea3b9dd65b320e51e2dda95aff4390bc459e461d09c6ad" +dependencies = [ + "aws-credential-types", + "aws-sigv4", + "aws-smithy-async", + "aws-smithy-eventstream", + "aws-smithy-http 0.62.0", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "fastrand", + "http 0.2.12", + "http-body 0.4.6", 
+ "once_cell", + "percent-encoding", + "pin-project-lite", + "tracing", + "uuid 1.11.0", +] + +[[package]] +name = "aws-sdk-s3" +version = "1.65.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3ba2c5c0f2618937ce3d4a5ad574b86775576fa24006bcb3128c6e2cbf3c34e" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-sigv4", + "aws-smithy-async", + "aws-smithy-checksums", + "aws-smithy-eventstream", + "aws-smithy-http 0.60.12", + "aws-smithy-json 0.61.3", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-smithy-xml", + "aws-types", + "bytes", + "fastrand", + "hex 0.4.3", + "hmac 0.12.1", + "http 0.2.12", + "http-body 0.4.6", + "lru", + "once_cell", + "percent-encoding", + "regex-lite", + "sha2 0.10.8", + "tracing", + "url", +] + +[[package]] +name = "aws-sdk-sso" +version = "1.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05ca43a4ef210894f93096039ef1d6fa4ad3edfabb3be92b80908b9f2e4b4eab" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http 0.60.12", + "aws-smithy-json 0.61.3", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "http 0.2.12", + "once_cell", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sdk-ssooidc" +version = "1.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81fea2f3a8bb3bd10932ae7ad59cc59f65f270fc9183a7e91f501dc5efbef7ee" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http 0.60.12", + "aws-smithy-json 0.60.7", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "http 0.2.12", + "once_cell", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sdk-sts" +version = "1.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6ada54e5f26ac246dc79727def52f7f8ed38915cb47781e2a72213957dc3a7d5" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http 0.60.12", + "aws-smithy-json 0.60.7", + "aws-smithy-query", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-smithy-xml", + "aws-types", + "http 0.2.12", + "once_cell", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sigv4" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69d03c3c05ff80d54ff860fe38c726f6f494c639ae975203a101335f223386db" +dependencies = [ + "aws-credential-types", + "aws-smithy-eventstream", + "aws-smithy-http 0.62.0", + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "crypto-bigint 0.5.5", + "form_urlencoded", + "hex 0.4.3", + "hmac 0.12.1", + "http 0.2.12", + "http 1.2.0", + "once_cell", + "p256 0.11.1", + "percent-encoding", + "ring 0.17.8", + "sha2 0.10.8", + "subtle", + "time", + "tracing", + "zeroize", +] + +[[package]] +name = "aws-smithy-async" +version = "1.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e190749ea56f8c42bf15dd76c65e14f8f765233e6df9b0506d9d934ebef867c" +dependencies = [ + "futures-util", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "aws-smithy-checksums" +version = "0.60.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba1a71073fca26775c8b5189175ea8863afb1c9ea2cceb02a5de5ad9dfbaa795" +dependencies = [ + "aws-smithy-http 0.60.12", + "aws-smithy-types", + "bytes", + "crc32c", + "crc32fast", + "hex 0.4.3", + "http 0.2.12", + "http-body 0.4.6", + "md-5 0.10.6", + "pin-project-lite", + "sha1", + "sha2 0.10.8", + "tracing", +] + +[[package]] +name = "aws-smithy-eventstream" +version = "0.60.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c45d3dddac16c5c59d553ece225a88870cf81b7b813c9cc17b78cf4685eac7a" +dependencies = [ + "aws-smithy-types", + 
"bytes", + "crc32fast", +] + +[[package]] +name = "aws-smithy-http" +version = "0.60.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7809c27ad8da6a6a68c454e651d4962479e81472aa19ae99e59f9aba1f9713cc" +dependencies = [ + "aws-smithy-eventstream", + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "bytes-utils", + "futures-core", + "http 0.2.12", + "http-body 0.4.6", + "once_cell", + "percent-encoding", + "pin-project-lite", + "pin-utils", + "tracing", +] + +[[package]] +name = "aws-smithy-http" +version = "0.62.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5949124d11e538ca21142d1fba61ab0a2a2c1bc3ed323cdb3e4b878bfb83166" +dependencies = [ + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "bytes-utils", + "futures-core", + "http 0.2.12", + "http 1.2.0", + "http-body 0.4.6", + "once_cell", + "percent-encoding", + "pin-project-lite", + "pin-utils", + "tracing", +] + +[[package]] +name = "aws-smithy-http-client" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8aff1159006441d02e57204bf57a1b890ba68bedb6904ffd2873c1c4c11c546b" +dependencies = [ + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "h2 0.4.7", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.30", + "hyper-rustls", + "pin-project-lite", + "rustls 0.21.12", + "tokio", + "tracing", +] + +[[package]] +name = "aws-smithy-json" +version = "0.60.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4683df9469ef09468dad3473d129960119a0d3593617542b7d52086c8486f2d6" +dependencies = [ + "aws-smithy-types", +] + +[[package]] +name = "aws-smithy-json" +version = "0.61.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92144e45819cae7dc62af23eac5a038a58aa544432d2102609654376a900bd07" +dependencies = [ + "aws-smithy-types", +] + +[[package]] +name = "aws-smithy-observability" +version = "0.1.2" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "445d065e76bc1ef54963db400319f1dd3ebb3e0a74af20f7f7630625b0cc7cc0" +dependencies = [ + "aws-smithy-runtime-api", + "once_cell", +] + +[[package]] +name = "aws-smithy-query" +version = "0.60.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2fbd61ceb3fe8a1cb7352e42689cec5335833cd9f94103a61e98f9bb61c64bb" +dependencies = [ + "aws-smithy-types", + "urlencoding", +] + +[[package]] +name = "aws-smithy-runtime" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0152749e17ce4d1b47c7747bdfec09dac1ccafdcbc741ebf9daa2a373356730f" +dependencies = [ + "aws-smithy-async", + "aws-smithy-http 0.62.0", + "aws-smithy-http-client", + "aws-smithy-observability", + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "fastrand", + "http 0.2.12", + "http 1.2.0", + "http-body 0.4.6", + "http-body 1.0.1", + "once_cell", + "pin-project-lite", + "pin-utils", + "tokio", + "tracing", +] + +[[package]] +name = "aws-smithy-runtime-api" +version = "1.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3da37cf5d57011cb1753456518ec76e31691f1f474b73934a284eb2a1c76510f" +dependencies = [ + "aws-smithy-async", + "aws-smithy-types", + "bytes", + "http 0.2.12", + "http 1.2.0", + "pin-project-lite", + "tokio", + "tracing", + "zeroize", +] + +[[package]] +name = "aws-smithy-types" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "836155caafba616c0ff9b07944324785de2ab016141c3550bd1c07882f8cee8f" +dependencies = [ + "base64-simd", + "bytes", + "bytes-utils", + "futures-core", + "http 0.2.12", + "http 1.2.0", + "http-body 0.4.6", + "http-body 1.0.1", + "http-body-util", + "itoa", + "num-integer", + "pin-project-lite", + "pin-utils", + "ryu", + "serde", + "time", + "tokio", + "tokio-util", +] + +[[package]] +name = "aws-smithy-xml" +version = "0.60.9" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab0b0166827aa700d3dc519f72f8b3a91c35d0b8d042dc5d643a91e6f80648fc" +dependencies = [ + "xmlparser", +] + +[[package]] +name = "aws-types" +version = "1.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3873f8deed8927ce8d04487630dc9ff73193bab64742a61d050e57a68dec4125" +dependencies = [ + "aws-credential-types", + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "rustc_version", + "tracing", +] + [[package]] name = "axum" version = "0.6.20" @@ -719,6 +1151,16 @@ version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" +[[package]] +name = "base64-simd" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "339abbe78e73178762e23bea9dfd08e697eb3f3301cd4be981c0f78ba5859195" +dependencies = [ + "outref", + "vsimd", +] + [[package]] name = "base64ct" version = "1.6.0" @@ -1108,13 +1550,23 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.1" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" dependencies = [ "serde", ] +[[package]] +name = "bytes-utils" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dafe3a8757b027e2be6e4e5601ed563c55989fcf1546e933c66c8eb3a058d35" +dependencies = [ + "bytes", + "either", +] + [[package]] name = "bzip2-sys" version = "0.1.11+1.0.8" @@ -1705,7 +2157,7 @@ dependencies = [ "ed25519-zebra 4.0.3", "k256 0.13.4", "num-traits", - "p256", + "p256 0.13.2", "rand_core 0.6.4", "rayon", "sha2 0.10.8", @@ -1853,6 +2305,15 @@ dependencies = [ "libc", ] +[[package]] +name = 
"crc32c" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a47af21622d091a8f0fb295b88bc886ac74efcc613efc19f5d0b21de5c89e47" +dependencies = [ + "rustc_version", +] + [[package]] name = "crc32fast" version = "1.4.2" @@ -2270,6 +2731,19 @@ dependencies = [ "syn 2.0.98", ] +[[package]] +name = "dashmap" +version = "5.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" +dependencies = [ + "cfg-if", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core 0.9.10", +] + [[package]] name = "data-encoding" version = "2.6.0" @@ -3309,9 +3783,9 @@ checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" [[package]] name = "fastrand" -version = "2.1.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "feature-probe" @@ -3416,6 +3890,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + [[package]] name = "foreign-types" version = "0.3.2" @@ -3630,7 +4110,7 @@ dependencies = [ "fuel-types", "k256 0.13.4", "lazy_static", - "p256", + "p256 0.13.2", "rand 0.8.5", "secp256k1", "serde", @@ -3913,9 +4393,9 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = 
"05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" @@ -4197,6 +4677,17 @@ dependencies = [ "serde", ] +[[package]] +name = "hashbrown" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash", +] + [[package]] name = "hashers" version = "1.0.1" @@ -4501,9 +4992,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.5.2" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "256fb8d4bd6413123cc9d91832d78325c48ff41677595be797d90f42969beae0" +checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" dependencies = [ "bytes", "futures-channel", @@ -4555,7 +5046,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ - "hyper 1.5.2", + "hyper 1.6.0", "hyper-util", "pin-project-lite", "tokio", @@ -4586,7 +5077,7 @@ dependencies = [ "futures-util", "http 1.2.0", "http-body 1.0.1", - "hyper 1.5.2", + "hyper 1.6.0", "pin-project-lite", "socket2 0.5.7", "tokio", @@ -4609,6 +5100,8 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "aws-config", + "aws-sdk-s3", "axum 0.6.20", "backtrace", "backtrace-oneline", @@ -4618,6 +5111,7 @@ dependencies = [ "config", "console-subscriber", "convert_case 0.6.0", + "dashmap", "derive-new", "derive_builder", "ed25519-dalek 1.0.1", @@ -4646,7 +5140,6 @@ dependencies = [ "rocksdb", "rusoto_core", "rusoto_kms", - "rusoto_s3", "rusoto_sts", "serde", "serde_json", @@ -4839,6 +5332,7 @@ version = "0.1.0" dependencies = [ "abigen", "async-trait", + "dashmap", "derive-new", "ethers", "ethers-contract", @@ -5647,6 +6141,15 @@ version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum 
= "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +[[package]] +name = "lru" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" +dependencies = [ + "hashbrown 0.15.2", +] + [[package]] name = "lz4-sys" version = "1.10.0" @@ -6240,9 +6743,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.19.0" +version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "opaque-debug" @@ -6364,6 +6867,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "outref" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a80800c0488c3a21695ea981a54918fbb37abf04f4d0720c453632255e2ff0e" + [[package]] name = "overload" version = "0.1.1" @@ -6376,6 +6885,17 @@ version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" +[[package]] +name = "p256" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" +dependencies = [ + "ecdsa 0.14.8", + "elliptic-curve 0.12.3", + "sha2 0.10.8", +] + [[package]] name = "p256" version = "0.13.2" @@ -7368,6 +7888,12 @@ dependencies = [ "regex-syntax 0.8.4", ] +[[package]] +name = "regex-lite" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53a49587ad06b26609c52e423de037e7f57f20d53535d66e08c695f347df952a" + [[package]] name = "regex-syntax" version = "0.6.29" @@ -7426,12 +7952,14 @@ dependencies = [ "serde_json", "strum 0.26.3", "submitter", + "tempfile", "thiserror", "tokio", "tokio-metrics", "tokio-test", 
"tracing", "tracing-futures", + "tracing-subscriber", "tracing-test", "typetag", "uuid 1.11.0", @@ -7771,19 +8299,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "rusoto_s3" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7aae4677183411f6b0b412d66194ef5403293917d66e70ab118f07cc24c5b14d" -dependencies = [ - "async-trait", - "bytes", - "futures", - "rusoto_core", - "xml-rs", -] - [[package]] name = "rusoto_signature" version = "0.48.0" @@ -7926,9 +8441,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.20" +version = "0.23.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5065c3f250cbd332cd894be57c40fa52387247659b14a2d6041d121547903b1b" +checksum = "934b404430bb06b3fae2cba809eb45a1ab1aecd64491213d7c3301b88393f8d1" dependencies = [ "log", "once_cell", @@ -7992,9 +8507,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.10.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2bf47e6ff922db3825eb750c4e2ff784c6ff8fb9e13046ef6a1d1c5401b0b37" +checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" [[package]] name = "rustls-webpki" @@ -8476,9 +8991,9 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "serde" -version = "1.0.209" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" dependencies = [ "serde_derive", ] @@ -8531,9 +9046,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.209" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" +checksum = 
"5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2 1.0.93", "quote 1.0.37", @@ -10299,7 +10814,7 @@ version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" dependencies = [ - "rustls 0.23.20", + "rustls 0.23.19", "tokio", ] @@ -10468,7 +10983,7 @@ dependencies = [ "http 1.2.0", "http-body 1.0.1", "http-body-util", - "hyper 1.5.2", + "hyper 1.6.0", "hyper-timeout 0.5.2", "hyper-util", "percent-encoding", @@ -10897,6 +11412,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "urlencoding" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" + [[package]] name = "utf-8" version = "0.7.6" @@ -10928,6 +11449,7 @@ name = "validator" version = "0.1.0" dependencies = [ "async-trait", + "aws-config", "axum 0.6.20", "chrono", "config", @@ -10948,6 +11470,7 @@ dependencies = [ "mockall", "prometheus", "reqwest", + "rusoto_core", "serde", "serde_json", "thiserror", @@ -10999,6 +11522,12 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" +[[package]] +name = "vsimd" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c3082ca00d5a5ef149bb8b555a72ae84c9c59f7250f013ac822ac2e49b19c64" + [[package]] name = "walkdir" version = "2.5.0" @@ -11491,6 +12020,12 @@ version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "539a77ee7c0de333dcc6da69b177380a0b81e0dacfa4f7344c465a36871ee601" +[[package]] +name = "xmlparser" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66fee0b777b0f5ac1c69bb06d361268faafa61cd4682ae064a171c16c433e9e4" + [[package]] name = "ya-gcp" version = 
"0.11.3" diff --git a/rust/main/Cargo.toml b/rust/main/Cargo.toml index 64258fc4652..d99cd3c5884 100644 --- a/rust/main/Cargo.toml +++ b/rust/main/Cargo.toml @@ -23,6 +23,7 @@ members = [ "utils/hex", "utils/run-locally", ] +resolver = "2" [workspace.package] documentation = "https://docs.hyperlane.xyz" @@ -38,6 +39,12 @@ anyhow = "1.0" async-trait = "0.1" async-rwlock = "1.3" auto_impl = "1.0" +aws-config = { version = "1.1.7", features = ["behavior-version-latest"] } +# AWS deps are pinned to be compatible with rustc 1.80.1 +aws-sdk-s3 = "=1.65.0" +aws-sdk-sso = "=1.50.0" +aws-sdk-ssooidc = "=1.50.0" +aws-sdk-sts = "=1.50.0" axum = "0.6.1" backtrace = "0.3" base64 = "0.21.2" @@ -62,6 +69,7 @@ cosmwasm-std = "*" crunchy = "0.2" ctrlc = "3.2" curve25519-dalek = { version = "~3.2", features = ["serde"] } +dashmap = "5" derive-new = "0.5" derive_builder = "0.12" derive_more = "0.99" diff --git a/rust/main/agents/relayer/Cargo.toml b/rust/main/agents/relayer/Cargo.toml index 29102a8a737..b37ff4c9bec 100644 --- a/rust/main/agents/relayer/Cargo.toml +++ b/rust/main/agents/relayer/Cargo.toml @@ -64,10 +64,12 @@ once_cell.workspace = true mockall.workspace = true tokio-test.workspace = true tracing-test.workspace = true +tracing-subscriber.workspace = true hyperlane-test = { path = "../../hyperlane-test" } hyperlane-base = { path = "../../hyperlane-base", features = ["test-utils"] } hyperlane-core = { path = "../../hyperlane-core", features = ["agent", "async", "test-utils"] } ethers-prometheus = { path = "../../ethers-prometheus", features = ["serde"] } +tempfile.workspace = true [features] default = ["color-eyre", "oneline-errors"] diff --git a/rust/main/agents/relayer/src/msg/metadata/aggregation.rs b/rust/main/agents/relayer/src/msg/metadata/aggregation.rs index 383baaf8653..ddd5c54a19b 100644 --- a/rust/main/agents/relayer/src/msg/metadata/aggregation.rs +++ b/rust/main/agents/relayer/src/msg/metadata/aggregation.rs @@ -140,7 +140,12 @@ impl 
AggregationIsmMetadataBuilder { let params_cache_key = match self .base_builder() .ism_cache_policy_classifier() - .get_cache_policy(self.root_ism, ism.domain(), ModuleType::Aggregation) + .get_cache_policy( + self.root_ism, + ism.domain(), + ModuleType::Aggregation, + self.base.app_context.as_ref(), + ) .await { // To have the cache key be more succinct, we use the message id diff --git a/rust/main/agents/relayer/src/msg/metadata/base.rs b/rust/main/agents/relayer/src/msg/metadata/base.rs index 5d9174fc8fe..8c180568d1b 100644 --- a/rust/main/agents/relayer/src/msg/metadata/base.rs +++ b/rust/main/agents/relayer/src/msg/metadata/base.rs @@ -38,6 +38,8 @@ pub enum MetadataBuildError { MaxIsmDepthExceeded(u32), #[error("Exceeded max count when building metadata ({0})")] MaxIsmCountReached(u32), + #[error("Exceeded max validator count when building metadata ({0})")] + MaxValidatorCountReached(u32), #[error("Aggregation threshold not met ({0})")] AggregationThresholdNotMet(u32), } @@ -208,11 +210,22 @@ pub enum IsmCachePolicy { IsmSpecific, } +#[derive(Debug, Clone, Default, Deserialize, PartialEq)] +#[serde(tag = "type", rename_all = "camelCase")] +pub enum IsmCacheSelector { + #[default] + DefaultIsm, + AppContext { + context: String, + }, +} + /// Configuration for ISM caching behavior. /// Fields are renamed to be all lowercase / without underscores to match /// the format expected by the settings parsing. 
#[derive(Debug, Clone, Default, Deserialize)] pub struct IsmCacheConfig { + selector: IsmCacheSelector, #[serde(deserialize_with = "deserialize_module_types", rename = "moduletypes")] module_types: HashSet, chains: Option>, @@ -257,7 +270,7 @@ impl IsmCacheConfig { #[derive(Debug, new)] pub struct IsmCachePolicyClassifier { default_ism_getter: DefaultIsmCache, - default_ism_cache_config: IsmCacheConfig, + ism_cache_configs: Vec, } impl IsmCachePolicyClassifier { @@ -267,28 +280,36 @@ impl IsmCachePolicyClassifier { root_ism: H256, domain: &HyperlaneDomain, ism_module_type: ModuleType, + app_context: Option<&String>, ) -> IsmCachePolicy { - let default_ism = match self.default_ism_getter.get().await { - Ok(default_ism) => default_ism, - Err(err) => { - tracing::warn!(?err, "Error fetching default ISM for ISM cache policy, falling back to default cache policy"); - return IsmCachePolicy::default(); + for config in &self.ism_cache_configs { + let matches_module = match &config.selector { + IsmCacheSelector::DefaultIsm => { + let default_ism = match self.default_ism_getter.get().await { + Ok(default_ism) => default_ism, + Err(err) => { + tracing::warn!(?err, "Error fetching default ISM for ISM cache policy, attempting next config"); + continue; + } + }; + root_ism == default_ism + } + IsmCacheSelector::AppContext { + context: selector_app_context, + } => app_context.map_or(false, |app_context| app_context == selector_app_context), + }; + + if matches_module + && config.matches_chain(domain.name()) + && config.matches_module_type(ism_module_type) + { + tracing::trace!( + ?domain, + ism_cache_config =? config, + "Using configured default ISM cache policy" + ); + return config.cache_policy; } - }; - - if root_ism == default_ism - && self.default_ism_cache_config.matches_chain(domain.name()) - && self - .default_ism_cache_config - .matches_module_type(ism_module_type) - { - tracing::trace!( - ?default_ism, - ?domain, - cache_policy =? 
self.default_ism_cache_config.cache_policy, - "Using configured default ISM cache policy" - ); - return self.default_ism_cache_config.cache_policy; } IsmCachePolicy::default() @@ -303,6 +324,7 @@ mod tests { #[test] fn test_ism_cache_config() { let config = IsmCacheConfig { + selector: IsmCacheSelector::DefaultIsm, module_types: HashSet::from([ModuleType::Aggregation]), chains: Some(HashSet::from(["foochain".to_owned()])), cache_policy: IsmCachePolicy::IsmSpecific, @@ -320,24 +342,46 @@ mod tests { // Module type 2 is the numeric version of ModuleType::Aggregation let json = r#" { + "selector": { + "type": "defaultIsm" + }, "moduletypes": [2], "chains": ["foochain"], "cachepolicy": "ismSpecific" } "#; - let config: IsmCacheConfig = serde_json::from_str(json).unwrap(); + assert_eq!(config.selector, IsmCacheSelector::DefaultIsm); assert_eq!( config.module_types, HashSet::from([ModuleType::Aggregation]) ); assert_eq!(config.chains, Some(HashSet::from(["foochain".to_owned()]))); assert_eq!(config.cache_policy, IsmCachePolicy::IsmSpecific); + + let json = r#" + { + "selector": { + "type": "appContext", + "context": "foo" + }, + "moduletypes": [2], + "chains": ["foochain"], + "cachepolicy": "ismSpecific" + } + "#; + let config: IsmCacheConfig = serde_json::from_str(json).unwrap(); + assert_eq!( + config.selector, + IsmCacheSelector::AppContext { + context: "foo".to_string(), + }, + ); } #[tokio::test] - async fn test_ism_cache_policy_classifier() { + async fn test_ism_cache_policy_classifier_default_ism() { let default_ism = H256::zero(); let mock_mailbox = MockMailboxContract::new_with_default_ism(default_ism); @@ -345,37 +389,163 @@ mod tests { let default_ism_getter = DefaultIsmCache::new(mailbox); let default_ism_cache_config = IsmCacheConfig { + selector: IsmCacheSelector::DefaultIsm, module_types: HashSet::from([ModuleType::Aggregation]), chains: Some(HashSet::from(["foochain".to_owned()])), cache_policy: IsmCachePolicy::IsmSpecific, }; let classifier = - 
IsmCachePolicyClassifier::new(default_ism_getter, default_ism_cache_config); + IsmCachePolicyClassifier::new(default_ism_getter, vec![default_ism_cache_config]); // We meet the criteria for the cache policy let domain = HyperlaneDomain::new_test_domain("foochain"); let cache_policy = classifier - .get_cache_policy(default_ism, &domain, ModuleType::Aggregation) + .get_cache_policy(default_ism, &domain, ModuleType::Aggregation, None) .await; assert_eq!(cache_policy, IsmCachePolicy::IsmSpecific); // Different ISM module type, should not match let cache_policy = classifier - .get_cache_policy(default_ism, &domain, ModuleType::Routing) + .get_cache_policy(default_ism, &domain, ModuleType::Routing, None) .await; assert_eq!(cache_policy, IsmCachePolicy::MessageSpecific); // ISM not default ISM, should not match let cache_policy = classifier - .get_cache_policy(H256::repeat_byte(0xfe), &domain, ModuleType::Routing) + .get_cache_policy(H256::repeat_byte(0xfe), &domain, ModuleType::Routing, None) + .await; + assert_eq!(cache_policy, IsmCachePolicy::MessageSpecific); + + // Different domain, should not match + let domain = HyperlaneDomain::new_test_domain("barchain"); + let cache_policy = classifier + .get_cache_policy(default_ism, &domain, ModuleType::Routing, None) + .await; + assert_eq!(cache_policy, IsmCachePolicy::MessageSpecific); + } + + #[tokio::test] + async fn test_ism_cache_policy_classifier_app_context() { + let default_ism = H256::zero(); + let mock_mailbox = MockMailboxContract::new_with_default_ism(default_ism); + let mailbox: Arc = Arc::new(mock_mailbox); + // Unused for this test + let default_ism_getter = DefaultIsmCache::new(mailbox); + + let app_context_cache_config = IsmCacheConfig { + selector: IsmCacheSelector::AppContext { + context: "foo".to_string(), + }, + module_types: HashSet::from([ModuleType::Aggregation]), + chains: Some(HashSet::from(["foochain".to_owned()])), + cache_policy: IsmCachePolicy::IsmSpecific, + }; + + let classifier = + 
IsmCachePolicyClassifier::new(default_ism_getter, vec![app_context_cache_config]); + + // We meet the criteria for the cache policy + let domain = HyperlaneDomain::new_test_domain("foochain"); + let cache_policy = classifier + .get_cache_policy( + // To make extra sure we're testing the app context match, + // let's use a different ISM address + H256::repeat_byte(0xfe), + &domain, + ModuleType::Aggregation, + Some(&"foo".to_string()), + ) + .await; + assert_eq!(cache_policy, IsmCachePolicy::IsmSpecific); + + // Different app context, should not match + let cache_policy = classifier + .get_cache_policy( + default_ism, + &domain, + ModuleType::Routing, + Some(&"bar".to_string()), + ) + .await; + assert_eq!(cache_policy, IsmCachePolicy::MessageSpecific); + + // No app context, should not match + let cache_policy = classifier + .get_cache_policy(H256::repeat_byte(0xfe), &domain, ModuleType::Routing, None) .await; assert_eq!(cache_policy, IsmCachePolicy::MessageSpecific); // Different domain, should not match let domain = HyperlaneDomain::new_test_domain("barchain"); let cache_policy = classifier - .get_cache_policy(default_ism, &domain, ModuleType::Routing) + .get_cache_policy( + default_ism, + &domain, + ModuleType::Routing, + Some(&"foo".to_string()), + ) + .await; + assert_eq!(cache_policy, IsmCachePolicy::MessageSpecific); + } + + #[tokio::test] + async fn test_ism_cache_policy_classifier_multiple_policies() { + let default_ism = H256::zero(); + let mock_mailbox = MockMailboxContract::new_with_default_ism(default_ism); + let mailbox: Arc = Arc::new(mock_mailbox); + // Unused for this test + let default_ism_getter = DefaultIsmCache::new(mailbox); + + let app_context_cache_config = IsmCacheConfig { + selector: IsmCacheSelector::AppContext { + context: "foo".to_string(), + }, + module_types: HashSet::from([ModuleType::Aggregation]), + chains: Some(HashSet::from(["foochain".to_owned()])), + cache_policy: IsmCachePolicy::IsmSpecific, + }; + + let 
default_ism_cache_config = IsmCacheConfig { + selector: IsmCacheSelector::DefaultIsm, + module_types: HashSet::from([ModuleType::Routing]), + chains: Some(HashSet::from(["foochain".to_owned()])), + cache_policy: IsmCachePolicy::IsmSpecific, + }; + + let classifier = IsmCachePolicyClassifier::new( + default_ism_getter, + vec![app_context_cache_config, default_ism_cache_config], + ); + + // We meet the criteria for the app context cache policy + let domain = HyperlaneDomain::new_test_domain("foochain"); + let cache_policy = classifier + .get_cache_policy( + // To make extra sure we're testing the app context match, + // let's use a different ISM address + H256::repeat_byte(0xfe), + &domain, + ModuleType::Aggregation, + Some(&"foo".to_string()), + ) + .await; + assert_eq!(cache_policy, IsmCachePolicy::IsmSpecific); + + // We meet the criteria for the default ISM cache policy + let cache_policy = classifier + .get_cache_policy(default_ism, &domain, ModuleType::Routing, None) + .await; + assert_eq!(cache_policy, IsmCachePolicy::IsmSpecific); + + // Different app context and not default ISM, should not match + let cache_policy = classifier + .get_cache_policy( + H256::repeat_byte(0xfe), + &domain, + ModuleType::Routing, + Some(&"bar".to_string()), + ) .await; assert_eq!(cache_policy, IsmCachePolicy::MessageSpecific); } diff --git a/rust/main/agents/relayer/src/msg/metadata/base_builder.rs b/rust/main/agents/relayer/src/msg/metadata/base_builder.rs index ca029a05e1d..4973e53728f 100644 --- a/rust/main/agents/relayer/src/msg/metadata/base_builder.rs +++ b/rust/main/agents/relayer/src/msg/metadata/base_builder.rs @@ -5,6 +5,7 @@ use std::{collections::HashMap, fmt::Debug, str::FromStr, sync::Arc}; use derive_new::new; use eyre::Context; +use futures::{stream, StreamExt}; use tokio::sync::RwLock; use tracing::{debug, info, warn}; @@ -180,67 +181,92 @@ impl BuildsBaseMetadata for BaseMetadataBuilder { // Only use the most recently announced location for now. 
let mut checkpoint_syncers: HashMap> = HashMap::new(); - for (&validator, validator_storage_locations) in validators.iter().zip(storage_locations) { - debug!(hyp_message=?message, ?validator, ?validator_storage_locations, "Validator and its storage locations for message"); - for storage_location in validator_storage_locations.iter().rev() { - let Ok(config) = CheckpointSyncerConf::from_str(storage_location) else { - debug!( - ?validator, - ?storage_location, - "Could not parse checkpoint syncer config for validator" - ); - continue; - }; - // If this is a LocalStorage based checkpoint syncer and it's not - // allowed, ignore it - if !self.allow_local_checkpoint_syncers - && matches!(config, CheckpointSyncerConf::LocalStorage { .. }) - { - debug!( - ?config, - "Ignoring disallowed LocalStorage based checkpoint syncer" - ); - continue; + let result = validators + .iter() + .zip(storage_locations) + .filter_map(|(validator, validator_storage_locations)| { + debug!(hyp_message=?message, ?validator, ?validator_storage_locations, "Validator and its storage locations for message"); + if validator_storage_locations.is_empty() { + // If the validator has not announced any storage locations, we skip it + // and log a warning. 
+ warn!(?validator, "Validator has not announced any storage locations; see https://docs.hyperlane.xyz/docs/operators/validators/announcing-your-validator"); + return None; } - match config.build_and_validate(None).await { - Ok(checkpoint_syncer) => { - // found the syncer for this validator - checkpoint_syncers.insert(validator.into(), checkpoint_syncer.into()); - break; - } - Err(CheckpointSyncerBuildError::ReorgEvent(reorg_event)) => { - // If a reorg event has been posted to a checkpoint syncer, - // we refuse to build - return Err(CheckpointSyncerBuildError::ReorgEvent(reorg_event)); - } - Err(err) => { - debug!( - error=%err, - ?config, - ?validator, - "Error when loading checkpoint syncer; will attempt to use the next config" - ); + let future = async move { + // Reverse the order of storage locations to prefer the most recently announced + for storage_location in validator_storage_locations.iter().rev() { + let Ok(config) = CheckpointSyncerConf::from_str(storage_location) else { + debug!( + ?validator, + ?storage_location, + "Could not parse checkpoint syncer config for validator" + ); + continue; + }; + + // If this is a LocalStorage based checkpoint syncer and it's not + // allowed, ignore it + if !self.allow_local_checkpoint_syncers + && matches!(config, CheckpointSyncerConf::LocalStorage { .. 
}) + { + debug!( + ?config, + "Ignoring disallowed LocalStorage based checkpoint syncer" + ); + continue; + } + + match config.build_and_validate(None).await { + Ok(checkpoint_syncer) => { + // found the syncer for this validator + return Ok(Some((*validator, checkpoint_syncer))); + } + Err(CheckpointSyncerBuildError::ReorgEvent(reorg_event)) => { + // If a reorg event has been posted to a checkpoint syncer, + // we refuse to build + // This will result in a short circuit and return an error for the entire build process of all syncers + return Err(CheckpointSyncerBuildError::ReorgEvent(reorg_event)); + } + Err(err) => { + debug!( + error=%err, + ?config, + ?validator, + "Error when loading checkpoint syncer; will attempt to use the next config" + ); + } + } } - } - } - if checkpoint_syncers.get(&validator.into()).is_none() { - if validator_storage_locations.is_empty() { - warn!(?validator, "Validator has not announced any storage locations; see https://docs.hyperlane.xyz/docs/operators/validators/announcing-your-validator"); - } else { warn!( ?validator, ?validator_storage_locations, "No valid checkpoint syncer configs for validator" ); - } - } + Ok(None) + }; + Some(future) + }) + .collect::>(); + + let checkpoint_syncers_results = stream::iter(result) + .buffer_unordered(10) // Limit the number of concurrent tasks + .collect::>() + .await + .into_iter() + .collect::, _>>()? 
// Collect results into a single vector and return if any of them returns an error + .into_iter() + .flatten() // Flatten Option<_> + .collect::>(); + + for (validator, checkpoint_syncer) in checkpoint_syncers_results { + checkpoint_syncers.insert(validator.into(), checkpoint_syncer.into()); } + Ok(MultisigCheckpointSyncer::new( checkpoint_syncers, - self.metrics.clone(), - app_context, + app_context.map(|ctx| (self.metrics.clone(), ctx)), )) } } diff --git a/rust/main/agents/relayer/src/msg/metadata/message_builder.rs b/rust/main/agents/relayer/src/msg/metadata/message_builder.rs index c441171afc2..69ffc4d6ef7 100644 --- a/rust/main/agents/relayer/src/msg/metadata/message_builder.rs +++ b/rust/main/agents/relayer/src/msg/metadata/message_builder.rs @@ -215,8 +215,7 @@ mod test { use crate::{ msg::metadata::{ base::MetadataBuildError, message_builder::build_message_metadata, DefaultIsmCache, - IsmAwareAppContextClassifier, IsmCacheConfig, IsmCachePolicyClassifier, - MessageMetadataBuildParams, + IsmAwareAppContextClassifier, IsmCachePolicyClassifier, MessageMetadataBuildParams, }, settings::matching_list::{Filter, ListElement, MatchingList}, test_utils::{ @@ -279,7 +278,7 @@ mod test { base_builder.responses.app_context_classifier = Some(app_context_classifier); base_builder.responses.ism_cache_policy_classifier = Some(IsmCachePolicyClassifier::new( default_ism_getter, - IsmCacheConfig::default(), + Default::default(), )); base_builder } diff --git a/rust/main/agents/relayer/src/msg/metadata/multisig/base.rs b/rust/main/agents/relayer/src/msg/metadata/multisig/base.rs index dbb91eba451..fe83f29d414 100644 --- a/rust/main/agents/relayer/src/msg/metadata/multisig/base.rs +++ b/rust/main/agents/relayer/src/msg/metadata/multisig/base.rs @@ -38,6 +38,8 @@ pub enum MetadataToken { Signatures, } +const MAX_VALIDATOR_SET_SIZE: usize = 50; + #[async_trait] pub trait MultisigIsmMetadataBuilder: AsRef + Send + Sync { fn module_type(&self) -> ModuleType; @@ -117,6 +119,7 @@ 
pub trait MultisigIsmMetadataBuilder: AsRef + Send + Syn self.as_ref().root_ism, multisig_ism.domain(), self.module_type(), + self.as_ref().app_context.as_ref(), ) .await { @@ -185,6 +188,19 @@ impl MetadataBuilder for T { return Err(MetadataBuildError::CouldNotFetch); } + // Dismiss large validator sets + if validators.len() > MAX_VALIDATOR_SET_SIZE { + info!( + ?ism_address, + validator_count = validators.len(), + max_validator_count = MAX_VALIDATOR_SET_SIZE, + "Skipping metadata: Too many validators in ISM" + ); + return Err(MetadataBuildError::MaxValidatorCountReached( + validators.len() as u32, + )); + } + info!(hyp_message=?message, ?validators, threshold, "List of validators and threshold for message"); let checkpoint_syncer = match self diff --git a/rust/main/agents/relayer/src/msg/metadata/routing.rs b/rust/main/agents/relayer/src/msg/metadata/routing.rs index 3fedd4eb3a2..e0eb23df795 100644 --- a/rust/main/agents/relayer/src/msg/metadata/routing.rs +++ b/rust/main/agents/relayer/src/msg/metadata/routing.rs @@ -1,13 +1,14 @@ use async_trait::async_trait; use derive_more::Deref; use derive_new::new; +use hyperlane_base::cache::FunctionCallCache; use tracing::instrument; -use hyperlane_core::{HyperlaneMessage, H256}; +use hyperlane_core::{HyperlaneMessage, ModuleType, H256}; use super::{ - base::MessageMetadataBuildParams, MessageMetadataBuilder, Metadata, MetadataBuildError, - MetadataBuilder, + base::MessageMetadataBuildParams, IsmCachePolicy, MessageMetadataBuilder, Metadata, + MetadataBuildError, MetadataBuilder, }; #[derive(Clone, Debug, new, Deref)] @@ -30,10 +31,84 @@ impl MetadataBuilder for RoutingIsmMetadataBuilder { .build_routing_ism(ism_address) .await .map_err(|err| MetadataBuildError::FailedToBuild(err.to_string()))?; - let module = ism - .route(message) - .await - .map_err(|err| MetadataBuildError::FailedToBuild(err.to_string()))?; + + let ism_domain = ism.domain().name(); + let message_domain = self.base.base_builder().origin_domain(); + let 
fn_key = "route"; + + let cache_policy = self + .base_builder() + .ism_cache_policy_classifier() + .get_cache_policy( + self.root_ism, + ism.domain(), + ModuleType::Routing, + self.base.app_context.as_ref(), + ) + .await; + + let cache_result: Option = match cache_policy { + // if cache is ISM specific, we use the message origin for caching + IsmCachePolicy::IsmSpecific => { + let params_cache_key = (ism.address(), message.origin); + self.base_builder() + .cache() + .get_cached_call_result(ism_domain, fn_key, ¶ms_cache_key) + .await + } + // if cache is Message specific, we use the message id for caching + IsmCachePolicy::MessageSpecific => { + let params_cache_key = (ism.address(), message.id()); + self.base_builder() + .cache() + .get_cached_call_result(ism_domain, fn_key, ¶ms_cache_key) + .await + } + } + .map_err(|err| { + tracing::warn!(error = %err, "Error when caching call result for {:?}", fn_key); + }) + .ok() + .flatten(); + + let module = + match cache_result { + Some(result) => result, + None => { + let module = ism + .route(message) + .await + .map_err(|err| MetadataBuildError::FailedToBuild(err.to_string()))?; + + // store result in cache + match cache_policy { + IsmCachePolicy::IsmSpecific => { + let params_cache_key = (ism.address(), message.origin); + self.base_builder().cache().cache_call_result( + message_domain.name(), + fn_key, + ¶ms_cache_key, + &module, + ).await + } + IsmCachePolicy::MessageSpecific => { + let params_cache_key = (ism.address(), message.id()); + self.base_builder().cache().cache_call_result( + message_domain.name(), + fn_key, + ¶ms_cache_key, + &module, + ).await + } + } + .map_err(|err| { + tracing::warn!(error = %err, "Error when caching call result for {:?}", fn_key); + }) + .ok(); + module + } + }; + self.base.build(module, message, params).await } } diff --git a/rust/main/agents/relayer/src/msg/mod.rs b/rust/main/agents/relayer/src/msg/mod.rs index 5f7a5791d02..c95337526b6 100644 --- 
a/rust/main/agents/relayer/src/msg/mod.rs +++ b/rust/main/agents/relayer/src/msg/mod.rs @@ -30,6 +30,7 @@ pub(crate) mod blacklist; pub(crate) mod gas_payment; pub(crate) mod metadata; +pub(crate) mod op_batch; pub(crate) mod op_queue; pub(crate) mod op_submitter; pub(crate) mod processor; diff --git a/rust/main/agents/relayer/src/msg/op_batch.rs b/rust/main/agents/relayer/src/msg/op_batch.rs new file mode 100644 index 00000000000..30abad29647 --- /dev/null +++ b/rust/main/agents/relayer/src/msg/op_batch.rs @@ -0,0 +1,451 @@ +use std::{sync::Arc, time::Duration}; + +use derive_new::new; +use hyperlane_core::{ + rpc_clients::DEFAULT_MAX_RPC_RETRIES, total_estimated_cost, BatchResult, + ChainCommunicationError, ChainResult, ConfirmReason, HyperlaneDomain, Mailbox, + PendingOperation, PendingOperationStatus, QueueOperation, TxOutcome, +}; +use itertools::{Either, Itertools}; +use tokio::time::sleep; +use tracing::{info, instrument, warn}; + +use super::{ + op_queue::OpQueue, + op_submitter::{submit_single_operation, SerialSubmitterMetrics}, + pending_message::CONFIRM_DELAY, +}; + +const BATCH_RETRY_SLEEP_DURATION: Duration = Duration::from_millis(100); + +#[derive(new, Debug)] +pub(crate) struct OperationBatch { + operations: Vec, + #[allow(dead_code)] + domain: HyperlaneDomain, +} + +impl OperationBatch { + #[instrument(skip_all, fields(domain=%self.domain, batch_size=self.operations.len()))] + pub async fn submit( + self, + prepare_queue: &mut OpQueue, + confirm_queue: &mut OpQueue, + metrics: &SerialSubmitterMetrics, + ) { + let excluded_ops = match self.try_submit_as_batch(metrics).await { + Ok(batch_result) => { + Self::handle_batch_result(self.operations, batch_result, confirm_queue).await + } + Err(e) => { + warn!(error=?e, batch=?self.operations, "Error when submitting batch"); + self.operations + } + }; + + if !excluded_ops.is_empty() { + warn!(excluded_ops=?excluded_ops, "Either operations reverted in the batch or the txid wasn't included. 
Falling back to serial submission."); + OperationBatch::new(excluded_ops, self.domain) + .submit_serially(prepare_queue, confirm_queue, metrics) + .await; + } + } + + #[instrument(skip(self, metrics), ret, level = "debug")] + async fn try_submit_as_batch( + &self, + metrics: &SerialSubmitterMetrics, + ) -> ChainResult { + // We already assume that the relayer submits to a single mailbox per destination. + // So it's fine to use the first item in the batch to get the mailbox. + let Some(first_item) = self.operations.first() else { + return Err(ChainCommunicationError::BatchIsEmpty); + }; + let Some(mailbox) = first_item.try_get_mailbox() else { + // no need to update the metrics since all operations are excluded + return Ok(BatchResult::failed(self.operations.len())); + }; + let outcome = self + .submit_batch_with_retry(mailbox, DEFAULT_MAX_RPC_RETRIES, BATCH_RETRY_SLEEP_DURATION) + .await?; + let ops_submitted = self.operations.len() - outcome.failed_indexes.len(); + metrics.ops_submitted.inc_by(ops_submitted as u64); + Ok(outcome) + } + + async fn submit_batch_with_retry( + &self, + mailbox: Arc, + max_retries: usize, + sleep_period: Duration, + ) -> ChainResult { + if !mailbox.supports_batching() { + return Ok(BatchResult::failed(self.operations.len())); + } + let mut last_error = None; + let ops = self.operations.iter().collect_vec(); + let op_ids = ops.iter().map(|op| op.id()).collect_vec(); + for retry_number in 1..=max_retries { + match mailbox.process_batch(ops.clone()).await { + Ok(res) => return Ok(res), + Err(err) => { + warn!(retries=retry_number, ?max_retries, error=?err, ids=?op_ids, "Retrying batch submission"); + last_error = Some(err); + sleep(sleep_period).await; + } + } + } + let error = last_error.unwrap_or(ChainCommunicationError::BatchingFailed); + Err(error) + } + + /// Process the operations sent by a batch. 
+ /// Returns the operations that were not sent + async fn handle_batch_result( + operations: Vec, + batch_result: BatchResult, + confirm_queue: &mut OpQueue, + ) -> Vec> { + let (sent_ops, excluded_ops): (Vec<_>, Vec<_>) = + operations.into_iter().enumerate().partition_map(|(i, op)| { + if !batch_result.failed_indexes.contains(&i) { + Either::Left(op) + } else { + Either::Right(op) + } + }); + + if let Some(outcome) = batch_result.outcome { + info!(batch_size=sent_ops.len(), outcome=?outcome, batch=?sent_ops, ?excluded_ops, "Submitted transaction batch"); + Self::update_sent_ops_state(sent_ops, outcome, confirm_queue).await; + } + excluded_ops + } + + async fn update_sent_ops_state( + sent_ops: Vec>, + outcome: TxOutcome, + confirm_queue: &mut OpQueue, + ) { + let total_estimated_cost = total_estimated_cost(sent_ops.as_slice()); + for mut op in sent_ops { + op.set_operation_outcome(outcome.clone(), total_estimated_cost); + op.set_next_attempt_after(CONFIRM_DELAY); + confirm_queue + .push( + op, + Some(PendingOperationStatus::Confirm( + ConfirmReason::SubmittedBySelf, + )), + ) + .await; + } + } + + async fn submit_serially( + self, + prepare_queue: &mut OpQueue, + confirm_queue: &mut OpQueue, + metrics: &SerialSubmitterMetrics, + ) { + for op in self.operations.into_iter() { + submit_single_operation(op, prepare_queue, confirm_queue, metrics).await; + } + } +} + +#[cfg(test)] +mod tests { + + use std::{str::FromStr, sync::Arc}; + + use crate::{ + merkle_tree::builder::MerkleTreeBuilder, + msg::{ + gas_payment::GasPaymentEnforcer, + metadata::{ + BaseMetadataBuilder, DefaultIsmCache, IsmAwareAppContextClassifier, + IsmCachePolicyClassifier, + }, + op_queue::test::MockPendingOperation, + pending_message::{MessageContext, PendingMessage}, + processor::test::{ + dummy_cache_metrics, dummy_submission_metrics, DummyApplicationOperationVerifier, + }, + }, + settings::{ + matching_list::MatchingList, GasPaymentEnforcementConf, GasPaymentEnforcementPolicy, + }, + }; + use 
ethers::utils::hex; + use hyperlane_base::{ + cache::{LocalCache, MeteredCache, MeteredCacheConfig, OptionalCache}, + db::{HyperlaneRocksDB, DB}, + settings::{ChainConf, ChainConnectionConf, CoreContractAddresses}, + CoreMetrics, + }; + use hyperlane_core::{ + config::OperationBatchConfig, Decode, HyperlaneMessage, KnownHyperlaneDomain, + MessageSubmissionData, ReorgPeriod, SubmitterType, H160, U256, + }; + use hyperlane_ethereum::{ConnectionConf, RpcConnectionConf}; + use hyperlane_test::mocks::{MockMailboxContract, MockValidatorAnnounceContract}; + use tokio::sync::RwLock; + + use super::*; + + fn dummy_pending_operation( + mailbox: Arc, + domain: HyperlaneDomain, + ) -> Box { + let seconds_to_next_attempt = 10; + let mut mock_pending_operation = + MockPendingOperation::new(seconds_to_next_attempt, domain.clone()); + mock_pending_operation.mailbox = Some(mailbox); + Box::new(mock_pending_operation) as Box + } + + #[tokio::test] + async fn test_handle_batch_result_succeeds() { + let mut mock_mailbox = MockMailboxContract::new(); + let dummy_domain: HyperlaneDomain = KnownHyperlaneDomain::Alfajores.into(); + + mock_mailbox.expect_supports_batching().return_const(true); + mock_mailbox.expect_process_batch().returning(move |_ops| { + let batch_result = BatchResult::new(None, vec![]); + Ok(batch_result) + }); + let mock_mailbox = Arc::new(mock_mailbox) as Arc; + let operation = dummy_pending_operation(mock_mailbox.clone(), dummy_domain.clone()); + + let operations = vec![operation]; + let op_batch = OperationBatch::new(operations, dummy_domain); + let batch_result = op_batch + .submit_batch_with_retry(mock_mailbox, 1, Duration::from_secs(0)) + .await + .unwrap(); + assert!( + batch_result.failed_indexes.is_empty(), + "Batch result should not have failed indexes" + ) + } + + #[tokio::test] + async fn test_handle_batch_result_fails() { + let mut mock_mailbox = MockMailboxContract::new(); + let dummy_domain: HyperlaneDomain = KnownHyperlaneDomain::Alfajores.into(); + + 
mock_mailbox.expect_supports_batching().return_const(true); + mock_mailbox + .expect_process_batch() + .returning(move |_ops| Err(ChainCommunicationError::BatchingFailed)); + let mock_mailbox = Arc::new(mock_mailbox) as Arc; + let operation = dummy_pending_operation(mock_mailbox.clone(), dummy_domain.clone()); + + let operations = vec![operation]; + let op_batch = OperationBatch::new(operations, dummy_domain); + let result = op_batch + .submit_batch_with_retry(mock_mailbox, 1, Duration::from_secs(0)) + .await; + assert!(matches!( + result, + Err(ChainCommunicationError::BatchingFailed) + )); + } + + #[tokio::test] + async fn test_handle_batch_succeeds_eventually() { + let _ = tracing_subscriber::fmt() + .with_max_level(tracing::Level::DEBUG) + .try_init(); + let mut mock_mailbox = MockMailboxContract::new(); + let dummy_domain: HyperlaneDomain = KnownHyperlaneDomain::Alfajores.into(); + + let mut counter = 0; + mock_mailbox.expect_supports_batching().return_const(true); + mock_mailbox.expect_process_batch().returning(move |_ops| { + counter += 1; + if counter < 5 { + return Err(ChainCommunicationError::BatchingFailed); + } + let batch_result = BatchResult::new(None, vec![]); + Ok(batch_result) + }); + let mock_mailbox = Arc::new(mock_mailbox) as Arc; + let operation = dummy_pending_operation(mock_mailbox.clone(), dummy_domain.clone()); + + let operations = vec![operation]; + let op_batch = OperationBatch::new(operations, dummy_domain); + let batch_result = op_batch + .submit_batch_with_retry(mock_mailbox, 10, Duration::from_secs(0)) + .await + .unwrap(); + assert!( + batch_result.failed_indexes.is_empty(), + "Batch result should not have failed indexes" + ); + } + + #[tokio::test] + async fn test_handle_batch_result_fails_if_not_supported() { + let mut mock_mailbox = MockMailboxContract::new(); + let dummy_domain: HyperlaneDomain = KnownHyperlaneDomain::Alfajores.into(); + + mock_mailbox.expect_supports_batching().return_const(false); + 
mock_mailbox.expect_process_batch().returning(move |_ops| { + let batch_result = BatchResult::new(None, vec![]); + Ok(batch_result) + }); + let mock_mailbox = Arc::new(mock_mailbox) as Arc; + let operation = dummy_pending_operation(mock_mailbox.clone(), dummy_domain.clone()); + + let operations = vec![operation]; + let op_batch = OperationBatch::new(operations, dummy_domain); + let batch_result = op_batch + .submit_batch_with_retry(mock_mailbox, 1, Duration::from_secs(0)) + .await + .unwrap(); + assert!( + batch_result.failed_indexes.len() == 1, + "Batching should fail if not supported" + ) + } + + #[tokio::test] + #[ignore] + async fn benchmarking_with_real_rpcs() { + let _ = tracing_subscriber::fmt() + .with_max_level(tracing::Level::DEBUG) + .try_init(); + + let arb_chain_conf = ChainConf { + domain: HyperlaneDomain::Known(hyperlane_core::KnownHyperlaneDomain::Arbitrum), + // TODO + signer: None, + submitter: SubmitterType::Classic, + estimated_block_time: Duration::from_secs(1), + reorg_period: ReorgPeriod::from_blocks(10), + addresses: CoreContractAddresses { + mailbox: H160::from_str("0x979Ca5202784112f4738403dBec5D0F3B9daabB9") + .unwrap() + .into(), + validator_announce: H160::from_str("0x1df063280C4166AF9a725e3828b4dAC6c7113B08") + .unwrap() + .into(), + ..Default::default() + }, + connection: ChainConnectionConf::Ethereum(ConnectionConf { + rpc_connection: RpcConnectionConf::HttpFallback { + urls: vec![ + "https://arbitrum.drpc.org".parse().unwrap(), + "https://endpoints.omniatech.io/v1/arbitrum/one/public" + .parse() + .unwrap(), + ], + }, + transaction_overrides: Default::default(), + operation_batch: OperationBatchConfig { + batch_contract_address: None, + max_batch_size: 32, + bypass_batch_simulation: false, + }, + }), + metrics_conf: Default::default(), + index: Default::default(), + }; + + // https://explorer.hyperlane.xyz/message/0x29160a18c6e27c2f14ebe021207ac3f90664507b9c5aacffd802b2afcc15788a + // Base -> Arbitrum, uses the default ISM + let 
message_bytes = hex::decode("0300139ebf000021050000000000000000000000005454cf5584939f7f884e95dba33fecd6d40b8fe20000a4b1000000000000000000000000fd34afdfbac1e47afc539235420e4be4a206f26d0000000000000000000000008650ee37ba2b0a8ac5954a04b46ee07093eab7f90000000000000000000000000000000000000000000000004563918244f40000").unwrap(); + let message = HyperlaneMessage::read_from(&mut &message_bytes[..]).unwrap(); + let base_domain = HyperlaneDomain::new_test_domain("base"); + let temp_dir = tempfile::tempdir().unwrap(); + let db = DB::from_path(temp_dir.path()).unwrap(); + let base_db = HyperlaneRocksDB::new(&base_domain, db); + + let core_metrics = CoreMetrics::new("test", 9090, Default::default()).unwrap(); + let arb_mailbox: Arc = arb_chain_conf + .build_mailbox(&core_metrics) + .await + .unwrap() + .into(); + + let cache = OptionalCache::new(Some(MeteredCache::new( + LocalCache::new("test-cache"), + dummy_cache_metrics(), + MeteredCacheConfig { + cache_name: "test-cache".to_owned(), + }, + ))); + let base_va = Arc::new(MockValidatorAnnounceContract::default()); + let default_ism_getter = DefaultIsmCache::new(arb_mailbox.clone()); + let core_metrics = Arc::new(core_metrics); + let metadata_builder = BaseMetadataBuilder::new( + base_domain.clone(), + arb_chain_conf.clone(), + Arc::new(RwLock::new(MerkleTreeBuilder::new())), + base_va, + false, + core_metrics.clone(), + cache.clone(), + base_db.clone(), + IsmAwareAppContextClassifier::new(default_ism_getter.clone(), vec![]), + IsmCachePolicyClassifier::new(default_ism_getter, Default::default()), + ); + let message_context = Arc::new(MessageContext { + destination_mailbox: arb_mailbox, + origin_db: Arc::new(base_db.clone()), + cache: cache.clone(), + metadata_builder: Arc::new(metadata_builder), + origin_gas_payment_enforcer: Arc::new(GasPaymentEnforcer::new( + vec![GasPaymentEnforcementConf { + policy: GasPaymentEnforcementPolicy::None, + matching_list: MatchingList::default(), + }], + base_db.clone(), + )), + 
transaction_gas_limit: Default::default(), + metrics: dummy_submission_metrics(), + application_operation_verifier: Some(Arc::new(DummyApplicationOperationVerifier {})), + }); + + let attempts = 2; + let batch_size = 32; + + let mut pending_messages = vec![]; + // Message found here https://basescan.org/tx/0x65345812a1f7df6236292d52d50418a090c84e2c901912bede6cadb9810a9882#eventlog + let metadata = + "0x000000100000001000000010000001680000000000000000000000100000015800000000000000000000000019dc38aeae620380430c200a6e990d5af5480117dbd3d5e656de9dcf604fcc90b52a3b97d9f3573b4a0733e824f1358e515698cf00139eaa5452e030aa937f6b14162a44ec3327f6832bbf16e4b0d6df452524af1c1a04e875b4ce7ac0da92aa08838a89f2a126eef23f6b6a08b6cdbe9e9e804b321088b91b034f9466eed2da1dcc36cb220b887b15f3e111a179142c27e4a0b6d6b7a291e22577d6296d82b7c3f29e8989ec1161d853aba0982b2db28b9a9917226c2c27111c41c99e6a84e7717740f901528062385e659b4330e7227593a334be532d27bcf24f3f13bf4fc1a860e96f8d6937984ea83ef61c8ea30d48cc903f6ff725406a4d1ce73f46064b3403ea4c720b770f4389d7259b275f085c6a98cef9a04880a249b42c382ba34a63031debbfb5b9b232ffd9ee45ff63a7249e83c7e9720f9e978a431b".as_bytes().to_vec(); + + for b in 0..batch_size { + let mut pending_message = PendingMessage::new( + message.clone(), + message_context.clone(), + PendingOperationStatus::FirstPrepareAttempt, + Some(format!("test-{}", b)), + attempts, + ); + pending_message.submission_data = Some(Box::new(MessageSubmissionData { + metadata: metadata.clone(), + gas_limit: U256::from(615293), + })); + pending_messages.push(pending_message); + } + + let arb_domain = HyperlaneDomain::new_test_domain("arbitrum"); + let serial_submitter_metrics = + SerialSubmitterMetrics::new(core_metrics.clone(), &arb_domain); + + let operation_batch = OperationBatch::new( + pending_messages + .into_iter() + .map(|msg| Box::new(msg) as Box) + .collect(), + arb_domain, + ); + operation_batch + .try_submit_as_batch(&serial_submitter_metrics) + .await + .unwrap(); + } +} diff --git 
a/rust/main/agents/relayer/src/msg/op_queue.rs b/rust/main/agents/relayer/src/msg/op_queue.rs index 7209631b73d..7ca5452d841 100644 --- a/rust/main/agents/relayer/src/msg/op_queue.rs +++ b/rust/main/agents/relayer/src/msg/op_queue.rs @@ -168,8 +168,8 @@ pub mod test { use hyperlane_core::{ ChainResult, HyperlaneDomain, HyperlaneDomainProtocol, HyperlaneDomainTechnicalStack, - HyperlaneDomainType, HyperlaneMessage, KnownHyperlaneDomain, PendingOperationResult, - ReprepareReason, TryBatchAs, TxOutcome, H256, U256, + HyperlaneDomainType, HyperlaneMessage, KnownHyperlaneDomain, Mailbox, + PendingOperationResult, ReprepareReason, TryBatchAs, TxOutcome, H256, U256, }; use crate::{ @@ -189,6 +189,8 @@ pub mod test { seconds_to_next_attempt: u64, destination_domain: HyperlaneDomain, retry_count: u32, + #[serde(skip)] + pub mailbox: Option>, } impl MockPendingOperation { @@ -202,6 +204,7 @@ pub mod test { recipient_address: H256::random(), origin_domain_id: 0, retry_count: 0, + mailbox: None, } } @@ -221,6 +224,7 @@ pub mod test { domain_protocol: HyperlaneDomainProtocol::Ethereum, domain_technical_stack: HyperlaneDomainTechnicalStack::Other, }, + mailbox: None, } } diff --git a/rust/main/agents/relayer/src/msg/op_submitter.rs b/rust/main/agents/relayer/src/msg/op_submitter.rs index ccb381d651a..47d83e4da20 100644 --- a/rust/main/agents/relayer/src/msg/op_submitter.rs +++ b/rust/main/agents/relayer/src/msg/op_submitter.rs @@ -5,31 +5,29 @@ use std::fmt::Debug; use std::sync::Arc; use std::time::Duration; -use derive_new::new; use futures::future::join_all; use futures_util::future::try_join_all; -use itertools::{Either, Itertools}; use num_traits::Zero; use prometheus::{IntCounter, IntGaugeVec}; use tokio::sync::{broadcast::Sender, mpsc, Mutex}; use tokio::task::JoinHandle; use tokio::time::sleep; use tokio_metrics::TaskMonitor; -use tracing::{debug, error, info, info_span, instrument, trace, warn, Instrument}; +use tracing::{debug, error, info_span, instrument, trace, 
warn, Instrument}; use hyperlane_base::db::{HyperlaneDb, HyperlaneRocksDB}; use hyperlane_base::CoreMetrics; use hyperlane_core::{ - total_estimated_cost, BatchResult, ChainCommunicationError, ChainResult, ConfirmReason::{self, *}, - HyperlaneDomain, HyperlaneDomainProtocol, PendingOperation, PendingOperationResult, - PendingOperationStatus, QueueOperation, ReprepareReason, TxOutcome, + HyperlaneDomain, HyperlaneDomainProtocol, PendingOperationResult, PendingOperationStatus, + QueueOperation, ReprepareReason, }; use submitter::{Entrypoint, FullPayload, PayloadDispatcherEntrypoint, PayloadId}; use crate::msg::pending_message::CONFIRM_DELAY; use crate::server::MessageRetryRequest; +use super::op_batch::OperationBatch; use super::op_queue::OpQueue; use super::op_queue::OperationPriorityQueue; @@ -525,7 +523,7 @@ async fn prepare_op( } #[instrument(skip(prepare_queue, confirm_queue, metrics), ret, level = "debug")] -async fn submit_single_operation( +pub(crate) async fn submit_single_operation( mut op: QueueOperation, prepare_queue: &mut OpQueue, confirm_queue: &mut OpQueue, @@ -796,138 +794,39 @@ async fn send_back_on_failed_submisison( #[derive(Debug, Clone)] pub struct SerialSubmitterMetrics { - submitter_queue_length: IntGaugeVec, - ops_prepared: IntCounter, - ops_submitted: IntCounter, - ops_confirmed: IntCounter, - ops_failed: IntCounter, - ops_dropped: IntCounter, + pub(crate) submitter_queue_length: IntGaugeVec, + pub(crate) ops_prepared: IntCounter, + pub(crate) ops_submitted: IntCounter, + pub(crate) ops_confirmed: IntCounter, + pub(crate) ops_failed: IntCounter, + pub(crate) ops_dropped: IntCounter, } impl SerialSubmitterMetrics { - pub fn new(metrics: &CoreMetrics, destination: &HyperlaneDomain) -> Self { + pub fn new(metrics: impl AsRef, destination: &HyperlaneDomain) -> Self { let destination = destination.name(); Self { - submitter_queue_length: metrics.submitter_queue_length(), + submitter_queue_length: metrics.as_ref().submitter_queue_length(), 
ops_prepared: metrics + .as_ref() .operations_processed_count() .with_label_values(&["prepared", destination]), ops_submitted: metrics + .as_ref() .operations_processed_count() .with_label_values(&["submitted", destination]), ops_confirmed: metrics + .as_ref() .operations_processed_count() .with_label_values(&["confirmed", destination]), ops_failed: metrics + .as_ref() .operations_processed_count() .with_label_values(&["failed", destination]), ops_dropped: metrics + .as_ref() .operations_processed_count() .with_label_values(&["dropped", destination]), } } } - -#[derive(new, Debug)] -struct OperationBatch { - operations: Vec, - #[allow(dead_code)] - domain: HyperlaneDomain, -} - -impl OperationBatch { - async fn submit( - self, - prepare_queue: &mut OpQueue, - confirm_queue: &mut OpQueue, - metrics: &SerialSubmitterMetrics, - ) { - let excluded_ops = match self.try_submit_as_batch(metrics).await { - Ok(batch_result) => { - Self::handle_batch_result(self.operations, batch_result, confirm_queue).await - } - Err(e) => { - warn!(error=?e, batch=?self.operations, "Error when submitting batch"); - self.operations - } - }; - - if !excluded_ops.is_empty() { - warn!(excluded_ops=?excluded_ops, "Either operations reverted in the batch or the txid wasn't included. Falling back to serial submission."); - OperationBatch::new(excluded_ops, self.domain) - .submit_serially(prepare_queue, confirm_queue, metrics) - .await; - } - } - - #[instrument(skip(metrics), ret, level = "debug")] - async fn try_submit_as_batch( - &self, - metrics: &SerialSubmitterMetrics, - ) -> ChainResult { - // We already assume that the relayer submits to a single mailbox per destination. - // So it's fine to use the first item in the batch to get the mailbox. 
- let Some(first_item) = self.operations.first() else { - return Err(ChainCommunicationError::BatchIsEmpty); - }; - let outcome = if let Some(mailbox) = first_item.try_get_mailbox() { - mailbox - .try_process_batch(self.operations.iter().collect_vec()) - .await? - } else { - BatchResult::failed(self.operations.len()) - }; - let ops_submitted = self.operations.len() - outcome.failed_indexes.len(); - metrics.ops_submitted.inc_by(ops_submitted as u64); - Ok(outcome) - } - - /// Process the operations sent by a batch. - /// Returns the operations that were not sent - async fn handle_batch_result( - operations: Vec, - batch_result: BatchResult, - confirm_queue: &mut OpQueue, - ) -> Vec> { - let (sent_ops, excluded_ops): (Vec<_>, Vec<_>) = - operations.into_iter().enumerate().partition_map(|(i, op)| { - if !batch_result.failed_indexes.contains(&i) { - Either::Left(op) - } else { - Either::Right(op) - } - }); - - if let Some(outcome) = batch_result.outcome { - info!(batch_size=sent_ops.len(), outcome=?outcome, batch=?sent_ops, ?excluded_ops, "Submitted transaction batch"); - Self::update_sent_ops_state(sent_ops, outcome, confirm_queue).await; - } - excluded_ops - } - - async fn update_sent_ops_state( - sent_ops: Vec>, - outcome: TxOutcome, - confirm_queue: &mut OpQueue, - ) { - let total_estimated_cost = total_estimated_cost(sent_ops.as_slice()); - for mut op in sent_ops { - op.set_operation_outcome(outcome.clone(), total_estimated_cost); - op.set_next_attempt_after(CONFIRM_DELAY); - confirm_queue - .push(op, Some(PendingOperationStatus::Confirm(SubmittedBySelf))) - .await; - } - } - - async fn submit_serially( - self, - prepare_queue: &mut OpQueue, - confirm_queue: &mut OpQueue, - metrics: &SerialSubmitterMetrics, - ) { - for op in self.operations.into_iter() { - submit_single_operation(op, prepare_queue, confirm_queue, metrics).await; - } - } -} diff --git a/rust/main/agents/relayer/src/msg/pending_message.rs b/rust/main/agents/relayer/src/msg/pending_message.rs index 
6c623ca3dcc..13713535060 100644 --- a/rust/main/agents/relayer/src/msg/pending_message.rs +++ b/rust/main/agents/relayer/src/msg/pending_message.rs @@ -96,7 +96,7 @@ pub struct PendingMessage { submitted: bool, #[new(default)] #[serde(skip_serializing)] - submission_data: Option>, + pub(crate) submission_data: Option>, #[new(default)] num_retries: u32, #[new(value = "Instant::now()")] @@ -1015,6 +1015,10 @@ impl PendingMessage { warn!(threshold, "Aggregation threshold not met"); self.on_reprepare(Some(err), ReprepareReason::CouldNotFetchMetadata) } + MetadataBuildError::MaxValidatorCountReached(count) => { + warn!(count, "Max validator count reached"); + self.on_reprepare(Some(err), ReprepareReason::ErrorBuildingMetadata) + } }); let build_metadata_end = Instant::now(); diff --git a/rust/main/agents/relayer/src/msg/processor.rs b/rust/main/agents/relayer/src/msg/processor.rs index 59410fed31e..27e27f192fd 100644 --- a/rust/main/agents/relayer/src/msg/processor.rs +++ b/rust/main/agents/relayer/src/msg/processor.rs @@ -404,7 +404,7 @@ impl MessageProcessorMetrics { } #[cfg(test)] -mod test { +pub mod test { use std::time::Instant; use prometheus::{CounterVec, IntCounter, IntCounterVec, Opts, Registry}; @@ -442,7 +442,7 @@ mod test { msg::{ gas_payment::GasPaymentEnforcer, metadata::{ - BaseMetadataBuilder, DefaultIsmCache, IsmAwareAppContextClassifier, IsmCacheConfig, + BaseMetadataBuilder, DefaultIsmCache, IsmAwareAppContextClassifier, IsmCachePolicyClassifier, }, }, @@ -451,7 +451,7 @@ mod test { use super::*; - struct DummyApplicationOperationVerifier {} + pub struct DummyApplicationOperationVerifier {} #[async_trait] impl ApplicationOperationVerifier for DummyApplicationOperationVerifier { @@ -464,7 +464,7 @@ mod test { } } - fn dummy_processor_metrics(domain_id: u32) -> MessageProcessorMetrics { + pub fn dummy_processor_metrics(domain_id: u32) -> MessageProcessorMetrics { MessageProcessorMetrics { max_last_known_message_nonce_gauge: IntGauge::new( 
"dummy_max_last_known_message_nonce_gauge", @@ -478,7 +478,7 @@ mod test { } } - fn dummy_cache_metrics() -> MeteredCacheMetrics { + pub fn dummy_cache_metrics() -> MeteredCacheMetrics { MeteredCacheMetrics { hit_count: IntCounterVec::new( prometheus::Opts::new("dummy_hit_count", "help string"), @@ -493,7 +493,7 @@ mod test { } } - fn dummy_submission_metrics() -> MessageSubmissionMetrics { + pub fn dummy_submission_metrics() -> MessageSubmissionMetrics { MessageSubmissionMetrics { origin: "".to_string(), destination: "".to_string(), @@ -562,7 +562,7 @@ mod test { cache, db.clone(), IsmAwareAppContextClassifier::new(default_ism_getter.clone(), vec![]), - IsmCachePolicyClassifier::new(default_ism_getter, IsmCacheConfig::default()), + IsmCachePolicyClassifier::new(default_ism_getter, Default::default()), ) } diff --git a/rust/main/agents/relayer/src/relayer.rs b/rust/main/agents/relayer/src/relayer.rs index eca980b8074..66b96f94e29 100644 --- a/rust/main/agents/relayer/src/relayer.rs +++ b/rust/main/agents/relayer/src/relayer.rs @@ -346,7 +346,7 @@ impl BaseAgent for Relayer { ), IsmCachePolicyClassifier::new( default_ism_getter.clone(), - settings.default_ism_cache_config.clone(), + settings.ism_cache_configs.clone(), ), ); @@ -1088,6 +1088,7 @@ mod test { operation_batch: OperationBatchConfig { batch_contract_address: None, max_batch_size: 1, + ..Default::default() }, }), metrics_conf: PrometheusMiddlewareConf { @@ -1132,7 +1133,7 @@ mod test { allow_local_checkpoint_syncers: true, metric_app_contexts: Vec::new(), allow_contract_call_caching: true, - default_ism_cache_config: Default::default(), + ism_cache_configs: Default::default(), max_retries: 1, } } diff --git a/rust/main/agents/relayer/src/server/environment_variable.rs b/rust/main/agents/relayer/src/server/environment_variable.rs new file mode 100644 index 00000000000..ded358ef5a9 --- /dev/null +++ b/rust/main/agents/relayer/src/server/environment_variable.rs @@ -0,0 +1,188 @@ +use std::env; + +use 
axum::{extract::State, routing, Json, Router}; +use derive_new::new; +use serde::{Deserialize, Serialize}; + +const ENVIRONMENT_VARIABLE: &str = "/environment_variable"; + +#[derive(Clone, Debug, PartialEq, Eq, Deserialize)] +pub struct SetEnvironmentVariableRequest { + name: String, + value: Option, +} + +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)] +pub struct EnvironmentVariableResponse { + name: String, + value: Option, + message: String, +} + +#[derive(new, Clone)] +pub struct EnvironmentVariableApi {} + +async fn get_environment_variable( + State(_): State, + Json(body): Json, +) -> Result, String> { + let value = env::var(&body.name).ok(); + + let response = EnvironmentVariableResponse { + name: body.name, + value, + message: "got".to_string(), + }; + + Ok(Json(response)) +} + +async fn set_environment_variable( + State(_): State, + Json(body): Json, +) -> Result, String> { + let message = match &body.value { + None => { + env::remove_var(&body.name); + "unset" + } + Some(value) => { + env::set_var(&body.name, value); + "set" + } + }; + + let response = EnvironmentVariableResponse { + name: body.name, + value: body.value, + message: message.to_string(), + }; + + Ok(Json(response)) +} + +impl EnvironmentVariableApi { + pub fn router(&self) -> Router { + Router::new() + .route("/", routing::get(get_environment_variable)) + .route("/", routing::post(set_environment_variable)) + .with_state(self.clone()) + } + + pub fn get_route(&self) -> (&'static str, Router) { + (ENVIRONMENT_VARIABLE, self.router()) + } +} + +#[cfg(test)] +mod tests { + use std::env::VarError::NotPresent; + use std::net::SocketAddr; + + use axum::http::StatusCode; + use serde_json::{json, Value}; + + use super::*; + + const NAME: &str = "TEST_ENVIRONMENT_VAR"; + const VALUE: &str = "TEST_VALUE"; + + #[derive(Debug)] + struct TestServerSetup { + pub socket_address: SocketAddr, + } + + fn setup_test_server() -> TestServerSetup { + let api = EnvironmentVariableApi::new(); + let 
(path, router) = api.get_route(); + + let app = Router::new().nest(path, router); + + let server = + axum::Server::bind(&"127.0.0.1:0".parse().unwrap()).serve(app.into_make_service()); + let addr = server.local_addr(); + tokio::spawn(server); + + TestServerSetup { + socket_address: addr, + } + } + + #[tracing_test::traced_test] + #[tokio::test] + async fn test_environment_variable() { + let TestServerSetup { + socket_address: addr, + .. + } = setup_test_server(); + + let set = set(); + let response = request(addr, &set, true).await; + assert_eq!(NAME, response.name); + assert_eq!(Some(VALUE.to_string()), response.value); + assert_eq!("set", response.message); + assert_eq!(VALUE, env::var(NAME).unwrap()); + + let get = get_or_remove(); + let response = request(addr, &get, false).await; + assert_eq!(NAME, response.name); + assert_eq!(Some(VALUE.to_string()), response.value); + assert_eq!("got", response.message); + assert_eq!(VALUE, env::var(NAME).unwrap()); + + let remove = get_or_remove(); + let response = request(addr, &remove, true).await; + assert_eq!(NAME, response.name); + assert_eq!(None, response.value); + assert_eq!("unset", response.message); + assert_eq!(Err(NotPresent), env::var(NAME)); + + let get = get_or_remove(); + let response = request(addr, &get, false).await; + assert_eq!(NAME, response.name); + assert_eq!(None, response.value); + assert_eq!("got", response.message); + assert_eq!(Err(NotPresent), env::var(NAME)); + } + + async fn request(addr: SocketAddr, body: &Value, post: bool) -> EnvironmentVariableResponse { + let client = reqwest::Client::new(); + + let builder = if post { + client.post(format!("http://{}{}", addr, ENVIRONMENT_VARIABLE)) + } else { + client.get(format!("http://{}{}", addr, ENVIRONMENT_VARIABLE)) + }; + + let request = builder.json(&body).build().unwrap(); + let response = tokio::spawn(client.execute(request)) + .await + .unwrap() + .unwrap(); + + assert_eq!(response.status(), StatusCode::OK); + + let response = response + 
.json::() + .await + .unwrap(); + response + } + + fn set() -> Value { + json!( + { + "name": NAME, + "value": VALUE, + } + ) + } + + fn get_or_remove() -> Value { + json!( + { + "name": NAME, + } + ) + } +} diff --git a/rust/main/agents/relayer/src/server/mod.rs b/rust/main/agents/relayer/src/server/mod.rs index 03019d98353..c01af7310c5 100644 --- a/rust/main/agents/relayer/src/server/mod.rs +++ b/rust/main/agents/relayer/src/server/mod.rs @@ -1,6 +1,7 @@ use axum::Router; use derive_new::new; use std::collections::HashMap; +use std::env; use tokio::sync::broadcast::Sender; use crate::msg::op_queue::OperationPriorityQueue; @@ -10,6 +11,9 @@ pub const ENDPOINT_MESSAGES_QUEUE_SIZE: usize = 100; pub use list_messages::*; pub use message_retry::*; +use crate::server::environment_variable::EnvironmentVariableApi; + +mod environment_variable; mod list_messages; mod message_retry; @@ -44,6 +48,13 @@ impl Server { routes.push(ListOperationsApi::new(op_queues).get_route()); } + let expose_environment_variable_endpoint = + env::var("HYPERLANE_RELAYER_ENVIRONMENT_VARIABLE_ENDPOINT_ENABLED") + .map_or(false, |v| v == "true"); + if expose_environment_variable_endpoint { + routes.push(EnvironmentVariableApi::new().get_route()); + } + routes } } diff --git a/rust/main/agents/relayer/src/settings/mod.rs b/rust/main/agents/relayer/src/settings/mod.rs index bc5cc2791b9..e3f4e259f14 100644 --- a/rust/main/agents/relayer/src/settings/mod.rs +++ b/rust/main/agents/relayer/src/settings/mod.rs @@ -66,8 +66,8 @@ pub struct RelayerSettings { pub metric_app_contexts: Vec<(MatchingList, String)>, /// Whether to allow contract call caching at all. pub allow_contract_call_caching: bool, - /// The default ISM cache policy to use for all messages that use the default ISM. 
- pub default_ism_cache_config: IsmCacheConfig, + /// The ISM cache policies to use + pub ism_cache_configs: Vec, /// Maximum number of retries per operation pub max_retries: u32, } @@ -329,10 +329,10 @@ impl FromRawConf for RelayerSettings { .parse_bool() .unwrap_or(true); - let default_ism_cache_config = p + let ism_cache_configs = p .chain(&mut err) - .get_opt_key("defaultIsmCacheConfig") - .and_then(parse_ism_cache_config) + .get_opt_key("ismCacheConfigs") + .and_then(parse_ism_cache_configs) .unwrap_or_default(); let max_message_retries = p @@ -355,7 +355,7 @@ impl FromRawConf for RelayerSettings { allow_local_checkpoint_syncers, metric_app_contexts, allow_contract_call_caching, - default_ism_cache_config, + ism_cache_configs, max_retries: max_message_retries, }) } @@ -397,37 +397,16 @@ fn parse_matching_list(p: ValueParser) -> ConfigResult { err.into_result(ml) } -fn parse_json_object(p: ValueParser) -> Option<(ConfigPath, Value)> { +fn parse_ism_cache_configs(p: ValueParser) -> ConfigResult> { let mut err = ConfigParsingError::default(); - match p { - ValueParser { - val: Value::String(array_str), - cwp, - } => serde_json::from_str::(array_str) - .context("Expected JSON string") - .take_err(&mut err, || cwp.clone()) - .map(|v| (cwp, recase_json_value(v, Case::Flat))), - ValueParser { - val: value @ Value::Object(_), - cwp, - } => Some((cwp, value.clone())), - _ => Err(eyre!("Expected JSON object or stringified JSON")) - .take_err(&mut err, || p.cwp.clone()), - } -} - -fn parse_ism_cache_config(p: ValueParser) -> ConfigResult { - let mut err = ConfigParsingError::default(); - - let raw_object = parse_json_object(p.clone()).map(|(_, v)| v); - let Some(raw_object) = raw_object else { - return err.into_result(IsmCacheConfig::default()); + let raw_list = parse_json_array(p.clone()).map(|(_, v)| v); + let Some(raw_list) = raw_list else { + return err.into_result(Default::default()); }; - - let p = ValueParser::new(p.cwp.clone(), &raw_object); + let p = 
ValueParser::new(p.cwp.clone(), &raw_list); let ml = p - .parse_value::("Expected ISM cache config") + .parse_value::>("Expected ISM cache configs") .take_config_err(&mut err) .unwrap_or_default(); @@ -482,4 +461,34 @@ mod test { assert_eq!(res, vec![valid_address1, valid_address2]); assert!(!err.is_ok()); } + + #[test] + fn test_parse_ism_cache_configs() { + let raw = r#" + [ + { + "selector": { + "type": "defaultIsm" + }, + "moduletypes": [2], + "chains": ["foochain"], + "cachepolicy": "ismSpecific" + }, + { + "selector": { + "type": "appContext", + "context": "foo" + }, + "moduletypes": [2], + "chains": ["foochain"], + "cachepolicy": "ismSpecific" + } + ] + "#; + + let value = serde_json::from_str::(raw).unwrap(); + let p = ValueParser::new(ConfigPath::default(), &value); + let configs = parse_ism_cache_configs(p).unwrap(); + assert_eq!(configs.len(), 2); + } } diff --git a/rust/main/agents/scraper/Cargo.toml b/rust/main/agents/scraper/Cargo.toml index 937d4b09a02..e5b9a40f99d 100644 --- a/rust/main/agents/scraper/Cargo.toml +++ b/rust/main/agents/scraper/Cargo.toml @@ -20,7 +20,7 @@ itertools.workspace = true num-bigint.workspace = true num-traits.workspace = true prometheus.workspace = true -sea-orm = { workspace = true } +sea-orm = { workspace = true, features = ["mock"] } serde.workspace = true serde_json.workspace = true thiserror.workspace = true @@ -35,7 +35,6 @@ migration = { path = "migration" } [dev-dependencies] reqwest.workspace = true -sea-orm = { workspace = true, features = ["mock"]} tokio-test = "0.4" tracing-test.workspace = true ethers-prometheus = { path = "../../ethers-prometheus", features = ["serde"] } diff --git a/rust/main/agents/scraper/src/agent.rs b/rust/main/agents/scraper/src/agent.rs index 12277ce8602..be28db8f6f1 100644 --- a/rust/main/agents/scraper/src/agent.rs +++ b/rust/main/agents/scraper/src/agent.rs @@ -445,6 +445,7 @@ mod test { operation_batch: OperationBatchConfig { batch_contract_address: None, max_batch_size: 1, + 
..Default::default() }, }), metrics_conf: PrometheusMiddlewareConf { diff --git a/rust/main/agents/validator/Cargo.toml b/rust/main/agents/validator/Cargo.toml index 44d507993d8..7c9ac96dea6 100644 --- a/rust/main/agents/validator/Cargo.toml +++ b/rust/main/agents/validator/Cargo.toml @@ -10,6 +10,7 @@ version.workspace = true [dependencies] async-trait.workspace = true +aws-config.workspace = true axum.workspace = true chrono.workspace = true config.workspace = true @@ -37,6 +38,9 @@ hyperlane-base = { path = "../../hyperlane-base" } hyperlane-ethereum = { path = "../../chains/hyperlane-ethereum" } hyperlane-cosmos = { path = "../../chains/hyperlane-cosmos" } +# Dependency version is determined by ethers +rusoto_core = '*' + [dev-dependencies] mockall.workspace = true tokio-test.workspace = true diff --git a/rust/main/agents/validator/src/settings.rs b/rust/main/agents/validator/src/settings.rs index c95a7ad2bde..07d0e57692f 100644 --- a/rust/main/agents/validator/src/settings.rs +++ b/rust/main/agents/validator/src/settings.rs @@ -6,6 +6,7 @@ use std::{collections::HashSet, path::PathBuf, time::Duration}; +use aws_config::Region; use derive_more::{AsMut, AsRef, Deref, DerefMut}; use eyre::{eyre, Context}; use hyperlane_base::{ @@ -259,7 +260,8 @@ fn parse_checkpoint_syncer(syncer: ValueParser) -> ConfigResult = syncer .chain(&mut err) .get_key("region") .parse_from_str("Expected aws region") @@ -274,7 +276,7 @@ fn parse_checkpoint_syncer(syncer: ValueParser) -> ConfigResult WasmGrpcProvider { OperationBatchConfig { batch_contract_address: None, max_batch_size: 1, + ..Default::default() }, NativeToken { decimals: 6, diff --git a/rust/main/chains/hyperlane-ethereum/Cargo.toml b/rust/main/chains/hyperlane-ethereum/Cargo.toml index ecc004d0295..b1b4fbd01fd 100644 --- a/rust/main/chains/hyperlane-ethereum/Cargo.toml +++ b/rust/main/chains/hyperlane-ethereum/Cargo.toml @@ -10,6 +10,7 @@ version.workspace = true [dependencies] # Main block async-trait.workspace = true 
+dashmap.workspace = true derive-new.workspace = true ethers-contract.workspace = true ethers-core.workspace = true diff --git a/rust/main/chains/hyperlane-ethereum/src/contracts/mailbox.rs b/rust/main/chains/hyperlane-ethereum/src/contracts/mailbox.rs index 07f1b2269d7..a0fec99e02c 100644 --- a/rust/main/chains/hyperlane-ethereum/src/contracts/mailbox.rs +++ b/rust/main/chains/hyperlane-ethereum/src/contracts/mailbox.rs @@ -8,6 +8,9 @@ use std::sync::Arc; use async_trait::async_trait; use derive_new::new; use ethers::prelude::Middleware; +use ethers::providers::ProviderError; +use ethers::types::transaction::eip2718::TypedTransaction; +use ethers::types::{Block, BlockNumber, H256 as TxHash}; use ethers_contract::builders::ContractCall; use ethers_contract::{Multicall, MulticallResult}; use ethers_core::utils::WEI_IN_ETHER; @@ -15,6 +18,7 @@ use futures_util::future::join_all; use hyperlane_core::rpc_clients::call_and_retry_indefinitely; use hyperlane_core::{BatchResult, QueueOperation, ReorgPeriod, H512}; use itertools::Itertools; +use tokio::sync::Mutex; use tracing::instrument; use hyperlane_core::{ @@ -28,7 +32,9 @@ use crate::error::HyperlaneEthereumError; use crate::interfaces::arbitrum_node_interface::ArbitrumNodeInterface; use crate::interfaces::i_mailbox::{IMailbox as EthereumMailboxInternal, IMAILBOX_ABI}; use crate::interfaces::mailbox::DispatchFilter; -use crate::tx::{call_with_reorg_period, fill_tx_gas_params, report_tx}; +use crate::tx::{ + call_with_reorg_period, estimate_eip1559_fees, fill_tx_gas_params, report_tx, Eip1559Fee, +}; use crate::{ BuildableWithProvider, ConnectionConf, EthereumProvider, EthereumReorgPeriod, TransactionOverrides, @@ -275,6 +281,14 @@ where provider: Arc, arbitrum_node_interface: Option>>, conn: ConnectionConf, + cache: Arc>, +} + +#[derive(Debug, Default)] +pub struct EthereumMailboxCache { + pub is_contract: HashMap, + pub latest_block: Option>, + pub eip1559_fee: Option, } impl EthereumMailbox @@ -304,16 +318,15 @@ 
where provider, arbitrum_node_interface, conn: conn.clone(), + cache: Default::default(), } } - /// Returns a ContractCall that processes the provided message. - async fn process_contract_call( + fn contract_call( &self, message: &HyperlaneMessage, metadata: &[u8], tx_gas_estimate: Option, - with_gas_estimate_buffer: bool, ) -> ChainResult> { let mut tx = self.contract.process( metadata.to_vec().into(), @@ -322,6 +335,18 @@ where if let Some(gas_estimate) = tx_gas_estimate { tx = tx.gas(gas_estimate); } + Ok(tx) + } + + /// Returns a ContractCall that processes the provided message. + async fn process_contract_call( + &self, + message: &HyperlaneMessage, + metadata: &[u8], + tx_gas_estimate: Option, + with_gas_estimate_buffer: bool, + ) -> ChainResult> { + let tx = self.contract_call(message, metadata, tx_gas_estimate)?; fill_tx_gas_params( tx, @@ -329,6 +354,7 @@ where &self.conn.transaction_overrides.clone(), &self.domain, with_gas_estimate_buffer, + self.cache.clone(), ) .await } @@ -380,6 +406,53 @@ where domain: self.domain.clone(), } } + + async fn submit_multicall( + &self, + multicall: &mut Multicall, + contract_calls: Vec>, + cache: Arc>, + ) -> ChainResult { + let batch = multicall::batch::<_, ()>(multicall, contract_calls.clone()).await?; + let call_with_gas_overrides = fill_tx_gas_params( + batch, + self.provider.clone(), + &self.conn.transaction_overrides.clone(), + &self.domain, + true, + cache, + ) + .await?; + let outcome = report_tx(call_with_gas_overrides).await?; + Ok(BatchResult::new(Some(outcome.into()), vec![])) + } + + async fn simulate_and_submit_batch( + &self, + multicall: &mut Multicall, + contract_calls: Vec>, + cache: Arc>, + ) -> ChainResult { + let batch_simulation = self.simulate_batch(multicall, contract_calls).await?; + batch_simulation.try_submit(cache).await + } + + async fn refresh_block_and_fee_cache(&self, tx: &TypedTransaction) -> ChainResult<()> { + let latest_block = self + .provider + .get_block(BlockNumber::Latest) + 
.await + .map_err(ChainCommunicationError::from_other)? + .ok_or_else(|| ProviderError::CustomError("Latest block not found".into()))?; + let eip1559_fee = + estimate_eip1559_fees(self.provider.clone(), None, &latest_block, &self.domain, tx) + .await + .ok(); + let mut cache = self.cache.lock().await; + cache.latest_block = Some(latest_block); + cache.eip1559_fee = eip1559_fee; + Ok(()) + } } #[derive(new)] @@ -397,9 +470,12 @@ impl BatchSimulation { } impl BatchSimulation { - pub async fn try_submit(self) -> ChainResult { + pub async fn try_submit( + self, + cache: Arc>, + ) -> ChainResult { if let Some(submittable_batch) = self.call { - let batch_outcome = submittable_batch.submit().await?; + let batch_outcome = submittable_batch.submit(cache).await?; Ok(BatchResult::new( Some(batch_outcome), self.excluded_call_indexes, @@ -418,13 +494,14 @@ pub struct SubmittableBatch { } impl SubmittableBatch { - pub async fn submit(self) -> ChainResult { + pub async fn submit(self, cache: Arc>) -> ChainResult { let call_with_gas_overrides = fill_tx_gas_params( self.call, self.provider, &self.transaction_overrides, &self.domain, true, + cache, ) .await?; let outcome = report_tx(call_with_gas_overrides).await?; @@ -504,37 +581,62 @@ where Ok(receipt.into()) } + /// Returns true if the mailbox supports batching + fn supports_batching(&self) -> bool { + true + } + #[instrument(skip(self, ops), fields(size=%ops.len()))] - async fn try_process_batch<'a>( - &self, - ops: Vec<&'a QueueOperation>, - ) -> ChainResult { + async fn process_batch<'a>(&self, ops: Vec<&'a QueueOperation>) -> ChainResult { let messages = ops .iter() .map(|op| op.try_batch()) .collect::>>>()?; - let mut multicall = build_multicall(self.provider.clone(), &self.conn, self.domain.clone()) - .await - .map_err(|e| HyperlaneEthereumError::MulticallError(e.to_string()))?; - let contract_call_futures = messages + let mut multicall = build_multicall( + self.provider.clone(), + &self.conn, + self.domain.clone(), + 
self.cache.clone(), + ) + .await + .map_err(|e| HyperlaneEthereumError::MulticallError(e.to_string()))?; + + let contract_calls = messages .iter() - .map(|batch_item| async { - self.process_contract_call( + .map(|batch_item| { + self.contract_call( &batch_item.data, &batch_item.submission_data.metadata, Some(batch_item.submission_data.gas_limit), - true, ) - .await }) - .collect::>(); - let contract_calls = join_all(contract_call_futures) + .collect::>>()?; + if let Some(contract_call) = contract_calls.first() { + self.refresh_block_and_fee_cache(&contract_call.tx).await?; + } + let filled_tx_params_futures = contract_calls.iter().map(|tx| { + fill_tx_gas_params( + tx.clone(), + self.provider.clone(), + &self.conn.transaction_overrides, + &self.domain, + true, + self.cache.clone(), + ) + }); + let contract_calls = join_all(filled_tx_params_futures) .await .into_iter() .collect::>>()?; - let batch_simulation = self.simulate_batch(&mut multicall, contract_calls).await?; - batch_simulation.try_submit().await + if self.conn.operation_batch.bypass_batch_simulation { + // submit the tx without checking if subcalls would revert + self.submit_multicall(&mut multicall, contract_calls, self.cache.clone()) + .await + } else { + self.simulate_and_submit_batch(&mut multicall, contract_calls, self.cache.clone()) + .await + } } #[instrument(skip(self), fields(msg=%message, metadata=%bytes_to_hex(metadata)))] diff --git a/rust/main/chains/hyperlane-ethereum/src/contracts/multicall.rs b/rust/main/chains/hyperlane-ethereum/src/contracts/multicall.rs index 68d5703e7d3..1011f9456c0 100644 --- a/rust/main/chains/hyperlane-ethereum/src/contracts/multicall.rs +++ b/rust/main/chains/hyperlane-ethereum/src/contracts/multicall.rs @@ -5,10 +5,13 @@ use ethers_contract::{builders::ContractCall, Multicall, MulticallResult, Multic use hyperlane_core::{ utils::hex_or_base58_to_h256, ChainResult, HyperlaneDomain, HyperlaneProvider, U256, }; +use tokio::sync::Mutex; use tracing::warn; use 
crate::{ConnectionConf, EthereumProvider}; +use super::EthereumMailboxCache; + const ALLOW_BATCH_FAILURES: bool = true; /// Conservative estimate picked by subtracting the gas used by individual calls from the total cost of `aggregate3` @@ -21,13 +24,27 @@ pub async fn build_multicall( provider: Arc, conn: &ConnectionConf, domain: HyperlaneDomain, + cache: Arc>, ) -> eyre::Result> { let address = conn .operation_batch .batch_contract_address .unwrap_or(hex_or_base58_to_h256("0xcA11bde05977b3631167028862bE2a173976CA11").unwrap()); - let ethereum_provider = EthereumProvider::new(provider.clone(), domain); - if !ethereum_provider.is_contract(&address).await? { + let is_contract_cache = { + let cache = cache.lock().await; + cache.is_contract.get(&address).cloned() + }; + let is_contract = match is_contract_cache { + Some(is_contract) => is_contract, + None => { + let ethereum_provider = EthereumProvider::new(provider.clone(), domain); + let is_contract = ethereum_provider.is_contract(&address).await?; + cache.lock().await.is_contract.insert(address, is_contract); + is_contract + } + }; + + if !is_contract { return Err(eyre::eyre!("Multicall contract not found at address")); } let multicall = match Multicall::new(provider.clone(), Some(address.into())).await { diff --git a/rust/main/chains/hyperlane-ethereum/src/contracts/validator_announce.rs b/rust/main/chains/hyperlane-ethereum/src/contracts/validator_announce.rs index 099a5c5caef..a51a255e439 100644 --- a/rust/main/chains/hyperlane-ethereum/src/contracts/validator_announce.rs +++ b/rust/main/chains/hyperlane-ethereum/src/contracts/validator_announce.rs @@ -98,6 +98,8 @@ where &self.conn.transaction_overrides, &self.domain, true, + // pass an empty value as the cache + Default::default(), ) .await } diff --git a/rust/main/chains/hyperlane-ethereum/src/rpc_clients/fallback.rs b/rust/main/chains/hyperlane-ethereum/src/rpc_clients/fallback.rs index a252635aa91..5750bc7b150 100644 --- 
a/rust/main/chains/hyperlane-ethereum/src/rpc_clients/fallback.rs +++ b/rust/main/chains/hyperlane-ethereum/src/rpc_clients/fallback.rs @@ -98,7 +98,7 @@ where type Error = ProviderError; // TODO: Refactor to use `FallbackProvider::call` - #[instrument] + #[instrument(skip(self, params))] async fn request(&self, method: &str, params: T) -> Result where T: Debug + Serialize + Send + Sync, diff --git a/rust/main/chains/hyperlane-ethereum/src/rpc_clients/trait_builder.rs b/rust/main/chains/hyperlane-ethereum/src/rpc_clients/trait_builder.rs index 7cd78eb5907..98138a9e102 100644 --- a/rust/main/chains/hyperlane-ethereum/src/rpc_clients/trait_builder.rs +++ b/rust/main/chains/hyperlane-ethereum/src/rpc_clients/trait_builder.rs @@ -1,9 +1,10 @@ use std::fmt::Debug; use std::str::FromStr; -use std::sync::Arc; +use std::sync::{Arc, OnceLock}; use std::time::Duration; use async_trait::async_trait; +use dashmap::DashMap; use ethers::middleware::gas_escalator::{Frequency, GasEscalatorMiddleware, GeometricGasPrice}; use ethers::middleware::gas_oracle::{ GasCategory, GasOracle, GasOracleMiddleware, Polygon, ProviderOracle, @@ -295,7 +296,27 @@ where GasEscalatorMiddleware::new(provider, escalator, FREQUENCY) } +/// Builds a new HTTP provider with the given URL. fn build_http_provider(url: Url) -> ChainResult { + let client = get_reqwest_client(&url)?; + Ok(Http::new_with_client(url, client)) +} + +/// Gets a cached reqwest client for the given URL, or builds a new one if it doesn't exist. +fn get_reqwest_client(url: &Url) -> ChainResult { + let client_cache = get_reqwest_client_cache(); + if let Some(client) = client_cache.get(url) { + return Ok(client.clone()); + } + let client = build_new_reqwest_client(url.clone())?; + client_cache.insert(url.clone(), client.clone()); + Ok(client) +} + +/// Builds a new reqwest client with the given URL. +/// Generally `get_reqwest_client` should be used instead of this function, +/// as it caches the client for reuse. 
+fn build_new_reqwest_client(url: Url) -> ChainResult { let mut queries_to_keep = vec![]; let mut headers = reqwest::header::HeaderMap::new(); @@ -325,11 +346,20 @@ fn build_http_provider(url: Url) -> ChainResult { .clear() .extend_pairs(queries_to_keep); - let http_client = Client::builder() + let client = Client::builder() .timeout(HTTP_CLIENT_TIMEOUT) .default_headers(headers) .build() .map_err(EthereumProviderConnectionError::from)?; - Ok(Http::new_with_client(url, http_client)) + Ok(client) +} + +/// A cache for reqwest clients, indexed by URL. +/// Generally creating a new Reqwest client is expensive due to some DNS +/// resolutions, so we cache them for reuse. +static REQWEST_CLIENT_CACHE: OnceLock> = OnceLock::new(); + +fn get_reqwest_client_cache() -> &'static DashMap { + REQWEST_CLIENT_CACHE.get_or_init(DashMap::new) } diff --git a/rust/main/chains/hyperlane-ethereum/src/tx.rs b/rust/main/chains/hyperlane-ethereum/src/tx.rs index 830d75df45a..788dee4ab4a 100644 --- a/rust/main/chains/hyperlane-ethereum/src/tx.rs +++ b/rust/main/chains/hyperlane-ethereum/src/tx.rs @@ -19,12 +19,12 @@ use ethers_core::{ }, }; use hyperlane_core::{ - utils::bytes_to_hex, ChainCommunicationError, ChainResult, HyperlaneDomain, ReorgPeriod, H256, - U256, + ChainCommunicationError, ChainResult, HyperlaneDomain, ReorgPeriod, H256, U256, }; -use tracing::{debug, error, info, warn}; +use tokio::sync::Mutex; +use tracing::{debug, error, info, instrument, warn}; -use crate::{EthereumReorgPeriod, Middleware, TransactionOverrides}; +use crate::{EthereumMailboxCache, EthereumReorgPeriod, Middleware, TransactionOverrides}; /// An amount of gas to add to the estimated gas pub const GAS_ESTIMATE_BUFFER: u32 = 75_000; @@ -60,19 +60,13 @@ where M: Middleware + 'static, D: Detokenize, { - let data = tx - .tx - .data() - .map(|b| bytes_to_hex(b)) - .unwrap_or_else(|| "None".into()); - let to = tx .tx .to() .cloned() .unwrap_or_else(|| NameOrAddress::Address(Default::default())); - info!(?to, 
%data, tx=?tx.tx, "Dispatching transaction"); + info!(?to, from=?tx.tx.from(), gas_limit=?tx.tx.gas(), gas_price=?tx.tx.gas_price(), nonce=?tx.tx.nonce(), "Dispatching transaction"); let dispatch_fut = tx.send(); let dispatched = dispatch_fut .await? @@ -80,6 +74,7 @@ where track_pending_tx(dispatched).await } +#[instrument(skip(pending_tx))] pub(crate) async fn track_pending_tx( pending_tx: PendingTransaction<'_, P>, ) -> ChainResult { @@ -116,6 +111,7 @@ pub(crate) async fn fill_tx_gas_params( transaction_overrides: &TransactionOverrides, domain: &HyperlaneDomain, with_gas_limit_overrides: bool, + cache: Arc>, ) -> ChainResult> where M: Middleware + 'static, @@ -134,13 +130,20 @@ where } } let gas_limit = estimated_gas_limit; + let (cached_latest_block, cached_eip1559_fee) = { + let cache = cache.lock().await; + (cache.latest_block.clone(), cache.eip1559_fee) + }; // Cap the gas limit to the block gas limit - let latest_block = provider - .get_block(BlockNumber::Latest) - .await - .map_err(ChainCommunicationError::from_other)? - .ok_or_else(|| ProviderError::CustomError("Latest block not found".into()))?; + let latest_block = match cached_latest_block { + Some(block) => block, + None => provider + .get_block(BlockNumber::Latest) + .await + .map_err(ChainCommunicationError::from_other)? 
+ .ok_or_else(|| ProviderError::CustomError("Latest block not found".into()))?, + }; let block_gas_limit: U256 = latest_block.gas_limit.into(); let gas_limit = if gas_limit > block_gas_limit { warn!( @@ -159,9 +162,11 @@ where return Ok(tx.gas_price(gas_price).gas(gas_limit)); } - let Ok((base_fee, max_fee, max_priority_fee)) = - estimate_eip1559_fees(provider, None, &latest_block, domain, &tx.tx).await - else { + let eip1559_fee_result = match cached_eip1559_fee { + Some(fee) => Ok(fee), + None => estimate_eip1559_fees(provider.clone(), None, &latest_block, domain, &tx.tx).await, + }; + let Ok((base_fee, max_fee, max_priority_fee)) = eip1559_fee_result else { // Is not EIP 1559 chain return Ok(tx.gas(gas_limit)); }; @@ -208,14 +213,20 @@ where type FeeEstimator = fn(EthersU256, Vec>) -> (EthersU256, EthersU256); +pub type Eip1559Fee = ( + EthersU256, // base fee + EthersU256, // max fee + EthersU256, // max priority fee +); + /// Use this to estimate EIP 1559 fees with some chain-specific logic. 
-async fn estimate_eip1559_fees( +pub(crate) async fn estimate_eip1559_fees( provider: Arc, estimator: Option, latest_block: &Block, domain: &HyperlaneDomain, tx: &TypedTransaction, -) -> ChainResult<(EthersU256, EthersU256, EthersU256)> +) -> ChainResult where M: Middleware + 'static, { @@ -230,7 +241,7 @@ async fn estimate_eip1559_fees_zksync( provider: Arc, latest_block: &Block, tx: &TypedTransaction, -) -> ChainResult<(EthersU256, EthersU256, EthersU256)> +) -> ChainResult where M: Middleware + 'static, { diff --git a/rust/main/hyperlane-base/Cargo.toml b/rust/main/hyperlane-base/Cargo.toml index 1e326e41c65..0e3372d7039 100644 --- a/rust/main/hyperlane-base/Cargo.toml +++ b/rust/main/hyperlane-base/Cargo.toml @@ -10,11 +10,14 @@ version.workspace = true [dependencies] async-trait.workspace = true axum.workspace = true +aws-config.workspace = true +aws-sdk-s3.workspace = true bs58.workspace = true color-eyre = { workspace = true, optional = true } config.workspace = true console-subscriber.workspace = true convert_case.workspace = true +dashmap.workspace = true derive-new.workspace = true derive_builder.workspace = true ed25519-dalek.workspace = true @@ -65,7 +68,6 @@ hyperlane-sealevel = { path = "../chains/hyperlane-sealevel" } # dependency version is determined by etheres rusoto_core = "*" rusoto_kms = "*" -rusoto_s3 = "*" rusoto_sts = "*" [dev-dependencies] diff --git a/rust/main/hyperlane-base/src/cache/moka/dynamic_expiry.rs b/rust/main/hyperlane-base/src/cache/moka/dynamic_expiry.rs index 26767e29e77..0cd73178155 100644 --- a/rust/main/hyperlane-base/src/cache/moka/dynamic_expiry.rs +++ b/rust/main/hyperlane-base/src/cache/moka/dynamic_expiry.rs @@ -1,11 +1,24 @@ -use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use std::{ + sync::OnceLock, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; use chrono::{offset::LocalResult, TimeZone, Utc}; use moka::Expiry; use serde::{Deserialize, Serialize}; /// Default expiration time for cache entries. 
-pub const DEFAULT_EXPIRATION: Duration = Duration::from_secs(60 * 2); +static DEFAULT_EXPIRATION: OnceLock = OnceLock::new(); + +pub fn default_expiration() -> Duration { + *DEFAULT_EXPIRATION.get_or_init(|| { + let secs = std::env::var("HYP_CACHEDEFAULTEXPIRATIONSECONDS") + .ok() + .and_then(|s| s.parse::().ok()) + .unwrap_or(120); // default: 2 minutes + Duration::from_secs(secs) + }) +} /// The type of expiration for a cache entry. /// @@ -55,7 +68,7 @@ impl Expiration { .or(Some(Duration::ZERO)) } ExpirationType::Never => None, - ExpirationType::Default => Some(DEFAULT_EXPIRATION), + ExpirationType::Default => Some(default_expiration()), } } diff --git a/rust/main/hyperlane-base/src/cache/moka/mod.rs b/rust/main/hyperlane-base/src/cache/moka/mod.rs index 8148b925ba5..e86d88697db 100644 --- a/rust/main/hyperlane-base/src/cache/moka/mod.rs +++ b/rust/main/hyperlane-base/src/cache/moka/mod.rs @@ -116,7 +116,7 @@ mod test { use hyperlane_core::{H256, U256}; - use crate::cache::moka::dynamic_expiry::DEFAULT_EXPIRATION; + use crate::cache::moka::dynamic_expiry::default_expiration; use super::*; @@ -300,7 +300,7 @@ mod test { // The second entry should have a TTL between 5 and 10 seconds .is_some_and(|duration| duration.as_secs() > 5 && duration.as_secs() <= 10), 2 => ttl.is_some_and(|duration| { - let default_secs = DEFAULT_EXPIRATION.as_secs(); + let default_secs = default_expiration().as_secs(); // The third entry should have a TTL of > 90% of the default duration.as_secs() > ((default_secs * 9) / 10) && duration.as_secs() <= default_secs diff --git a/rust/main/hyperlane-base/src/settings/checkpoint_syncer.rs b/rust/main/hyperlane-base/src/settings/checkpoint_syncer.rs index b1335f12e9d..bd1af317ef9 100644 --- a/rust/main/hyperlane-base/src/settings/checkpoint_syncer.rs +++ b/rust/main/hyperlane-base/src/settings/checkpoint_syncer.rs @@ -2,11 +2,11 @@ use crate::{ CheckpointSyncer, GcsStorageClientBuilder, LocalStorage, S3Storage, GCS_SERVICE_ACCOUNT_KEY, 
GCS_USER_SECRET, }; +use aws_config::Region; use core::str::FromStr; use eyre::{eyre, Context, Report, Result}; use hyperlane_core::{ChainCommunicationError, ReorgEvent}; use prometheus::IntGauge; -use rusoto_core::Region; use std::{env, path::PathBuf}; use tracing::error; use ya_gcp::{AuthFlow, ServiceAccountAuth}; @@ -76,9 +76,15 @@ impl FromStr for CheckpointSyncerConf { Ok(CheckpointSyncerConf::S3 { bucket: bucket.into(), folder, - region: region - .parse() - .context("Invalid region when parsing storage location")?, + // Wildly, aws_config doesn't provide any client-side way to validate a region string, so while + // we still have Rusoto around we just use that to validate the region string :) + region: aws_config::Region::new( + region + .parse::() + .context("Invalid region when parsing storage location")? + .name() + .to_owned(), + ), }) } "file" => Ok(CheckpointSyncerConf::LocalStorage { diff --git a/rust/main/hyperlane-base/src/settings/parser/mod.rs b/rust/main/hyperlane-base/src/settings/parser/mod.rs index 418ffa67fe3..0069aa8f088 100644 --- a/rust/main/hyperlane-base/src/settings/parser/mod.rs +++ b/rust/main/hyperlane-base/src/settings/parser/mod.rs @@ -220,6 +220,12 @@ fn parse_chain( .parse_u32() .unwrap_or(1); + let bypass_batch_simulation = chain + .chain(&mut err) + .get_opt_key("bypassBatchSimulation") + .parse_bool() + .unwrap_or(false); + cfg_unwrap_all!(&chain.cwp, err: [domain]); let connection = build_connection_conf( domain.domain_protocol(), @@ -230,6 +236,7 @@ fn parse_chain( OperationBatchConfig { batch_contract_address, max_batch_size, + bypass_batch_simulation, }, ); diff --git a/rust/main/hyperlane-base/src/settings/trace/mod.rs b/rust/main/hyperlane-base/src/settings/trace/mod.rs index 9f06b3ac0b8..ff10f5743d0 100644 --- a/rust/main/hyperlane-base/src/settings/trace/mod.rs +++ b/rust/main/hyperlane-base/src/settings/trace/mod.rs @@ -76,6 +76,8 @@ impl TracingConfig { .with_target("tendermint", Level::Info) .with_target("tokio", 
Level::Debug) .with_target("tokio_util", Level::Debug) + .with_target("aws_smithy", Level::Info) + .with_target("aws_sdk", Level::Info) // Enable Trace level for Tokio if you want to use tokio-console // .with_target("tokio", Level::Trace) // .with_target("tokio_util", Level::Trace) diff --git a/rust/main/hyperlane-base/src/types/multisig.rs b/rust/main/hyperlane-base/src/types/multisig.rs index 35fe3715098..9cbbfb40117 100644 --- a/rust/main/hyperlane-base/src/types/multisig.rs +++ b/rust/main/hyperlane-base/src/types/multisig.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use derive_new::new; use eyre::Result; +use futures::StreamExt; use tracing::{debug, instrument, warn}; use hyperlane_core::{ @@ -17,8 +18,7 @@ use crate::{CheckpointSyncer, CoreMetrics}; pub struct MultisigCheckpointSyncer { /// The checkpoint syncer for each valid validator signer address checkpoint_syncers: HashMap>, - metrics: Arc, - app_context: Option, + metrics: Option<(Arc, String)>, // first arg is the metrics, second is the app context } impl MultisigCheckpointSyncer { @@ -31,42 +31,56 @@ impl MultisigCheckpointSyncer { validators: &[H256], origin: &HyperlaneDomain, destination: &HyperlaneDomain, - ) -> Vec { + ) -> Vec<(H160, u32)> { // Get the latest_index from each validator's checkpoint syncer. // If a validator does not return a latest index, None is recorded so // this can be surfaced in the metrics. 
let mut latest_indices: HashMap> = HashMap::with_capacity(validators.len()); - for validator in validators { - let address = H160::from(*validator); - debug!( - ?address, - "Getting latest checkpoint from validator via checkpoint syncer", - ); - if let Some(checkpoint_syncer) = self.checkpoint_syncers.get(&address) { - // Gracefully handle errors getting the latest_index - match checkpoint_syncer.latest_index().await { - Ok(Some(index)) => { - debug!(?address, ?index, "Validator returned latest index"); - latest_indices.insert(H160::from(*validator), Some(index)); - } - result => { - debug!( - ?address, - ?result, - "Failed to get latest index from validator" - ); - latest_indices.insert(H160::from(*validator), None); - } + let syncer = validators + .iter() + .map(|v| H160::from(*v)) + .filter_map(|v| { + if let Some(checkpoint_syncer) = self.checkpoint_syncers.get(&v) { + Some((v, checkpoint_syncer)) + } else { + warn!(validator=%v, "Checkpoint syncer is not provided for validator"); + None + } + }) + .collect::>(); + let futures = syncer + .iter() + .map( + |(v, checkpoint_syncer)| async move { (v, checkpoint_syncer.latest_index().await) }, + ) + .collect::>(); + + let validator_index_results = futures::stream::iter(futures) + .buffer_unordered(10) + .collect::>() + .await; + + for (validator, latest_index) in validator_index_results { + match latest_index { + Ok(Some(index)) => { + debug!(?validator, ?index, "Validator returned latest index"); + latest_indices.insert(*validator, Some(index)); + } + result => { + debug!( + ?validator, + ?result, + "Failed to get latest index from validator" + ); + latest_indices.insert(*validator, None); } - } else { - warn!(?address, "Checkpoint syncer is not provided for validator"); } } - if let Some(app_context) = &self.app_context { - self.metrics + if let Some((metrics, app_context)) = &self.metrics { + metrics .validator_metrics .set_validator_latest_checkpoints( origin, @@ -78,7 +92,10 @@ impl MultisigCheckpointSyncer { } 
// Filter out any validators that did not return a latest index - latest_indices.values().copied().flatten().collect() + latest_indices + .into_iter() + .filter_map(|(address, index)| index.map(|i| (address, i))) + .collect() } /// Attempts to get the latest checkpoint with a quorum of signatures among @@ -119,8 +136,15 @@ impl MultisigCheckpointSyncer { // Sort in descending order. The n'th index will represent // the highest index for which we (supposedly) have (n+1) signed checkpoints - latest_indices.sort_by(|a, b| b.cmp(a)); - if let Some(&highest_quorum_index) = latest_indices.get(threshold - 1) { + latest_indices.sort_by(|a, b| b.1.cmp(&a.1)); + + // create a slice with the sorted validators + let validators = latest_indices + .iter() + .map(|(address, _)| H256::from(*address)) + .collect::>(); + + if let Some(&(_, highest_quorum_index)) = latest_indices.get(threshold - 1) { // The highest viable checkpoint index is the minimum of the highest index // we (supposedly) have a quorum for, and the maximum index for which we can // generate a proof. @@ -129,9 +153,10 @@ impl MultisigCheckpointSyncer { debug!(%start_index, %highest_quorum_index, "Highest quorum index is below the minimum index"); return Ok(None); } + for index in (minimum_index..=start_index).rev() { if let Ok(Some(checkpoint)) = - self.fetch_checkpoint(validators, threshold, index).await + self.fetch_checkpoint(&validators, threshold, index).await { return Ok(Some(checkpoint)); } @@ -157,14 +182,39 @@ impl MultisigCheckpointSyncer { let mut signed_checkpoints_per_root: HashMap> = HashMap::new(); - for validator in validators.iter() { - let addr = H160::from(*validator); - if let Some(checkpoint_syncer) = self.checkpoint_syncers.get(&addr) { + // we iterate in batches of N=threshold*1.5 to avoid waiting for all validators. + // This reaches a quorum faster without having to fetch all the signatures. 
+ + // Also limit this number in case we have a large threshold + let batch_size = (threshold as f64 * 1.5) as usize; + let batch_size = batch_size.clamp(1, 10); + + for validators in validators.chunks(batch_size) { + // Go through each validator and get the checkpoint syncer. + // Create a future for each validator that fetches its signed checkpoint + let futures = validators + .iter() + .filter_map(|address| { + if let Some(syncer) = self.checkpoint_syncers.get(&H160::from(*address)) { + Some((address, syncer)) + } else { + debug!(validator=%address, "Checkpoint syncer not found"); + None + } + }) + .map(|(address, syncer)| { + let checkpoint_syncer = syncer.clone(); + async move { (address, checkpoint_syncer.fetch_checkpoint(index).await) } + }) + .collect::>(); + + let checkpoints = futures::future::join_all(futures).await; + + for (validator, checkpoint) in checkpoints { // Gracefully ignore an error fetching the checkpoint from a validator's // checkpoint syncer, which can happen if the validator has not // signed the checkpoint at `index`. 
- if let Ok(Some(signed_checkpoint)) = checkpoint_syncer.fetch_checkpoint(index).await - { + if let Ok(Some(signed_checkpoint)) = checkpoint { // If the signed checkpoint is for a different index, ignore it if signed_checkpoint.value.index != index { debug!( @@ -216,12 +266,135 @@ impl MultisigCheckpointSyncer { "Unable to find signed checkpoint" ); } - } else { - debug!(%validator, "Unable to find checkpoint syncer"); - continue; } } debug!("No quorum checkpoint found for message"); Ok(None) } } + +#[cfg(test)] +mod test { + + use std::str::FromStr; + + use aws_config::Region; + use hyperlane_core::KnownHyperlaneDomain; + + use crate::S3Storage; + + use super::*; + + #[tokio::test] + #[ignore] + #[tracing_test::traced_test] + async fn test_s3_checkpoint_syncer() { + let validators = vec![ + ( + "0x4d966438fe9E2B1e7124c87bBB90cB4F0F6C59a1", + ( + "hyperlane-mainnet3-arbitrum-validator-0".to_string(), + Region::new("us-east-1"), + ), + ), + ( + "0x5450447aeE7B544c462C9352bEF7cAD049B0C2Dc", + ( + "zpl-hyperlane-v3-arbitrum".to_string(), + Region::new("eu-central-1"), + ), + ), + ( + "0xec68258A7c882AC2Fc46b81Ce80380054fFB4eF2", + ( + "dsrv-hyperlane-v3-validator-signatures-validator7-arbitrum".to_string(), + Region::new("eu-central-1"), + ), + ), + ( + "0x38C7A4ca1273ead2E867d096aDBCDD0e2AcB21D8", + ( + "hyperlane-v3-validator-signatures-everstake-one-arbitrum".to_string(), + Region::new("us-east-2"), + ), + ), + ( + "0xb3AC35d3988bCA8C2fFD195b1c6bee18536B317b", + ( + "can-outrun-imperial-starships-v3-arbitrum".to_string(), + Region::new("eu-west-1"), + ), + ), + ( + "0x14d0B24d3a8F3aAD17DB4b62cBcEC12821c98Cb3", + ( + "hyperlane-validator-signatures-bwarelabs-ethereum/arbitrum".to_string(), + Region::new("eu-north-1"), + ), + ), + ( + "0xc4b877Dd49ABe9B38EA9184683f9664c0F9FADe3", + ( + "arbitrum-validator-signatures/arbitrum".to_string(), + Region::new("us-east-1"), + ), + ), + ]; + + let syncers = validators + .iter() + .map(|(address, (bucket, region))| { + let 
syncer = S3Storage::new(bucket.clone(), None, region.clone(), None); + ( + H160::from_str(address).unwrap(), + Arc::new(syncer) as Arc, + ) + }) + .collect::>(); + + // Create a multisig checkpoint syncer + let multisig_syncer = MultisigCheckpointSyncer::new(syncers, None); + + let validators = validators + .iter() + .map(|(address, _)| { + let address: H256 = H160::from_str(address).unwrap().into(); + address + }) + .collect::>(); + + // get the latest checkpoint from each validator + let mut latest_indices = multisig_syncer + .get_validator_latest_checkpoints_and_update_metrics( + validators.as_slice(), + &HyperlaneDomain::Known(KnownHyperlaneDomain::Arbitrum), + &HyperlaneDomain::Known(KnownHyperlaneDomain::Arbitrum), + ) + .await; + latest_indices.sort_by(|a, b| b.cmp(a)); + + let lowest_index = *latest_indices.last().unwrap(); + + let start_time = std::time::Instant::now(); + + for threshold in 2..=6 { + println!("Starting to fetch checkpoints with threshold {}", threshold); + if let Some(&(_, highest_quorum_index)) = latest_indices.get(threshold - 1) { + let result = multisig_syncer + .fetch_checkpoint_in_range( + validators.as_slice(), + threshold, + lowest_index.1, + highest_quorum_index, + &HyperlaneDomain::Known(KnownHyperlaneDomain::Arbitrum), + &HyperlaneDomain::Known(KnownHyperlaneDomain::Arbitrum), + ) + .await; + assert!(result.is_ok(), "Failed to fetch checkpoint"); + } + } + + let elapsed = start_time.elapsed(); + println!("Fetched checkpoints in {}ms", elapsed.as_millis()); + } +} diff --git a/rust/main/hyperlane-base/src/types/s3_storage.rs b/rust/main/hyperlane-base/src/types/s3_storage.rs index 8f77b1c12ef..8aa6ff225dc 100644 --- a/rust/main/hyperlane-base/src/types/s3_storage.rs +++ b/rust/main/hyperlane-base/src/types/s3_storage.rs @@ -1,25 +1,24 @@ use std::{fmt, sync::OnceLock, time::Duration}; use async_trait::async_trait; +use aws_config::{timeout::TimeoutConfig, BehaviorVersion, ConfigLoader, Region}; +use aws_sdk_s3::{ + 
error::SdkError, + operation::{get_object::GetObjectError as SdkGetObjectError, head_object::HeadObjectError}, + Client, +}; +use dashmap::DashMap; use derive_new::new; use eyre::{bail, Result}; -use futures_util::TryStreamExt; use hyperlane_core::{ReorgEvent, SignedAnnouncement, SignedCheckpointWithMessageId}; use prometheus::IntGauge; -use rusoto_core::{ - credential::{Anonymous, AwsCredentials, StaticProvider}, - Region, RusotoError, -}; -use rusoto_s3::{GetObjectError, GetObjectRequest, PutObjectRequest, S3Client, S3}; -use tokio::time::timeout; +use tokio::sync::OnceCell; -use crate::types::utils; -use crate::{settings::aws_credentials::AwsChainCredentialsProvider, CheckpointSyncer}; +use crate::CheckpointSyncer; -/// The timeout for S3 requests. Rusoto doesn't offer timeout configuration -/// out of the box, so S3 requests must be wrapped with a timeout. -/// See https://github.com/rusoto/rusoto/issues/1795. -const S3_REQUEST_TIMEOUT_SECONDS: u64 = 30; +/// The timeout for all S3 operations. +const S3_REQUEST_TIMEOUT: Duration = Duration::from_secs(10); +const S3_MAX_OBJECT_SIZE: i64 = 50 * 1024; // 50KiB #[derive(Clone, new)] /// Type for reading/writing to S3 @@ -30,16 +29,25 @@ pub struct S3Storage { folder: Option, /// The region of the bucket. region: Region, - /// A client with AWS credentials. + /// A client with AWS credentials. This client is not initialized globally and has a lifetime + /// tied to the S3Storage instance, so if heavy use of this client is expected, S3Storage + /// itself should be long-lived. #[new(default)] - authenticated_client: OnceLock, - /// A client without credentials for anonymous requests. - #[new(default)] - anonymous_client: OnceLock, + authenticated_client: OnceCell, /// The latest seen signed checkpoint index. latest_index: Option, } +/// A global cache of anonymous S3 clients, per region. +/// We've seen freshly created S3 clients make expensive DNS / TCP +/// requests when creating them. 
This cache allows us to reuse +/// anonymous clients across the entire agent. +static ANONYMOUS_CLIENT_CACHE: OnceLock>> = OnceLock::new(); + +fn get_anonymous_client_cache() -> &'static DashMap> { + ANONYMOUS_CLIENT_CACHE.get_or_init(DashMap::new) +} + impl fmt::Debug for S3Storage { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("S3Storage") @@ -52,73 +60,119 @@ impl fmt::Debug for S3Storage { impl S3Storage { async fn write_to_bucket(&self, key: String, body: &str) -> Result<()> { - let req = PutObjectRequest { - key: self.get_composite_key(key), - bucket: self.bucket.clone(), - body: Some(Vec::from(body).into()), - content_type: Some("application/json".to_owned()), - ..Default::default() - }; - timeout( - Duration::from_secs(S3_REQUEST_TIMEOUT_SECONDS), - self.authenticated_client().put_object(req), - ) - .await??; + self.authenticated_client() + .await + .put_object() + .bucket(self.bucket.clone()) + .key(self.get_composite_key(key)) + .body(Vec::from(body).into()) + .content_type("application/json") + .send() + .await?; + Ok(()) } - /// Uses an anonymous client. This should only be used for publicly accessible buckets. + /// Check if the metadata for the object satisfies our size constraints. + /// If the object is too big, we return an error. 
+ async fn check_metadata(&self, key: String) -> Result { + let metadata_req = self + .anonymous_client() + .await + .head_object() + .bucket(self.bucket.clone()) + .key(self.get_composite_key(key.clone())) + .send() + .await; + match metadata_req { + Ok(value) => match value.content_length { + Some(length) if length >= S3_MAX_OBJECT_SIZE => { + bail!("Object size for key {key} is too big: {}KiB", length / 1024); + } + Some(_) => Ok(true), + None => Ok(false), + }, + Err(SdkError::ServiceError(err)) => match err.err() { + HeadObjectError::NotFound(_) => Ok(false), + _ => bail!(err.into_err()), + }, + Err(e) => bail!(e), + } + } + async fn anonymously_read_from_bucket(&self, key: String) -> Result>> { - let req = GetObjectRequest { - key: self.get_composite_key(key), - bucket: self.bucket.clone(), - ..Default::default() - }; - let get_object_result = timeout( - Duration::from_secs(S3_REQUEST_TIMEOUT_SECONDS), - self.anonymous_client().get_object(req), - ) - .await?; + // check for metadata first + if !self.check_metadata(key.clone()).await? { + return Ok(None); + } + let get_object_result = self + .anonymous_client() + .await + .get_object() + .bucket(self.bucket.clone()) + .key(self.get_composite_key(key)) + .send() + .await; match get_object_result { - Ok(res) => match res.body { - Some(body) => Ok(Some(body.map_ok(|b| b.to_vec()).try_concat().await?)), - None => Ok(None), + Ok(res) => Ok(Some(res.body.collect().await?.into_bytes().to_vec())), + Err(SdkError::ServiceError(err)) => match err.err() { + SdkGetObjectError::NoSuchKey(_) => Ok(None), + _ => bail!(err.into_err()), }, - Err(RusotoError::Service(GetObjectError::NoSuchKey(_))) => Ok(None), Err(e) => bail!(e), } } - /// Gets an authenticated S3Client, creating it if it doesn't already exist. 
- fn authenticated_client(&self) -> &S3Client { - self.authenticated_client.get_or_init(|| { - S3Client::new_with( - utils::http_client_with_timeout().unwrap(), - AwsChainCredentialsProvider::new(), - self.region.clone(), - ) - }) + /// Gets an authenticated S3 client, creating it if it doesn't already exist + /// within &self. + async fn authenticated_client(&self) -> &Client { + self.authenticated_client + .get_or_init(|| async { + let config = self.default_aws_sdk_config_loader().load().await; + Client::new(&config) + }) + .await } - /// Gets an anonymous S3Client, creating it if it doesn't already exist. + /// Gets an anonymous S3 client, creating it if it doesn't already exist globally. /// An anonymous client doesn't have AWS credentials and will not sign S3 - /// requests with any credentials. + /// requests with any credentials. We globally cache the clients per region to avoid + /// expensive DNS / TCP initialization. /// We've experienced an inability to make GetObjectRequests to public /// S3 buckets when signing with credentials from an AWS account not from the - /// S3 bucket's AWS account. - fn anonymous_client(&self) -> &S3Client { - self.anonymous_client.get_or_init(|| { - // By default, these credentials are anonymous, see https://docs.rs/rusoto_credential/latest/rusoto_credential/struct.AwsCredentials.html#anonymous-example - let credentials = AwsCredentials::default(); - assert!(credentials.is_anonymous(), "AWS credentials not anonymous"); - - S3Client::new_with( - utils::http_client_with_timeout().unwrap(), - StaticProvider::from(credentials), - self.region.clone(), - ) + /// S3 bucket's AWS account. Additionally, this allows relayer operators to not + /// require AWS credentials. 
+ async fn anonymous_client(&self) -> Client { + let cell = get_anonymous_client_cache() + .entry(self.region.clone()) + .or_default(); + + cell.get_or_init(|| async { + let config = self + .default_aws_sdk_config_loader() + // Make anonymous, important to not require AWS credentials + // to operate the relayer + .no_credentials() + .load() + .await; + Client::new(&config) }) + .await + .clone() + } + + /// A default ConfigLoader with timeout, region, and behavior version. + /// Unless overridden, credentials will be loaded from the env. + fn default_aws_sdk_config_loader(&self) -> aws_config::ConfigLoader { + ConfigLoader::default() + .timeout_config( + TimeoutConfig::builder() + .operation_timeout(S3_REQUEST_TIMEOUT) + .build(), + ) + .behavior_version(BehaviorVersion::latest()) + .region(self.region.clone()) } fn get_composite_key(&self, key: String) -> String { @@ -211,9 +265,9 @@ impl CheckpointSyncer for S3Storage { fn announcement_location(&self) -> String { match self.folder.as_deref() { - None | Some("") => format!("s3://{}/{}", self.bucket, self.region.name()), + None | Some("") => format!("s3://{}/{}", self.bucket, self.region), Some(folder_str) => { - format!("s3://{}/{}/{}", self.bucket, self.region.name(), folder_str) + format!("s3://{}/{}/{}", self.bucket, self.region, folder_str) } } } @@ -233,3 +287,31 @@ impl CheckpointSyncer for S3Storage { .map_err(Into::into) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_announcement_location() { + // Test with a folder + let s3_storage = S3Storage::new( + "test-bucket".to_string(), + Some("test-folder".to_string()), + Region::new("us-east-1"), + None, + ); + let location = s3_storage.announcement_location(); + assert_eq!(location, "s3://test-bucket/us-east-1/test-folder"); + + // Test without a folder + let s3_storage = S3Storage::new( + "test-bucket".to_string(), + None, + Region::new("us-east-1"), + None, + ); + let location = s3_storage.announcement_location(); + 
assert_eq!(location, "s3://test-bucket/us-east-1"); + } +} diff --git a/rust/main/hyperlane-core/src/config/mod.rs b/rust/main/hyperlane-core/src/config/mod.rs index a0a29d36d52..6b8b09462e2 100644 --- a/rust/main/hyperlane-core/src/config/mod.rs +++ b/rust/main/hyperlane-core/src/config/mod.rs @@ -27,8 +27,12 @@ pub type NoFilter = (); pub struct OperationBatchConfig { /// Optional batch contract address (e.g. Multicall3 on EVM chains) pub batch_contract_address: Option, + /// Batch size pub max_batch_size: u32, + + /// bypass batch simulation + pub bypass_batch_simulation: bool, } /// A trait that allows for constructing `Self` from a raw config type. diff --git a/rust/main/hyperlane-core/src/traits/mailbox.rs b/rust/main/hyperlane-core/src/traits/mailbox.rs index 52f3168e25d..e7f97238f02 100644 --- a/rust/main/hyperlane-core/src/traits/mailbox.rs +++ b/rust/main/hyperlane-core/src/traits/mailbox.rs @@ -4,8 +4,8 @@ use async_trait::async_trait; use derive_new::new; use crate::{ - traits::TxOutcome, utils::domain_hash, BatchItem, ChainCommunicationError, ChainResult, - HyperlaneContract, HyperlaneMessage, QueueOperation, ReorgPeriod, TxCostEstimate, H256, U256, + traits::TxOutcome, utils::domain_hash, ChainCommunicationError, ChainResult, HyperlaneContract, + HyperlaneMessage, QueueOperation, ReorgPeriod, TxCostEstimate, H256, U256, }; /// Interface for the Mailbox chain contract. Allows abstraction over different @@ -40,21 +40,16 @@ pub trait Mailbox: HyperlaneContract + Send + Sync + Debug { tx_gas_limit: Option, ) -> ChainResult; - /// Process a message with a proof against the provided signed checkpoint - async fn process_batch( - &self, - _messages: &[BatchItem], - ) -> ChainResult { - // Batching is not supported by default - Err(ChainCommunicationError::BatchingFailed) + /// True if the destination chain supports batching + /// (i.e. 
if the mailbox contract will succeed on a `process_batch` call) + fn supports_batching(&self) -> bool { + // Default to false + false } /// Try process the given operations as a batch. Returns the outcome of the /// batch (if one was submitted) and the operations that were not submitted. - async fn try_process_batch<'a>( - &self, - _ops: Vec<&'a QueueOperation>, - ) -> ChainResult { + async fn process_batch<'a>(&self, _ops: Vec<&'a QueueOperation>) -> ChainResult { // Batching is not supported by default Err(ChainCommunicationError::BatchingFailed) } diff --git a/rust/main/hyperlane-core/src/traits/pending_operation.rs b/rust/main/hyperlane-core/src/traits/pending_operation.rs index e2e9b702595..b75d599ffe5 100644 --- a/rust/main/hyperlane-core/src/traits/pending_operation.rs +++ b/rust/main/hyperlane-core/src/traits/pending_operation.rs @@ -1,5 +1,6 @@ use std::{ cmp::Ordering, + env, fmt::{Debug, Display}, io::Write, sync::Arc, @@ -10,6 +11,7 @@ use async_trait::async_trait; use num::CheckedDiv; use prometheus::IntGauge; use serde::{Deserialize, Serialize}; +use sha3::{digest::Update, Digest, Keccak256}; use strum::Display; use tracing::warn; @@ -156,7 +158,6 @@ pub trait PendingOperation: Send + Sync + Debug + TryBatchAs { fn reset_attempts(&mut self); /// Set the number of times this operation has been retried. - #[cfg(any(test, feature = "test-utils"))] fn set_retries(&mut self, retries: u32); /// Get the number of times this operation has been retried. 
@@ -363,18 +364,34 @@ impl Eq for QueueOperation {} impl Ord for QueueOperation { fn cmp(&self, other: &Self) -> Ordering { use Ordering::*; + + fn salted_hash(id: &H256, salt: &[u8]) -> H256 { + H256::from_slice(Keccak256::new().chain(id).chain(salt).finalize().as_slice()) + } + match (self.next_attempt_after(), other.next_attempt_after()) { (Some(a), Some(b)) => a.cmp(&b), // No time means it should come before (None, Some(_)) => Less, (Some(_), None) => Greater, (None, None) => { - if self.origin_domain_id() == other.origin_domain_id() { - // Should execute in order of nonce for the same origin - self.priority().cmp(&other.priority()) + let mixing = + env::var("HYPERLANE_RELAYER_MIXING_ENABLED").map_or(false, |v| v == "true"); + if !mixing { + if self.origin_domain_id() == other.origin_domain_id() { + // Should execute in order of nonce for the same origin + self.priority().cmp(&other.priority()) + } else { + // There is no priority between these messages, so arbitrarily use the id + self.id().cmp(&other.id()) + } } else { - // There is no priority between these messages, so arbitrarily use the id - self.id().cmp(&other.id()) + let salt = env::var("HYPERLANE_RELAYER_MIXING_SALT") + .map_or(0, |v| v.parse::().unwrap_or(0)) + .to_vec(); + let self_hash = salted_hash(&self.id(), &salt); + let other_hash = salted_hash(&other.id(), &salt); + self_hash.cmp(&other_hash) } } } @@ -397,14 +414,4 @@ pub enum PendingOperationResult { } #[cfg(test)] -mod test { - use super::*; - - #[test] - fn test_encoding_pending_operation_status() { - let status = PendingOperationStatus::Retry(ReprepareReason::CouldNotFetchMetadata); - let encoded = status.to_vec(); - let decoded = PendingOperationStatus::read_from(&mut &encoded[..]).unwrap(); - assert_eq!(status, decoded); - } -} +mod tests; diff --git a/rust/main/hyperlane-core/src/traits/pending_operation/tests.rs b/rust/main/hyperlane-core/src/traits/pending_operation/tests.rs new file mode 100644 index 00000000000..1428ef7c47e --- 
/dev/null +++ b/rust/main/hyperlane-core/src/traits/pending_operation/tests.rs @@ -0,0 +1,154 @@ +use std::cmp::Ord; +use std::env; + +use super::*; + +#[derive(Debug, Serialize)] +struct MockQueueOperation { + id: H256, + origin_domain_id: u32, + priority: u32, +} + +#[async_trait] +#[typetag::serialize] +impl PendingOperation for MockQueueOperation { + fn id(&self) -> H256 { + self.id + } + fn priority(&self) -> u32 { + self.priority + } + fn origin_domain_id(&self) -> u32 { + self.origin_domain_id + } + fn next_attempt_after(&self) -> Option { + None + } + fn retrieve_status_from_db(&self) -> Option { + None + } + fn destination_domain(&self) -> &HyperlaneDomain { + unimplemented!() + } + fn sender_address(&self) -> &H256 { + unimplemented!() + } + fn recipient_address(&self) -> &H256 { + unimplemented!() + } + fn app_context(&self) -> Option { + None + } + fn get_metric(&self) -> Option> { + None + } + fn set_metric(&mut self, _metric: Arc) {} + fn status(&self) -> PendingOperationStatus { + unimplemented!() + } + fn set_status(&mut self, _status: PendingOperationStatus) {} + async fn prepare(&mut self) -> PendingOperationResult { + unimplemented!() + } + async fn submit(&mut self) -> PendingOperationResult { + unimplemented!() + } + fn set_submission_outcome(&mut self, _outcome: TxOutcome) {} + fn get_tx_cost_estimate(&self) -> Option { + None + } + async fn confirm(&mut self) -> PendingOperationResult { + unimplemented!() + } + fn set_operation_outcome( + &mut self, + _submission_outcome: TxOutcome, + _submission_estimated_cost: U256, + ) { + } + fn set_next_attempt_after(&mut self, _delay: Duration) {} + fn reset_attempts(&mut self) {} + #[cfg(any(test, feature = "test-utils"))] + fn set_retries(&mut self, _retries: u32) {} + fn get_retries(&self) -> u32 { + 0 + } + async fn payload(&self) -> ChainResult> { + unimplemented!() + } + fn on_reprepare( + &mut self, + _err_msg: Option, + _reason: ReprepareReason, + ) -> PendingOperationResult { + unimplemented!() 
+ } +} + +impl TryBatchAs for MockQueueOperation {} + +#[test] +fn test_encoding_pending_operation_status() { + let status = PendingOperationStatus::Retry(ReprepareReason::CouldNotFetchMetadata); + let encoded = status.to_vec(); + let decoded = PendingOperationStatus::read_from(&mut &encoded[..]).unwrap(); + assert_eq!(status, decoded); +} + +#[test] +fn test_queue_operation_ord_without_mixing() { + env::set_var("HYPERLANE_RELAYER_MIXING_ENABLED", "false"); + + let op1 = Box::new(MockQueueOperation { + id: H256::from_low_u64_be(1), + origin_domain_id: 1, + priority: 10, + }) as QueueOperation; + let op2 = Box::new(MockQueueOperation { + id: H256::from_low_u64_be(2), + origin_domain_id: 1, + priority: 5, + }) as QueueOperation; + + assert!(op1 > op2); // Higher priority value means lower priority +} + +#[test] +fn test_queue_operation_ord_with_mixing() { + env::set_var("HYPERLANE_RELAYER_MIXING_ENABLED", "true"); + env::set_var("HYPERLANE_RELAYER_MIXING_SALT", "123"); + + let op1 = Box::new(MockQueueOperation { + id: H256::from_low_u64_be(1), + origin_domain_id: 1, + priority: 10, + }) as QueueOperation; + let op2 = Box::new(MockQueueOperation { + id: H256::from_low_u64_be(2), + origin_domain_id: 1, + priority: 5, + }) as QueueOperation; + + // Calculate salted hashes for both operations + let salt = env::var("HYPERLANE_RELAYER_MIXING_SALT") + .map_or(0, |v| v.parse::().unwrap_or(0)) + .to_vec(); + let salted_hash_op1 = H256::from_slice( + Keccak256::new() + .chain(op1.id()) + .chain(&salt) + .finalize() + .as_slice(), + ); + let salted_hash_op2 = H256::from_slice( + Keccak256::new() + .chain(op2.id()) + .chain(&salt) + .finalize() + .as_slice(), + ); + + // Assert that the ordering matches the salted hash comparison + assert_eq!(op1.cmp(&op2), salted_hash_op1.cmp(&salted_hash_op2)); +} diff --git a/rust/main/hyperlane-test/src/mocks/mailbox.rs b/rust/main/hyperlane-test/src/mocks/mailbox.rs index 5074d2a0be2..dc6b9bb00e2 100644 --- 
a/rust/main/hyperlane-test/src/mocks/mailbox.rs +++ b/rust/main/hyperlane-test/src/mocks/mailbox.rs @@ -55,6 +55,14 @@ mock! { message: &HyperlaneMessage, metadata: &[u8], ) -> Vec {} + + pub fn process_batch<'a>( + &self, + ops: Vec<&'a QueueOperation>, + ) -> ChainResult {} + + pub fn supports_batching(&self) -> bool { + } } } @@ -91,13 +99,6 @@ impl Mailbox for MockMailboxContract { self.process(message, metadata, tx_gas_limit) } - async fn process_batch( - &self, - messages: &[BatchItem], - ) -> ChainResult { - self.process_batch(messages).await - } - async fn process_estimate_costs( &self, message: &HyperlaneMessage, @@ -113,6 +114,14 @@ impl Mailbox for MockMailboxContract { ) -> ChainResult> { Ok(self.process_calldata(message, metadata)) } + + async fn process_batch<'a>(&self, ops: Vec<&'a QueueOperation>) -> ChainResult { + self.process_batch(ops) + } + + fn supports_batching(&self) -> bool { + self.supports_batching() + } } impl HyperlaneChain for MockMailboxContract { diff --git a/rust/main/submitter/src/chain_tx_adapter/chains/sealevel/adapter/tests/config.rs b/rust/main/submitter/src/chain_tx_adapter/chains/sealevel/adapter/tests/config.rs index b4ec65905f9..035510cbc71 100644 --- a/rust/main/submitter/src/chain_tx_adapter/chains/sealevel/adapter/tests/config.rs +++ b/rust/main/submitter/src/chain_tx_adapter/chains/sealevel/adapter/tests/config.rs @@ -29,6 +29,7 @@ fn test_configuration_fields() { operation_batch: OperationBatchConfig { batch_contract_address: None, max_batch_size: expected_max_batch_size, + ..Default::default() }, native_token: Default::default(), priority_fee_oracle: Default::default(), diff --git a/rust/main/utils/abigen/src/lib.rs b/rust/main/utils/abigen/src/lib.rs index 3fe21a55bb4..18b1811165c 100644 --- a/rust/main/utils/abigen/src/lib.rs +++ b/rust/main/utils/abigen/src/lib.rs @@ -68,6 +68,8 @@ pub fn generate_bindings_for_dir( /// Generate the bindings for a given ABI and return the new module name. 
Will /// create a file within the designated path with the correct `{module_name}.rs` /// format. +// We allow unused variables due to some feature flagging. +#[allow(unused_variables)] pub fn generate_bindings( contract_path: impl AsRef, output_dir: impl AsRef, diff --git a/solidity/coverage.sh b/solidity/coverage.sh index 2a8fe1c49a5..3a785017909 100755 --- a/solidity/coverage.sh +++ b/solidity/coverage.sh @@ -7,4 +7,5 @@ forge coverage \ --report lcov \ --report summary \ --no-match-coverage "(test|mock|node_modules|script)" \ + --no-match-test "Fork" \ --ir-minimum # https://github.com/foundry-rs/foundry/issues/3357 diff --git a/solidity/generate-artifact-exports.mjs b/solidity/generate-artifact-exports.mjs index 69327ffda46..f55f463c807 100755 --- a/solidity/generate-artifact-exports.mjs +++ b/solidity/generate-artifact-exports.mjs @@ -3,41 +3,58 @@ import { basename, dirname, join } from 'path'; import { glob } from 'typechain'; import { fileURLToPath } from 'url'; -const cwd = process.cwd(); +const CONFIG = { + cwd: process.cwd(), + outputDir: 'dist/zksync/', + artifactsDir: 'artifacts', + artifactGlobs: [ + `!./artifacts-zk/!(build-info)/**/*.dbg.json`, + `./artifacts-zk/!(build-info)/**/+([a-zA-Z0-9_]).json`, + ], + formatIdentifier: 'hh-zksolc-artifact-1', +}; + const __filename = fileURLToPath(import.meta.url); const __dirname = dirname(__filename); -const ROOT_OUTPUT_DIR = join(__dirname, 'dist/zksync/'); -const ARTIFACTS_OUTPUT_DIR = join(ROOT_OUTPUT_DIR, 'artifacts'); +const ROOT_OUTPUT_DIR = join(__dirname, CONFIG.outputDir); +const ARTIFACTS_OUTPUT_DIR = join(ROOT_OUTPUT_DIR, CONFIG.artifactsDir); /** * @notice Templates for TypeScript artifact generation */ -const TEMPLATES = { - JS_ARTIFACT: `\ -export const {name} = {artifact}; -`, - - DTS_ARTIFACT: `\ -import type { ZKSyncArtifact } from '../types.js'; - -export declare const {name}: ZKSyncArtifact; -`, +class Templates { + static jsArtifact(name, artifact) { + return `export const ${name} = 
${JSON.stringify(artifact)};`; + } - JS_INDEX: `\ -{imports} + static dtsArtifact(name) { + return `import type { ZKSyncArtifact } from '../types.js'; +export declare const ${name}: ZKSyncArtifact;`; + } + static jsIndex(imports, exports) { + return `${imports} export const zkSyncContractArtifacts = [ -{exports} -]; -`, +${exports} +];`; + } - DTS_INDEX: `\ -import type { ZKSyncArtifact } from './types.js'; + static dtsIndex() { + return `import type { ZKSyncArtifact } from './types.js'; +export declare const zkSyncContractArtifacts: readonly ZKSyncArtifact[];`; + } -export declare const zkSyncContractArtifacts: readonly ZKSyncArtifact[]; -`, -}; + // Generates a single import line for a contract in index file + static importLine(name) { + return `import { ${name} } from './artifacts/${name}.js';`; + } + + // Generates a single export line for a contract in index file + static exportLine(name) { + return ` ${name},`; + } +} class ArtifactGenerator { constructor() { @@ -50,10 +67,7 @@ class ArtifactGenerator { * @return {string[]} Array of file paths matching the glob pattern */ getArtifactPaths() { - return glob(cwd, [ - `!./artifacts-zk/!(build-info)/**/*.dbg.json`, - `./artifacts-zk/!(build-info)/**/+([a-zA-Z0-9_]).json`, - ]); + return glob(CONFIG.cwd, CONFIG.artifactGlobs); } /** @@ -73,45 +87,6 @@ class ArtifactGenerator { return JSON.parse(content); } - /** - * @notice Generates JavaScript content for a contract artifact - */ - generateJavaScriptContent(name, artifact) { - return TEMPLATES.JS_ARTIFACT.replace('{name}', name).replace( - '{artifact}', - JSON.stringify(artifact, null, 2), - ); - } - - /** - * @notice Generates TypeScript declaration content for a contract artifact - */ - generateDeclarationContent(name) { - return TEMPLATES.DTS_ARTIFACT.replace('{name}', name); - } - - /** - * @notice Generates index file contents - */ - generateIndexContents(artifactNames) { - const imports = artifactNames - .map((name) => `import { ${name} } from 
'./artifacts/${name}.js';`) - .join('\n'); - const exports = artifactNames.map((name) => ` ${name},`).join('\n'); - - const jsContent = TEMPLATES.JS_INDEX.replace('{imports}', imports).replace( - '{exports}', - exports, - ); - - const dtsContent = TEMPLATES.DTS_INDEX.replace( - '{imports}', - imports, - ).replace('{exports}', exports); - - return { jsContent, dtsContent }; - } - /** * @notice Processes a single artifact file */ @@ -124,15 +99,35 @@ class ArtifactGenerator { const artifact = await this.readArtifactFile(filePath); + /** + * @notice Validates that the artifact was compiled with zksolc + * + * Format examples: + * - Valid: "_format": "hh-zksolc-artifact-1" (compiled with zksolc) + * - Invalid: "_format": "hh-sol-artifact-1" (standard Solidity compilation) + */ + if ( + !artifact._format || + !artifact._format.includes(CONFIG.formatIdentifier) + ) { + throw new Error( + `Artifact ${name} validation failed: invalid _format property. Expected ${ + CONFIG.formatIdentifier + } but got '${ + artifact._format || 'undefined' + }'. 
It may not be properly compiled with zksolc.`, + ); + } + // Generate and write .js file - const jsContent = this.generateJavaScriptContent(name, artifact); + const jsContent = Templates.jsArtifact(name, artifact); await fs.writeFile( join(ROOT_OUTPUT_DIR, 'artifacts', `${name}.js`), jsContent, ); // Generate and write .d.ts file - const dtsContent = this.generateDeclarationContent(name); + const dtsContent = Templates.dtsArtifact(name); await fs.writeFile( join(ROOT_OUTPUT_DIR, 'artifacts', `${name}.d.ts`), dtsContent, @@ -141,6 +136,23 @@ class ArtifactGenerator { this.processedFiles.add(name); } + /** + * @notice Generates index file contents + */ + generateIndexContents(artifactNames) { + const imports = artifactNames + .map((name) => Templates.importLine(name)) + .join('\n'); + const exports = artifactNames + .map((name) => Templates.exportLine(name)) + .join('\n'); + + const jsContent = Templates.jsIndex(imports, exports); + const dtsContent = Templates.dtsIndex(); + + return { jsContent, dtsContent }; + } + async generate() { try { await this.createOutputDirectory(); diff --git a/solidity/zk-hardhat.config.cts b/solidity/zk-hardhat.config.cts index 645359382ae..2e6caed3043 100644 --- a/solidity/zk-hardhat.config.cts +++ b/solidity/zk-hardhat.config.cts @@ -10,10 +10,18 @@ import { rootHardhatConfig } from './rootHardhatConfig.cjs'; module.exports = { ...rootHardhatConfig, zksolc: { - version: '1.5.3', + version: '1.5.12', compilerSource: 'binary', enableEraVMExtensions: true, }, + defaultNetwork: 'ZKsyncInMemoryNode', + networks: { + ZKsyncInMemoryNode: { + url: 'http://127.0.0.1:8011', + ethNetwork: '', + zksync: true, + }, + }, paths: { sources: './contracts', cache: './cache-zk', diff --git a/typescript/cli/scripts/run-e2e-test.sh b/typescript/cli/scripts/run-e2e-test.sh index c98fc9158f6..7ae565c6257 100755 --- a/typescript/cli/scripts/run-e2e-test.sh +++ b/typescript/cli/scripts/run-e2e-test.sh @@ -3,17 +3,19 @@ function cleanup() { set +e pkill -f 
anvil - rm -rf ./tmp + rm -rf ./test-configs/anvil/deployments rm -f ./test-configs/anvil/chains/anvil2/addresses.yaml rm -f ./test-configs/anvil/chains/anvil3/addresses.yaml + rm -f ./test-configs/anvil/chains/anvil4/addresses.yaml set -e } cleanup -echo "Starting anvil2 and anvil3 chain for E2E tests" -anvil --chain-id 31338 -p 8555 --state /tmp/anvil2/state --gas-price 1 > /dev/null & -anvil --chain-id 31347 -p 8600 --state /tmp/anvil3/state --gas-price 1 > /dev/null & +echo "Starting anvil2, anvil3 and anvil4 chains for E2E tests" +anvil --chain-id 31338 -p 8555 --gas-price 1 > /dev/null & +anvil --chain-id 31347 -p 8600 --gas-price 1 > /dev/null & +anvil --chain-id 31348 -p 8601 --gas-price 1 > /dev/null & echo "Running E2E tests" if [ -n "${CLI_E2E_TEST}" ]; then diff --git a/typescript/cli/src/commands/warp.ts b/typescript/cli/src/commands/warp.ts index a01d9712c1e..c14a3fc64e3 100644 --- a/typescript/cli/src/commands/warp.ts +++ b/typescript/cli/src/commands/warp.ts @@ -22,6 +22,14 @@ import { evaluateIfDryRunFailure } from '../deploy/dry-run.js'; import { runWarpRouteApply, runWarpRouteDeploy } from '../deploy/warp.js'; import { log, logBlue, logCommandHeader, logGreen } from '../logger.js'; import { runWarpRouteRead } from '../read/warp.js'; +import { + Executor, + IExecutor, + IMonitor, + IStrategy, + Monitor, + Strategy, +} from '../rebalancer/index.js'; import { sendTestTransfer } from '../send/transfer.js'; import { runSingleChainSelectionStep } from '../utils/chains.js'; import { @@ -62,6 +70,7 @@ export const warpCommand: CommandModule = { .command(deploy) .command(init) .command(read) + .command(rebalancer) .command(send) .command(verify) .version(false) @@ -399,6 +408,55 @@ export const check: CommandModuleWithContext<{ }, }; +export const rebalancer: CommandModuleWithContext<{ + warpRouteId: string; + checkFrequency: number; +}> = { + command: 'rebalancer', + describe: 'Run a warp route collateral rebalancer', + builder: { + warpRouteId: { + 
type: 'string', + description: 'The warp route ID to rebalance', + demandOption: true, + }, + checkFrequency: { + type: 'number', + description: 'Frequency to check balances in ms', + demandOption: true, + alias: 'v', + }, + }, + handler: async ({ context, warpRouteId, checkFrequency }) => { + logCommandHeader('Hyperlane Warp Rebalancer'); + + // Instantiates the warp route monitor + const monitor: IMonitor = new Monitor( + context.registry, + warpRouteId, + checkFrequency, + ); + + // Instantiates the strategy that will process monitor events and determine whether a rebalance is needed + const strategy: IStrategy = new Strategy(); + + // Instantiates the executor that will process strategy results and execute the rebalance + const executor: IExecutor = new Executor(); + + // Subscribes the strategy to the monitor + monitor.subscribe((event) => strategy.handleMonitorEvent(event)); + + // Subscribes the executor to the strategy + strategy.subscribe((event) => executor.handleStrategyEvent(event)); + + // Starts the monitor to begin polling balances. 
+ // This will keep running until the process is terminated + await monitor.start(); + + logGreen('Rebalancer started successfully 🚀'); + }, +}; + export const verify: CommandModuleWithWriteContext<{ symbol: string; }> = { diff --git a/typescript/cli/src/rebalancer/executor/Executor.ts b/typescript/cli/src/rebalancer/executor/Executor.ts new file mode 100644 index 00000000000..e221b7da177 --- /dev/null +++ b/typescript/cli/src/rebalancer/executor/Executor.ts @@ -0,0 +1,10 @@ +import { IExecutor } from '../interfaces/IExecutor.js'; +import { StrategyEvent } from '../interfaces/IStrategy.js'; + +export class Executor implements IExecutor { + async handleStrategyEvent(_event: StrategyEvent): Promise { + // TODO: Replace with actual executor logic + // Current implementation is a placeholder used to test something in typescript/cli/src/tests/warp/warp-rebalancer.e2e-test.ts + console.log('Executing strategy event:', _event); + } +} diff --git a/typescript/cli/src/rebalancer/index.ts b/typescript/cli/src/rebalancer/index.ts new file mode 100644 index 00000000000..41255ea1f31 --- /dev/null +++ b/typescript/cli/src/rebalancer/index.ts @@ -0,0 +1,6 @@ +export * from './executor/Executor.js'; +export * from './strategy/Strategy.js'; +export * from './monitor/Monitor.js'; +export * from './interfaces/IExecutor.js'; +export * from './interfaces/IMonitor.js'; +export * from './interfaces/IStrategy.js'; diff --git a/typescript/cli/src/rebalancer/interfaces/IExecutor.ts b/typescript/cli/src/rebalancer/interfaces/IExecutor.ts new file mode 100644 index 00000000000..59c68339904 --- /dev/null +++ b/typescript/cli/src/rebalancer/interfaces/IExecutor.ts @@ -0,0 +1,11 @@ +import { StrategyEvent } from './IStrategy.js'; + +/** + * Interface for the class that will execute rebalancing transactions on-chain. + */ +export interface IExecutor { + /** + * Executes rebalancing based on the data provided by the strategy. 
+ */ + handleStrategyEvent(event: StrategyEvent): Promise; +} diff --git a/typescript/cli/src/rebalancer/interfaces/IMonitor.ts b/typescript/cli/src/rebalancer/interfaces/IMonitor.ts new file mode 100644 index 00000000000..df316b35c4a --- /dev/null +++ b/typescript/cli/src/rebalancer/interfaces/IMonitor.ts @@ -0,0 +1,45 @@ +import { ChainName } from '@hyperlane-xyz/sdk'; + +/** + * Represents an event emitted by the monitor containing balance information + * across different chains and tokens. + * + * TODO: The monitor event could emit the same values as + * typescript/infra/scripts/warp-routes/monitor/monitor-warp-route-balances.ts + * which might be an excess for the rebalancer but might be required for subscribers to track the same metrics. + */ +export type MonitorEvent = { + /** + * Array of objects containing balance information for each token. + */ + balances: { + token: string; + /** + * The address that holds the amount of tokens represented by value. + */ + owner: string; + /** + * The chain the token lives in. + */ + chain: ChainName; + /** + * The amount of tokens held by the owner. + */ + value: bigint; + }[]; +}; + +/** + * Interface for a monitoring service that tracks token information across different chains. + */ +export interface IMonitor { + /** + * Allows subscribers to listen to monitored token data whenever it is emitted. + */ + subscribe(fn: (event: MonitorEvent) => void): void; + + /** + * Starts the monitoring long-running process. 
+ */ + start(): Promise; +} diff --git a/typescript/cli/src/rebalancer/interfaces/IStrategy.ts b/typescript/cli/src/rebalancer/interfaces/IStrategy.ts new file mode 100644 index 00000000000..7d3ae8c311e --- /dev/null +++ b/typescript/cli/src/rebalancer/interfaces/IStrategy.ts @@ -0,0 +1,49 @@ +import { ChainName } from '@hyperlane-xyz/sdk'; + +import { MonitorEvent } from './IMonitor.js'; + +/** + * Represents an event emitted by the strategy containing routing information + * for token rebalancing across different chains. + */ +export type StrategyEvent = { + /** + * Array of objects containing routing information for token transfers. + * It is an array given that rebalancing might require multiple asset movements. + */ + route: { + /** + * The source chain where tokens will be transferred from. + */ + origin: ChainName; + /** + * The target chain where tokens will be transferred to. + */ + destination: ChainName; + /** + * The address of the token to be transferred. + */ + token: string; + /** + * The amount of tokens to be transferred. + */ + amount: bigint; + }[]; +}; + +/** + * Interface for a strategy service that determines optimal token routing + * based on monitored balance information. + */ +export interface IStrategy { + /** + * Allows subscribers to listen to rebalancing requirements whenever they are emitted. + */ + subscribe(fn: (event: StrategyEvent) => void): void; + + /** + * Processes balance information from the monitor and determines if rebalancing is needed. + * Should emit a StrategyEvent containing the rebalancing requirements. 
+ */ + handleMonitorEvent(event: MonitorEvent): Promise; +} diff --git a/typescript/cli/src/rebalancer/monitor/Monitor.ts b/typescript/cli/src/rebalancer/monitor/Monitor.ts new file mode 100644 index 00000000000..803c47f31a3 --- /dev/null +++ b/typescript/cli/src/rebalancer/monitor/Monitor.ts @@ -0,0 +1,76 @@ +import EventEmitter from 'events'; + +import { IRegistry } from '@hyperlane-xyz/registry'; +import { MultiProtocolProvider } from '@hyperlane-xyz/sdk'; +import { WarpCore } from '@hyperlane-xyz/sdk'; +import { objMap, objMerge } from '@hyperlane-xyz/utils'; + +import { IMonitor, MonitorEvent } from '../interfaces/IMonitor.js'; + +/** + * Simple monitor implementation that polls warp route collateral balances and emits them as MonitorEvent. + */ +export class Monitor implements IMonitor { + private readonly MONITOR_EVENT = 'monitor'; + private readonly emitter = new EventEmitter(); + private interval: NodeJS.Timeout | undefined; + + /** + * @param registry - The registry that contains a collection of configs, artifacts, and schemas for Hyperlane. + * @param warpRouteId - The warp route ID to monitor. + * @param checkFrequency - The frequency to poll balances in ms. 
+ */ + constructor( + private readonly registry: IRegistry, + private readonly warpRouteId: string, + private readonly checkFrequency: number, + ) {} + + subscribe(fn: (data: MonitorEvent) => void) { + this.emitter.on(this.MONITOR_EVENT, fn); + } + + async start() { + if (this.interval) { + // Cannot start the same monitor multiple times + throw new Error('Monitor already running'); + } + + // Build the WarpCore from the registry + const metadata = await this.registry.getMetadata(); + const addresses = await this.registry.getAddresses(); + const mailboxes = objMap(addresses, (_, { mailbox }) => ({ mailbox })); + const provider = new MultiProtocolProvider(objMerge(metadata, mailboxes)); + const warpCoreConfig = await this.registry.getWarpRoute(this.warpRouteId); + const warpCore = WarpCore.FromConfig(provider, warpCoreConfig); + + // Start the interval used to poll collateral balances + this.interval = setInterval(async () => { + const event: MonitorEvent = { + balances: [], + }; + + for (const token of warpCore.tokens) { + // Ignore non-collateralized tokens given that we only care about collateral balances + if (!token.isCollateralized()) { + continue; + } + + const adapter = token.getHypAdapter(warpCore.multiProvider); + + // Get the bridged supply of the collateral token to obtain how much collateral is available + const bridgedSupply = await adapter.getBridgedSupply(); + + event.balances.push({ + chain: token.chainName, + owner: token.addressOrDenom, + token: token.collateralAddressOrDenom!, + value: bridgedSupply!, + }); + } + + // Emit the event containing the collateral balances + this.emitter.emit(this.MONITOR_EVENT, event); + }, this.checkFrequency); + } +} diff --git a/typescript/cli/src/rebalancer/strategy/Strategy.ts b/typescript/cli/src/rebalancer/strategy/Strategy.ts new file mode 100644 index 00000000000..41bceece4c9 --- /dev/null +++ b/typescript/cli/src/rebalancer/strategy/Strategy.ts @@ -0,0 +1,31 @@ +import EventEmitter from 'events'; + +import { 
MonitorEvent } from '../interfaces/IMonitor.js'; +import { IStrategy, StrategyEvent } from '../interfaces/IStrategy.js'; + +/** + * Simple strategy implementation that processes token balances across chains and emits a StrategyEvent + * containing if and how rebalancing has to be applied. + */ +export class Strategy implements IStrategy { + private readonly STRATEGY_EVENT = 'strategy'; + private readonly emitter = new EventEmitter(); + + subscribe(fn: (event: StrategyEvent) => void): void { + this.emitter.on(this.STRATEGY_EVENT, fn); + } + + async handleMonitorEvent(event: MonitorEvent): Promise { + // TODO: Implement actual strategy logic + // Current implementation is a placeholder used to test something in typescript/cli/src/tests/warp/warp-rebalancer.e2e-test.ts + const strategyEvent: StrategyEvent = { + route: event.balances.map((b) => ({ + origin: b.chain, + destination: b.chain, + token: b.token, + amount: b.value, + })), + }; + this.emitter.emit(this.STRATEGY_EVENT, strategyEvent); + } +} diff --git a/typescript/cli/src/tests/commands/helpers.ts b/typescript/cli/src/tests/commands/helpers.ts index 3467fe1f651..8e9b3b87a70 100644 --- a/typescript/cli/src/tests/commands/helpers.ts +++ b/typescript/cli/src/tests/commands/helpers.ts @@ -45,6 +45,7 @@ export const E2E_TEST_BURN_ADDRESS = export const CHAIN_NAME_2 = 'anvil2'; export const CHAIN_NAME_3 = 'anvil3'; +export const CHAIN_NAME_4 = 'anvil4'; export const EXAMPLES_PATH = './examples'; export const CORE_CONFIG_PATH = `${EXAMPLES_PATH}/core-config.yaml`; diff --git a/typescript/cli/src/tests/commands/warp.ts b/typescript/cli/src/tests/commands/warp.ts index 278ed6d4344..b431fd89fc2 100644 --- a/typescript/cli/src/tests/commands/warp.ts +++ b/typescript/cli/src/tests/commands/warp.ts @@ -142,7 +142,7 @@ export function hyperlaneWarpSendRelay( destination: string, warpCorePath: string, relay = true, - value = 1, + value: number | string = 1, ): ProcessPromise { return $`yarn workspace @hyperlane-xyz/cli 
run hyperlane warp send \ ${relay ? '--relay' : ''} \ @@ -156,6 +156,16 @@ export function hyperlaneWarpSendRelay( --amount ${value}`; } +export function hyperlaneWarpRebalancer( + warpRouteId: string, + checkFrequency: number, +): ProcessPromise { + return $`yarn workspace @hyperlane-xyz/cli run hyperlane warp rebalancer \ + --registry ${REGISTRY_PATH} \ + --warpRouteId ${warpRouteId} \ + --checkFrequency ${checkFrequency}`; +} + /** * Reads the Warp route deployment config to specified output path. * @param warpCorePath path to warp core diff --git a/typescript/cli/src/tests/warp/warp-rebalancer.e2e-test.ts b/typescript/cli/src/tests/warp/warp-rebalancer.e2e-test.ts new file mode 100644 index 00000000000..ae26adfea4f --- /dev/null +++ b/typescript/cli/src/tests/warp/warp-rebalancer.e2e-test.ts @@ -0,0 +1,130 @@ +import { Wallet } from 'ethers'; + +import { createWarpRouteConfigId } from '@hyperlane-xyz/registry'; +import { TokenType, WarpRouteDeployConfig } from '@hyperlane-xyz/sdk'; +import { toWei } from '@hyperlane-xyz/utils'; + +import { writeYamlOrJson } from '../../utils/files.js'; +import { + ANVIL_KEY, + CHAIN_NAME_2, + CHAIN_NAME_3, + CHAIN_NAME_4, + CORE_CONFIG_PATH, + DEFAULT_E2E_TEST_TIMEOUT, + deployOrUseExistingCore, + deployToken, + getCombinedWarpRoutePath, +} from '../commands/helpers.js'; +import { + hyperlaneWarpDeploy, + hyperlaneWarpRebalancer, + hyperlaneWarpSendRelay, +} from '../commands/warp.js'; + +describe('hyperlane warp rebalancer e2e tests', async function () { + this.timeout(2 * DEFAULT_E2E_TEST_TIMEOUT); + + describe('hyperlane warp rebalancer', () => { + it('should successfully start and stop the warp rebalancer', async function () { + // Deploy core contracts on all chains + const chain2Addresses = await deployOrUseExistingCore( + CHAIN_NAME_2, + CORE_CONFIG_PATH, + ANVIL_KEY, + ); + const chain3Addresses = await deployOrUseExistingCore( + CHAIN_NAME_3, + CORE_CONFIG_PATH, + ANVIL_KEY, + ); + const chain4Addresses = await 
deployOrUseExistingCore( + CHAIN_NAME_4, + CORE_CONFIG_PATH, + ANVIL_KEY, + ); + + // Deploy ERC20s + const tokenChain2 = await deployToken(ANVIL_KEY, CHAIN_NAME_2); + const tokenChain3 = await deployToken(ANVIL_KEY, CHAIN_NAME_3); + const tokenSymbol = await tokenChain2.symbol(); + + // Deploy Warp Route + const warpDeploymentPath = getCombinedWarpRoutePath(tokenSymbol, [ + CHAIN_NAME_2, + CHAIN_NAME_3, + CHAIN_NAME_4, + ]); + const ownerAddress = new Wallet(ANVIL_KEY).address; + const warpConfig: WarpRouteDeployConfig = { + [CHAIN_NAME_2]: { + type: TokenType.collateral, + token: tokenChain2.address, + mailbox: chain2Addresses.mailbox, + owner: ownerAddress, + }, + [CHAIN_NAME_3]: { + type: TokenType.collateral, + token: tokenChain3.address, + mailbox: chain3Addresses.mailbox, + owner: ownerAddress, + }, + [CHAIN_NAME_4]: { + type: TokenType.synthetic, + mailbox: chain4Addresses.mailbox, + owner: ownerAddress, + }, + }; + writeYamlOrJson(warpDeploymentPath, warpConfig); + await hyperlaneWarpDeploy(warpDeploymentPath); + + // Bridge tokens from the collateral chains to the synthetic + await hyperlaneWarpSendRelay( + CHAIN_NAME_2, + CHAIN_NAME_4, + warpDeploymentPath, + true, + toWei(49), + ); + await hyperlaneWarpSendRelay( + CHAIN_NAME_3, + CHAIN_NAME_4, + warpDeploymentPath, + true, + toWei(51), + ); + + // Start the rebalancer + const warpRouteId = createWarpRouteConfigId(tokenSymbol.toUpperCase(), [ + CHAIN_NAME_2, + CHAIN_NAME_3, + CHAIN_NAME_4, + ]); + const process = hyperlaneWarpRebalancer(warpRouteId, 1000); + + // Verify that it logs an expected output + for await (const chunk of process.stdout) { + if ( + chunk.includes(`Executing strategy event: { + route: [ + { + origin: 'anvil2', + destination: 'anvil2', + token: '0x59b670e9fA9D0A427751Af201D676719a970857b', + amount: 49000000000000000000n + }, + { + origin: 'anvil3', + destination: 'anvil3', + token: '0x59b670e9fA9D0A427751Af201D676719a970857b', + amount: 51000000000000000000n + } + ]`) + ) { + 
process.kill(); + break; + } + } + }); + }); +}); diff --git a/typescript/cli/test-configs/anvil/chains/anvil4/metadata.yaml b/typescript/cli/test-configs/anvil/chains/anvil4/metadata.yaml new file mode 100644 index 00000000000..c80eb57afc9 --- /dev/null +++ b/typescript/cli/test-configs/anvil/chains/anvil4/metadata.yaml @@ -0,0 +1,22 @@ +# Configs for describing chain metadata for use in Hyperlane deployments or apps +# Consists of a map of chain names to metadata +# Schema here: https://github.com/hyperlane-xyz/hyperlane-monorepo/blob/main/typescript/sdk/src/metadata/chainMetadataTypes.ts +--- +chainId: 31348 +domainId: 31348 +name: anvil4 +protocol: ethereum +rpcUrls: + - http: http://127.0.0.1:8601 +blockExplorers: # Array: List of BlockExplorer configs + # Required fields: + - name: My Chain Explorer # String: Human-readable name for the explorer + url: https://mychain.com/explorer # String: Base URL for the explorer + apiUrl: https://mychain.com/api # String: Base URL for the explorer API + # Optional fields: + apiKey: myapikey # String: API key for the explorer (optional) + family: etherscan # ExplorerFamily: See ExplorerFamily for valid values +nativeToken: + name: Ether + symbol: ETH + decimals: 18 diff --git a/typescript/cosmos-sdk/.mocharc-e2e.json b/typescript/cosmos-sdk/.mocharc-e2e.json new file mode 100644 index 00000000000..44c560068b3 --- /dev/null +++ b/typescript/cosmos-sdk/.mocharc-e2e.json @@ -0,0 +1,8 @@ +{ + "extensions": ["ts"], + "spec": ["src/tests/index.e2e-test.ts"], + "node-option": [ + "experimental-specifier-resolution=node", + "loader=ts-node/esm" + ] +} diff --git a/typescript/cosmos-sdk/Dockerfile b/typescript/cosmos-sdk/Dockerfile new file mode 100644 index 00000000000..9c880958796 --- /dev/null +++ b/typescript/cosmos-sdk/Dockerfile @@ -0,0 +1,27 @@ +FROM golang:1.22 + +WORKDIR /app + +# install latest updates and clean up +RUN apt update && apt upgrade -y \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +# Define a 
build argument for the branch name +ARG BRANCH_NAME=v1.0.0-beta0 + +# install hypd from the specified branch +RUN git clone --depth 1 --branch $BRANCH_NAME https://github.com/bcp-innovations/hyperlane-cosmos.git \ + && cd hyperlane-cosmos \ + && make build-simapp \ + && mv build/hypd /app \ + && /app/hypd init-sample-chain \ + && cd .. \ + && rm -rf hyperlane-cosmos + +# rpc +EXPOSE 26657 +# api +EXPOSE 1317 + +CMD ["/app/hypd", "start"] diff --git a/typescript/cosmos-sdk/README.md b/typescript/cosmos-sdk/README.md index 55ca1ab0d5a..35f6fd8197e 100644 --- a/typescript/cosmos-sdk/README.md +++ b/typescript/cosmos-sdk/README.md @@ -63,6 +63,12 @@ await signer.signAndBroadcast(signer.getAccounts()[0], [txs...]); Node 18 or newer is required. +## Testing + +We have a `cosmos-sdk-e2e` job in CI that first runs a local node and then runs a suite of end-to-end tests. The `hyperlane-cosmos-simapp` image is created ad-hoc by the `simapp-docker` workflow, intended to be triggered manually by a developer when a new hyperlane-cosmos release is made. + +> Note: When updating the `cosmos-sdk` and `cosmos-types` package to a new `hyperlane-cosmos` version, it's important to release a new `hyperlane-cosmos-simapp` image and update the tag used in the `cosmos-sdk-e2e` job. This ensures that the end-to-end tests run against the correct version of the `hyperlane-cosmos` module. + ## Contribute First you need to install the dependencies by running `yarn install`. 
diff --git a/typescript/cosmos-sdk/compose.yaml b/typescript/cosmos-sdk/compose.yaml new file mode 100644 index 00000000000..98efb10438c --- /dev/null +++ b/typescript/cosmos-sdk/compose.yaml @@ -0,0 +1,6 @@ +services: + hyperlane-cosmos-simapp: + image: gcr.io/abacus-labs-dev/hyperlane-cosmos-simapp:v1.0.0-beta0 + ports: + - 26657:26657 + - 1317:1317 diff --git a/typescript/cosmos-sdk/eslint.config.mjs b/typescript/cosmos-sdk/eslint.config.mjs index 5be809affb9..18ef8a9fa54 100644 --- a/typescript/cosmos-sdk/eslint.config.mjs +++ b/typescript/cosmos-sdk/eslint.config.mjs @@ -5,4 +5,7 @@ export default [ { files: ['src/**/*.ts'], }, + { + ignores: ['src/tests/**/*.ts'], + }, ]; diff --git a/typescript/cosmos-sdk/package.json b/typescript/cosmos-sdk/package.json index d3585522ad2..74993fd98c0 100644 --- a/typescript/cosmos-sdk/package.json +++ b/typescript/cosmos-sdk/package.json @@ -26,16 +26,20 @@ "prettier": "prettier --write ./src", "clean": "rm -rf ./dist ./cache", "test": "echo \"no tests in cosmos-sdk\"", - "test:ci": "echo \"no tests in cosmos-sdk\"" + "test:ci": "echo \"no tests in cosmos-sdk\"", + "test:e2e": "./scripts/run-e2e-test.sh" }, "devDependencies": { "@eslint/js": "^9.15.0", + "@types/mocha": "^10.0.1", "@typescript-eslint/eslint-plugin": "^8.1.6", "@typescript-eslint/parser": "^8.1.6", "eslint": "^9.15.0", "eslint-config-prettier": "^9.1.0", "eslint-import-resolver-typescript": "^3.6.3", "eslint-plugin-import": "^2.31.0", + "mocha": "^10.2.0", + "mocha-steps": "^1.3.0", "prettier": "^2.8.8", "typescript": "5.3.3", "typescript-eslint": "^8.23.0" diff --git a/typescript/cosmos-sdk/scripts/run-e2e-test.sh b/typescript/cosmos-sdk/scripts/run-e2e-test.sh new file mode 100755 index 00000000000..962caa02589 --- /dev/null +++ b/typescript/cosmos-sdk/scripts/run-e2e-test.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +function cleanup() { + docker compose down +} + +cleanup + +echo "Preparing E2E tests" +docker compose up --detach --wait + +if [[ $? 
-ne 0 ]]; then + echo "Failure starting local cosmos chain" + exit 1 +fi + +echo "Running E2E tests" +yarn mocha --config .mocharc-e2e.json + +cleanup + +echo "Completed E2E tests" diff --git a/typescript/cosmos-sdk/src/tests/1_interchain_security.e2e-test.ts b/typescript/cosmos-sdk/src/tests/1_interchain_security.e2e-test.ts new file mode 100644 index 00000000000..576486b57c8 --- /dev/null +++ b/typescript/cosmos-sdk/src/tests/1_interchain_security.e2e-test.ts @@ -0,0 +1,166 @@ +import { expect } from 'chai'; +import { step } from 'mocha-steps'; + +import { + MerkleRootMultisigISM, + MessageIdMultisigISM, +} from '../../../cosmos-types/dist/types/hyperlane/core/interchain_security/v1/types.js'; +import { + bytes32ToAddress, + isValidAddressEvm, +} from '../../../utils/dist/addresses.js'; +import { SigningHyperlaneModuleClient } from '../index.js'; + +import { createSigner } from './utils.js'; + +describe('1. cosmos sdk interchain security e2e tests', async function () { + this.timeout(100_000); + + let signer: SigningHyperlaneModuleClient; + + before(async () => { + signer = await createSigner('alice'); + }); + + step('create new NOOP ISM', async () => { + // ARRANGE + let isms = await signer.query.interchainSecurity.Isms({}); + expect(isms.isms).to.be.empty; + + // ACT + const txResponse = await signer.createNoopIsm({}); + + // ASSERT + expect(txResponse.code).to.equal(0); + + const noopIsm = txResponse.response; + + expect(noopIsm.id).to.be.not.empty; + expect(isValidAddressEvm(bytes32ToAddress(noopIsm.id))).to.be.true; + + isms = await signer.query.interchainSecurity.Isms({}); + expect(isms.isms).to.have.lengthOf(1); + + let ism = await signer.query.interchainSecurity.Ism({ + id: noopIsm.id, + }); + expect(ism.ism?.type_url).to.equal( + '/hyperlane.core.interchain_security.v1.NoopISM', + ); + + let decodedIsm = await signer.query.interchainSecurity.DecodedIsm({ + id: noopIsm.id, + }); + expect(decodedIsm.ism.id).to.equal(noopIsm.id); + 
expect(decodedIsm.ism.owner).to.equal(signer.account.address); + }); + + step('create new MessageIdMultisig ISM', async () => { + // ARRANGE + let isms = await signer.query.interchainSecurity.Isms({}); + expect(isms.isms).to.have.lengthOf(1); + + const threshold = 2; + const validators = [ + '0x3C24F29fa75869A1C9D19d9d6589Aae0B5227c3c', + '0xf719b4CC64d0E3a380e52c2720Abab13835F6d9c', + '0x98A56EdE1d6Dd386216DA8217D9ac1d2EE7c27c7', + ]; + + // note that the validators need to be sorted alphabetically + validators.sort(); + + // ACT + const txResponse = await signer.createMessageIdMultisigIsm({ + validators, + threshold, + }); + + // ASSERT + expect(txResponse.code).to.equal(0); + + const messageIdIsm = txResponse.response; + + expect(messageIdIsm.id).to.be.not.empty; + expect(isValidAddressEvm(bytes32ToAddress(messageIdIsm.id))).to.be.true; + + isms = await signer.query.interchainSecurity.Isms({}); + expect(isms.isms).to.have.lengthOf(2); + + let ism = await signer.query.interchainSecurity.Ism({ + id: messageIdIsm.id, + }); + expect(ism.ism?.type_url).to.equal( + '/hyperlane.core.interchain_security.v1.MessageIdMultisigISM', + ); + + let decodedIsm = await signer.query.interchainSecurity.DecodedIsm({ + id: messageIdIsm.id, + }); + + expect(decodedIsm.ism.id).to.equal(messageIdIsm.id); + expect(decodedIsm.ism.owner).to.equal(signer.account.address); + + expect((decodedIsm.ism as MessageIdMultisigISM).threshold).to.equal( + threshold, + ); + expect((decodedIsm.ism as MessageIdMultisigISM).validators).deep.equal( + validators, + ); + }); + + step('create new MerkleRootMultisig ISM', async () => { + // ARRANGE + let isms = await signer.query.interchainSecurity.Isms({}); + expect(isms.isms).to.have.lengthOf(2); + + const threshold = 3; + const validators = [ + '0x0264258613775932aA466Be8BcC62418a9558eaB', + '0x829d3Cc78Fd664Bf160A17DaEad4df943ff7bAf0', + '0x3177Cc7328dE71Da934b1b7BF04b55C7D7251A63', + '0x270dC7A054a2aeda93Ee38a1b3C0727f5d8252d3', + ]; + + // note that 
the validators need to be sorted alphabetically + validators.sort(); + + // ACT + const txResponse = await signer.createMerkleRootMultisigIsm({ + validators, + threshold, + }); + + // ASSERT + expect(txResponse.code).to.equal(0); + + const merkleRootIsm = txResponse.response; + + expect(merkleRootIsm.id).to.be.not.empty; + expect(isValidAddressEvm(bytes32ToAddress(merkleRootIsm.id))).to.be.true; + + isms = await signer.query.interchainSecurity.Isms({}); + expect(isms.isms).to.have.lengthOf(3); + + let ism = await signer.query.interchainSecurity.Ism({ + id: merkleRootIsm.id, + }); + expect(ism.ism?.type_url).to.equal( + '/hyperlane.core.interchain_security.v1.MerkleRootMultisigISM', + ); + + let decodedIsm = await signer.query.interchainSecurity.DecodedIsm({ + id: merkleRootIsm.id, + }); + + expect(decodedIsm.ism.id).to.equal(merkleRootIsm.id); + expect(decodedIsm.ism.owner).to.equal(signer.account.address); + + expect((decodedIsm.ism as MerkleRootMultisigISM).threshold).to.equal( + threshold, + ); + expect((decodedIsm.ism as MerkleRootMultisigISM).validators).deep.equal( + validators, + ); + }); +}); diff --git a/typescript/cosmos-sdk/src/tests/2_core.e2e-test.ts b/typescript/cosmos-sdk/src/tests/2_core.e2e-test.ts new file mode 100644 index 00000000000..5479d96edf4 --- /dev/null +++ b/typescript/cosmos-sdk/src/tests/2_core.e2e-test.ts @@ -0,0 +1,159 @@ +import { expect } from 'chai'; +import { step } from 'mocha-steps'; + +import { + bytes32ToAddress, + isValidAddressEvm, +} from '../../../utils/dist/addresses.js'; +import { createAnnounce } from '../../../utils/src/validator.js'; +import { SigningHyperlaneModuleClient } from '../index.js'; + +import { createSigner } from './utils.js'; + +describe('2. 
cosmos sdk core e2e tests', async function () { + this.timeout(100_000); + + let signer: SigningHyperlaneModuleClient; + + before(async () => { + signer = await createSigner('alice'); + }); + + step('create new mailbox', async () => { + // ARRANGE + let mailboxes = await signer.query.core.Mailboxes({}); + expect(mailboxes.mailboxes).to.have.lengthOf(0); + + const { isms } = await signer.query.interchainSecurity.DecodedIsms({}); + // take the Noop ISM + const ismId = isms[0].id; + + const domainId = 1234; + + // ACT + const txResponse = await signer.createMailbox({ + local_domain: domainId, + default_ism: ismId, + default_hook: '', + required_hook: '', + }); + + // ASSERT + expect(txResponse.code).to.equal(0); + + const mailbox = txResponse.response; + + expect(mailbox.id).to.be.not.empty; + expect(isValidAddressEvm(bytes32ToAddress(mailbox.id))).to.be.true; + + mailboxes = await signer.query.core.Mailboxes({}); + expect(mailboxes.mailboxes).to.have.lengthOf(1); + + let mailboxQuery = await signer.query.core.Mailbox({ + id: mailbox.id, + }); + + expect(mailboxQuery.mailbox).not.to.be.undefined; + expect(mailboxQuery.mailbox?.id).to.equal(mailbox.id); + expect(mailboxQuery.mailbox?.owner).to.equal(signer.account.address); + expect(mailboxQuery.mailbox?.local_domain).to.equal(domainId); + expect(mailboxQuery.mailbox?.default_ism).to.equal(ismId); + expect(mailboxQuery.mailbox?.default_hook).to.be.empty; + expect(mailboxQuery.mailbox?.required_hook).to.be.empty; + }); + + step('set mailbox', async () => { + // ARRANGE + const newOwner = (await createSigner('bob')).account.address; + + const domainId = 1234; + + const { isms } = await signer.query.interchainSecurity.DecodedIsms({}); + // this should be a noop ISM + const ismId = isms[0].id; + + const createMailboxTxResponse = await signer.createMailbox({ + local_domain: domainId, + default_ism: ismId, + default_hook: '', + required_hook: '', + }); + expect(createMailboxTxResponse.code).to.equal(0); + + let mailboxes = 
await signer.query.core.Mailboxes({}); + expect(mailboxes.mailboxes).to.have.lengthOf(2); + + const mailboxBefore = mailboxes.mailboxes[mailboxes.mailboxes.length - 1]; + expect(mailboxBefore.owner).to.equal(signer.account.address); + + // ACT + const txResponse = await signer.setMailbox({ + mailbox_id: mailboxBefore.id, + default_ism: '', + default_hook: '', + required_hook: '', + new_owner: newOwner, + }); + + // ASSERT + expect(txResponse.code).to.equal(0); + + mailboxes = await signer.query.core.Mailboxes({}); + expect(mailboxes.mailboxes).to.have.lengthOf(2); + + const mailboxAfter = mailboxes.mailboxes[mailboxes.mailboxes.length - 1]; + + expect(mailboxAfter.id).to.equal(mailboxBefore.id); + expect(mailboxAfter.owner).to.equal(newOwner); + expect(mailboxAfter.local_domain).to.equal(mailboxBefore.local_domain); + expect(mailboxAfter.default_ism).to.equal(mailboxBefore.default_ism); + expect(mailboxAfter.default_hook).to.equal(mailboxBefore.default_hook); + expect(mailboxAfter.required_hook).to.equal(mailboxBefore.required_hook); + }); + + step('announce validator', async () => { + // ARRANGE + const validatorAddress = '0x0b1caf89d1edb9ee161093b1ec94ca75611db492'; + const validatorPrivKey = + '38430941d3ea0e70f9a16192a833dbbf3541b3170781042067173bfe6cba4508'; + const storageLocation = 'aws://key.pub'; + + let mailboxes = await signer.query.core.Mailboxes({}); + expect(mailboxes.mailboxes).to.have.lengthOf(2); + + const mailbox = mailboxes.mailboxes[0]; + + const signature = await createAnnounce( + validatorPrivKey, + storageLocation, + mailbox.id, + mailbox.local_domain, + ); + + // ACT + const txResponse = await signer.announceValidator({ + validator: validatorAddress, + storage_location: storageLocation, + signature, + mailbox_id: mailbox.id, + }); + + // ASSERT + expect(txResponse.code).to.equal(0); + + let storageLocations = + await signer.query.interchainSecurity.AnnouncedStorageLocations({ + mailbox_id: mailbox.id, + validator_address: validatorAddress, + 
}); + expect(storageLocations.storage_locations).to.have.lengthOf(1); + expect(storageLocations.storage_locations[0]).to.equal(storageLocation); + + let latestStorageLocation = + await signer.query.interchainSecurity.LatestAnnouncedStorageLocation({ + mailbox_id: mailbox.id, + validator_address: validatorAddress, + }); + expect(latestStorageLocation.storage_location).to.equal(storageLocation); + }); +}); diff --git a/typescript/cosmos-sdk/src/tests/3_post_dispatch.e2e-test.ts b/typescript/cosmos-sdk/src/tests/3_post_dispatch.e2e-test.ts new file mode 100644 index 00000000000..f01365799f4 --- /dev/null +++ b/typescript/cosmos-sdk/src/tests/3_post_dispatch.e2e-test.ts @@ -0,0 +1,282 @@ +import { expect } from 'chai'; +import { step } from 'mocha-steps'; + +import { + bytes32ToAddress, + isValidAddressEvm, +} from '../../../utils/dist/addresses.js'; +import { formatMessage, messageId } from '../../../utils/src/messages.js'; +import { SigningHyperlaneModuleClient } from '../index.js'; + +import { createSigner } from './utils.js'; + +describe('3. 
cosmos sdk post dispatch e2e tests', async function () { + this.timeout(100_000); + + let signer: SigningHyperlaneModuleClient; + + before(async () => { + signer = await createSigner('alice'); + }); + + step('create new IGP hook', async () => { + // ARRANGE + let igps = await signer.query.postDispatch.Igps({}); + expect(igps.igps).to.have.lengthOf(0); + + const denom = 'uhyp'; + + // ACT + const txResponse = await signer.createIgp({ + denom, + }); + + // ASSERT + expect(txResponse.code).to.equal(0); + + const igp = txResponse.response; + + expect(igp.id).to.be.not.empty; + expect(isValidAddressEvm(bytes32ToAddress(igp.id))).to.be.true; + + igps = await signer.query.postDispatch.Igps({}); + expect(igps.igps).to.have.lengthOf(1); + + let igpQuery = await signer.query.postDispatch.Igp({ + id: igp.id, + }); + + expect(igpQuery.igp).not.to.be.undefined; + expect(igpQuery.igp?.owner).to.equal(signer.account.address); + expect(igpQuery.igp?.denom).to.equal(denom); + }); + + step('create new Merkle Tree hook', async () => { + // ARRANGE + let merkleTrees = await signer.query.postDispatch.MerkleTreeHooks({}); + expect(merkleTrees.merkle_tree_hooks).to.have.lengthOf(0); + + let mailboxes = await signer.query.core.Mailboxes({}); + expect(mailboxes.mailboxes).to.have.lengthOf(2); + + const mailbox = mailboxes.mailboxes[0]; + + // ACT + const txResponse = await signer.createMerkleTreeHook({ + mailbox_id: mailbox.id, + }); + + // ASSERT + expect(txResponse.code).to.equal(0); + + const merleTree = txResponse.response; + + expect(merleTree.id).to.be.not.empty; + expect(isValidAddressEvm(bytes32ToAddress(merleTree.id))).to.be.true; + + merkleTrees = await signer.query.postDispatch.MerkleTreeHooks({}); + expect(merkleTrees.merkle_tree_hooks).to.have.lengthOf(1); + + let merkleTreeQuery = await signer.query.postDispatch.MerkleTreeHook({ + id: merleTree.id, + }); + + expect(merkleTreeQuery.merkle_tree_hook).not.to.be.undefined; + 
expect(merkleTreeQuery.merkle_tree_hook?.owner).to.equal( + signer.account.address, + ); + expect(merkleTreeQuery.merkle_tree_hook?.mailbox_id).to.equal(mailbox.id); + }); + + step('create new Noop hook', async () => { + // ARRANGE + let noopHooks = await signer.query.postDispatch.NoopHooks({}); + expect(noopHooks.noop_hooks).to.have.lengthOf(0); + + // ACT + const txResponse = await signer.createNoopHook({}); + + // ASSERT + expect(txResponse.code).to.equal(0); + + const noopHook = txResponse.response; + + expect(noopHook.id).to.be.not.empty; + expect(isValidAddressEvm(bytes32ToAddress(noopHook.id))).to.be.true; + + noopHooks = await signer.query.postDispatch.NoopHooks({}); + expect(noopHooks.noop_hooks).to.have.lengthOf(1); + + let noopHookQuery = await signer.query.postDispatch.NoopHook({ + id: noopHook.id, + }); + + expect(noopHookQuery.noop_hook).not.to.be.undefined; + expect(noopHookQuery.noop_hook?.owner).to.equal(signer.account.address); + }); + + step('set destination gas config', async () => { + // ARRANGE + let igps = await signer.query.postDispatch.Igps({}); + expect(igps.igps).to.have.lengthOf(1); + + const igp = igps.igps[0]; + const remoteDomainId = 1234; + const gasOverhead = '200000'; + const gasPrice = '1'; + const tokenExchangeRate = '10000000000'; + + let gasConfigs = await signer.query.postDispatch.DestinationGasConfigs({ + id: igp.id, + }); + expect(gasConfigs.destination_gas_configs).to.have.lengthOf(0); + + // ACT + const txResponse = await signer.setDestinationGasConfig({ + igp_id: igp.id, + destination_gas_config: { + remote_domain: remoteDomainId, + gas_oracle: { + token_exchange_rate: tokenExchangeRate, + gas_price: gasPrice, + }, + gas_overhead: gasOverhead, + }, + }); + + // ASSERT + expect(txResponse.code).to.equal(0); + + gasConfigs = await signer.query.postDispatch.DestinationGasConfigs({ + id: igp.id, + }); + expect(gasConfigs.destination_gas_configs).to.have.lengthOf(1); + + const gasConfig = gasConfigs.destination_gas_configs[0]; 
+ + expect(gasConfig.remote_domain).to.equal(remoteDomainId); + expect(gasConfig.gas_overhead).to.equal(gasOverhead); + expect(gasConfig.gas_oracle?.gas_price).to.equal(gasPrice); + expect(gasConfig.gas_oracle?.token_exchange_rate).to.equal( + tokenExchangeRate, + ); + }); + + step('pay for gas', async () => { + // ARRANGE + const address = '0xA56009c72c0191a1D56e2feA5Bd8250707FF1874'; + const destinationDomainId = 1234; + const denom = 'uhyp'; + const amount = { + denom, + amount: '1000000', + }; + + const igpCreateTxResponse = await signer.createIgp({ + denom, + }); + expect(igpCreateTxResponse.code).to.equal(0); + + let igps = await signer.query.postDispatch.Igps({}); + expect(igps.igps).to.have.lengthOf(2); + + const igpBefore = igps.igps[igps.igps.length - 1]; + expect(igpBefore.claimable_fees).to.be.empty; + + const testMessageId = messageId( + formatMessage( + 1, + 0, + destinationDomainId, + address, + destinationDomainId, + address, + '0x1234', + ), + ); + + // ACT + const txResponse = await signer.payForGas({ + igp_id: igpBefore.id, + message_id: testMessageId, + destination_domain: destinationDomainId, + gas_limit: '10000', + amount, + }); + + // ASSERT + expect(txResponse.code).to.equal(0); + + igps = await signer.query.postDispatch.Igps({}); + expect(igps.igps).to.have.lengthOf(2); + + const igpAfter = igps.igps[igps.igps.length - 1]; + + expect(igpAfter.id).to.equal(igpBefore.id); + expect(igpAfter.denom).to.equal(igpBefore.denom); + expect(igpAfter.claimable_fees).to.have.lengthOf(1); + expect(igpAfter.claimable_fees[0]).deep.equal(amount); + }); + + step('claim', async () => { + // ARRANGE + const denom = 'uhyp'; + const amount = { + denom, + amount: '1000000', + }; + + let igps = await signer.query.postDispatch.Igps({}); + expect(igps.igps).to.have.lengthOf(2); + + const igpBefore = igps.igps[igps.igps.length - 1]; + expect(igpBefore.claimable_fees).to.have.lengthOf(1); + expect(igpBefore.claimable_fees[0]).deep.equal(amount); + + // ACT + const 
txResponse = await signer.claim({ + igp_id: igpBefore.id, + }); + + // ASSERT + expect(txResponse.code).to.equal(0); + + igps = await signer.query.postDispatch.Igps({}); + expect(igps.igps).to.have.lengthOf(2); + + const igpAfter = igps.igps[igps.igps.length - 1]; + + expect(igpAfter.id).to.equal(igpBefore.id); + expect(igpAfter.denom).to.equal(igpBefore.denom); + expect(igpAfter.claimable_fees).to.be.empty; + }); + + step('set igp owner', async () => { + // ARRANGE + const newOwner = (await createSigner('bob')).account.address; + + let igps = await signer.query.postDispatch.Igps({}); + expect(igps.igps).to.have.lengthOf(2); + + const igpBefore = igps.igps[igps.igps.length - 1]; + expect(igpBefore.owner).to.equal(signer.account.address); + + // ACT + const txResponse = await signer.setIgpOwner({ + igp_id: igpBefore.id, + new_owner: newOwner, + }); + + // ASSERT + expect(txResponse.code).to.equal(0); + + igps = await signer.query.postDispatch.Igps({}); + expect(igps.igps).to.have.lengthOf(2); + + const igpAfter = igps.igps[igps.igps.length - 1]; + + expect(igpAfter.id).to.equal(igpBefore.id); + expect(igpAfter.owner).to.equal(newOwner); + expect(igpAfter.denom).to.equal(igpBefore.denom); + }); +}); diff --git a/typescript/cosmos-sdk/src/tests/4_warp.e2e-test.ts b/typescript/cosmos-sdk/src/tests/4_warp.e2e-test.ts new file mode 100644 index 00000000000..aba04fa4579 --- /dev/null +++ b/typescript/cosmos-sdk/src/tests/4_warp.e2e-test.ts @@ -0,0 +1,331 @@ +import { expect } from 'chai'; +import { step } from 'mocha-steps'; + +import { HypTokenType } from '../../../cosmos-types/src/types/hyperlane/warp/v1/types.js'; +import { + addressToBytes32, + bytes32ToAddress, + convertToProtocolAddress, + isValidAddressEvm, +} from '../../../utils/src/addresses.js'; +import { formatMessage } from '../../../utils/src/messages.js'; +import { ProtocolType } from '../../../utils/src/types.js'; +import { SigningHyperlaneModuleClient } from '../index.js'; + +import { createSigner } from 
'./utils.js'; + +describe('4. cosmos sdk warp e2e tests', async function () { + this.timeout(100_000); + + let signer: SigningHyperlaneModuleClient; + + before(async () => { + signer = await createSigner('alice'); + }); + + step('create new collateral token', async () => { + // ARRANGE + let tokens = await signer.query.warp.Tokens({}); + expect(tokens.tokens).to.have.lengthOf(0); + + let mailboxes = await signer.query.core.Mailboxes({}); + expect(mailboxes.mailboxes).to.have.lengthOf(2); + + const mailbox = mailboxes.mailboxes[0]; + const denom = 'uhyp'; + + // ACT + const txResponse = await signer.createCollateralToken({ + origin_mailbox: mailbox.id, + origin_denom: denom, + }); + + // ASSERT + expect(txResponse.code).to.equal(0); + + const token = txResponse.response; + + expect(token.id).to.be.not.empty; + expect(isValidAddressEvm(bytes32ToAddress(token.id))).to.be.true; + + tokens = await signer.query.warp.Tokens({}); + expect(tokens.tokens).to.have.lengthOf(1); + + let tokenQuery = await signer.query.warp.Token({ + id: token.id, + }); + + expect(tokenQuery.token).not.to.be.undefined; + expect(tokenQuery.token?.owner).to.equal(signer.account.address); + expect(tokenQuery.token?.origin_mailbox).to.equal(mailbox.id); + expect(tokenQuery.token?.origin_denom).to.equal(denom); + expect(tokenQuery.token?.ism_id).to.be.empty; + expect(tokenQuery.token?.token_type).to.equal( + HypTokenType.HYP_TOKEN_TYPE_COLLATERAL, + ); + }); + + step('create new synthetic token', async () => { + // ARRANGE + let tokens = await signer.query.warp.Tokens({}); + expect(tokens.tokens).to.have.lengthOf(1); + + let mailboxes = await signer.query.core.Mailboxes({}); + expect(mailboxes.mailboxes).to.have.lengthOf(2); + + const mailbox = mailboxes.mailboxes[0]; + + // ACT + const txResponse = await signer.createSyntheticToken({ + origin_mailbox: mailbox.id, + }); + + // ASSERT + expect(txResponse.code).to.equal(0); + + const token = txResponse.response; + + expect(token.id).to.be.not.empty; + 
expect(isValidAddressEvm(bytes32ToAddress(token.id))).to.be.true; + + tokens = await signer.query.warp.Tokens({}); + expect(tokens.tokens).to.have.lengthOf(2); + + let tokenQuery = await signer.query.warp.Token({ + id: token.id, + }); + + expect(tokenQuery.token).not.to.be.undefined; + expect(tokenQuery.token?.owner).to.equal(signer.account.address); + expect(tokenQuery.token?.origin_mailbox).to.equal(mailbox.id); + expect(tokenQuery.token?.origin_denom).to.equal(`hyperlane/${token.id}`); + expect(tokenQuery.token?.ism_id).to.be.empty; + expect(tokenQuery.token?.token_type).to.equal( + HypTokenType.HYP_TOKEN_TYPE_SYNTHETIC, + ); + }); + + step('enroll remote router', async () => { + // ARRANGE + let tokens = await signer.query.warp.Tokens({}); + expect(tokens.tokens).to.have.lengthOf(2); + + const token = tokens.tokens[0]; + + let mailboxes = await signer.query.core.Mailboxes({}); + expect(mailboxes.mailboxes).to.have.lengthOf(2); + + const mailbox = mailboxes.mailboxes[0]; + + let remoteRouters = await signer.query.warp.RemoteRouters({ + id: token.id, + }); + expect(remoteRouters.remote_routers).to.have.lengthOf(0); + const gas = '10000'; + + // ACT + const txResponse = await signer.enrollRemoteRouter({ + token_id: token.id, + remote_router: { + receiver_domain: mailbox.local_domain, + receiver_contract: mailbox.id, + gas, + }, + }); + + // ASSERT + expect(txResponse.code).to.equal(0); + + remoteRouters = await signer.query.warp.RemoteRouters({ + id: token.id, + }); + expect(remoteRouters.remote_routers).to.have.lengthOf(1); + + const remoteRouter = remoteRouters.remote_routers[0]; + + expect(remoteRouter.receiver_domain).to.equal(mailbox.local_domain); + expect(remoteRouter.receiver_contract).to.equal(mailbox.id); + expect(remoteRouter.gas).to.equal(gas); + }); + + step('remote transfer', async () => { + // ARRANGE + let tokens = await signer.query.warp.Tokens({}); + expect(tokens.tokens).to.have.lengthOf(2); + + const token = tokens.tokens[0]; + + let mailboxes 
= await signer.query.core.Mailboxes({}); + expect(mailboxes.mailboxes).to.have.lengthOf(2); + + let mailbox = mailboxes.mailboxes[0]; + expect(mailbox.message_sent).to.equal(0); + + const isms = await signer.query.interchainSecurity.DecodedIsms({}); + const igps = await signer.query.postDispatch.Igps({}); + const merkleTreeHooks = await signer.query.postDispatch.MerkleTreeHooks({}); + + const mailboxTxResponse = await signer.setMailbox({ + mailbox_id: mailbox.id, + default_ism: isms.isms[0].id, + default_hook: igps.igps[0].id, + required_hook: merkleTreeHooks.merkle_tree_hooks[0].id, + new_owner: '', + }); + expect(mailboxTxResponse.code).to.equal(0); + + let remoteRouters = await signer.query.warp.RemoteRouters({ + id: token.id, + }); + expect(remoteRouters.remote_routers).to.have.lengthOf(1); + + const remoteRouter = remoteRouters.remote_routers[0]; + + const interchainGas = await signer.query.warp.QuoteRemoteTransfer({ + id: token.id, + destination_domain: remoteRouter.receiver_domain.toString(), + }); + + // ACT + const txResponse = await signer.remoteTransfer({ + token_id: token.id, + destination_domain: remoteRouter.receiver_domain, + recipient: addressToBytes32( + convertToProtocolAddress(signer.account.address, ProtocolType.Ethereum), + ProtocolType.Ethereum, + ), + amount: '1000000', + custom_hook_id: '', + gas_limit: remoteRouter.gas, + max_fee: interchainGas.gas_payment[0], + custom_hook_metadata: '', + }); + + // ASSERT + expect(txResponse.code).to.equal(0); + + const messageId = txResponse.response.message_id; + expect(isValidAddressEvm(bytes32ToAddress(messageId))).to.be.true; + + mailboxes = await signer.query.core.Mailboxes({}); + expect(mailboxes.mailboxes).to.have.lengthOf(2); + + mailbox = mailboxes.mailboxes[0]; + expect(mailbox.message_sent).to.equal(1); + }); + + step('process message', async () => { + // ARRANGE + const domainId = 1234; + const gas = '10000'; + + let mailboxes = await signer.query.core.Mailboxes({}); + 
expect(mailboxes.mailboxes).to.have.lengthOf(2); + + const mailboxBefore = mailboxes.mailboxes[0]; + expect(mailboxBefore.message_received).to.equal(0); + + let tokens = await signer.query.warp.Tokens({}); + expect(tokens.tokens).to.have.lengthOf(2); + + const token = tokens.tokens[1]; + + const routerTxResponse = await signer.enrollRemoteRouter({ + token_id: token.id, + remote_router: { + receiver_domain: mailboxBefore.local_domain, + receiver_contract: mailboxBefore.id, + gas, + }, + }); + + expect(routerTxResponse.code).to.equal(0); + + const message = formatMessage( + 3, + 0, + domainId, + mailboxBefore.id, + mailboxBefore.local_domain, + token.id, + '0x0000000000000000000000000c60e7ecd06429052223c78452f791aab5c5cac60000000000000000000000000000000000000000000000000000000002faf080', + ); + + // ACT + const txResponse = await signer.processMessage({ + mailbox_id: mailboxBefore.id, + metadata: '', + message, + }); + + // ASSERT + expect(txResponse.code).to.equal(0); + + mailboxes = await signer.query.core.Mailboxes({}); + expect(mailboxes.mailboxes).to.have.lengthOf(2); + + const mailboxAfter = mailboxes.mailboxes[0]; + expect(mailboxAfter.message_received).to.equal(1); + }); + + step('unroll remote router', async () => { + // ARRANGE + let tokens = await signer.query.warp.Tokens({}); + expect(tokens.tokens).to.have.lengthOf(2); + + const token = tokens.tokens[0]; + + let remoteRouters = await signer.query.warp.RemoteRouters({ + id: token.id, + }); + expect(remoteRouters.remote_routers).to.have.lengthOf(1); + + const receiverDomainId = 1234; + + // ACT + const txResponse = await signer.unrollRemoteRouter({ + token_id: token.id, + receiver_domain: receiverDomainId, + }); + + // ASSERT + expect(txResponse.code).to.equal(0); + + remoteRouters = await signer.query.warp.RemoteRouters({ + id: token.id, + }); + expect(remoteRouters.remote_routers).to.have.lengthOf(0); + }); + + step('set token', async () => { + // ARRANGE + const newOwner = (await 
createSigner('bob')).account.address; + + let tokens = await signer.query.warp.Tokens({}); + expect(tokens.tokens).to.have.lengthOf(2); + + const tokenBefore = tokens.tokens[tokens.tokens.length - 1]; + + // ACT + const txResponse = await signer.setToken({ + token_id: tokenBefore.id, + ism_id: '', + new_owner: newOwner, + }); + + // ASSERT + expect(txResponse.code).to.equal(0); + + tokens = await signer.query.warp.Tokens({}); + expect(tokens.tokens).to.have.lengthOf(2); + + const tokenAfter = tokens.tokens[tokens.tokens.length - 1]; + + expect(tokenAfter.id).to.equal(tokenBefore.id); + expect(tokenAfter.owner).to.equal(newOwner); + expect(tokenAfter.origin_mailbox).to.equal(tokenBefore.origin_mailbox); + expect(tokenAfter.origin_denom).to.equal(tokenBefore.origin_denom); + expect(tokenAfter.ism_id).to.equal(tokenBefore.ism_id); + expect(tokenAfter.token_type).to.equal(tokenBefore.token_type); + }); +}); diff --git a/typescript/cosmos-sdk/src/tests/index.e2e-test.ts b/typescript/cosmos-sdk/src/tests/index.e2e-test.ts new file mode 100644 index 00000000000..b3cc13d21f4 --- /dev/null +++ b/typescript/cosmos-sdk/src/tests/index.e2e-test.ts @@ -0,0 +1,5 @@ +// enforce order of test suites +import './1_interchain_security.e2e-test.js'; +import './2_core.e2e-test.js'; +import './3_post_dispatch.e2e-test.js'; +import './4_warp.e2e-test.js'; diff --git a/typescript/cosmos-sdk/src/tests/utils.ts b/typescript/cosmos-sdk/src/tests/utils.ts new file mode 100644 index 00000000000..597cb5dbc12 --- /dev/null +++ b/typescript/cosmos-sdk/src/tests/utils.ts @@ -0,0 +1,29 @@ +import { DirectSecp256k1Wallet } from '@cosmjs/proto-signing'; +import { GasPrice } from '@cosmjs/stargate'; + +import { SigningHyperlaneModuleClient } from '../index.js'; + +// These private keys are public and contain funds on the Hyperlane Cosmos Simapp chain +// which are only used for testing and contain no real funds. 
+// +// DO NOT USE THOSE KEYS IN PRODUCTION +const PKS = { + alice: '33913dd43a5d5764f7a23da212a8664fc4f5eedc68db35f3eb4a5c4f046b5b51', + bob: '0afcf195989ebb6306f23271e50832332180b73055eb57f6d3c53263127e7d78', + charlie: '8ef41fc20bf963ce18494c0f13e9303f70abc4c1d1ecfdb0a329d7fd468865b8', +}; + +export const createSigner = async (account: 'alice' | 'bob' | 'charlie') => { + const wallet = await DirectSecp256k1Wallet.fromKey( + Buffer.from(PKS[account], 'hex'), + 'hyp', + ); + + return SigningHyperlaneModuleClient.connectWithSigner( + 'http://127.0.0.1:26657', + wallet, + { + gasPrice: GasPrice.fromString('0.2uhyp'), + }, + ); +}; diff --git a/typescript/infra/config/environments/mainnet3/agent.ts b/typescript/infra/config/environments/mainnet3/agent.ts index ce508689c81..3a3eafca1f5 100644 --- a/typescript/infra/config/environments/mainnet3/agent.ts +++ b/typescript/infra/config/environments/mainnet3/agent.ts @@ -9,6 +9,7 @@ import { GasPaymentEnforcementPolicyType, IsmCacheConfig, IsmCachePolicy, + IsmCacheSelectorType, MatchingList, ModuleType, RpcConsensusType, @@ -740,19 +741,24 @@ const blacklist: MatchingList = [ })), ]; -const defaultIsmCacheConfig: IsmCacheConfig = { - // Default ISM Routing ISMs change configs based off message content, - // so they are not specified here. - moduleTypes: [ - ModuleType.AGGREGATION, - ModuleType.MERKLE_ROOT_MULTISIG, - ModuleType.MESSAGE_ID_MULTISIG, - ], - // SVM is explicitly not cached as the default ISM is a multisig ISM - // that routes internally. - chains: ethereumChainNames, - cachePolicy: IsmCachePolicy.IsmSpecific, -}; +const ismCacheConfigs: Array = [ + { + selector: { + type: IsmCacheSelectorType.DefaultIsm, + }, + // Default ISM Routing ISMs change configs based off message content, + // so they are not specified here. 
+ moduleTypes: [ + ModuleType.AGGREGATION, + ModuleType.MERKLE_ROOT_MULTISIG, + ModuleType.MESSAGE_ID_MULTISIG, + ], + // SVM is explicitly not cached as the default ISM is a multisig ISM + // that routes internally. + chains: ethereumChainNames, + cachePolicy: IsmCachePolicy.IsmSpecific, + }, +]; const hyperlane: RootAgentConfig = { ...contextBase, @@ -763,12 +769,12 @@ const hyperlane: RootAgentConfig = { rpcConsensusType: RpcConsensusType.Fallback, docker: { repo, - tag: 'cecb0d8-20250411-150743', + tag: 'da3978b-20250414-155929', }, blacklist, gasPaymentEnforcement: gasPaymentEnforcement, metricAppContextsGetter, - defaultIsmCacheConfig, + ismCacheConfigs, allowContractCallCaching: true, resources: relayerResources, }, @@ -801,7 +807,7 @@ const releaseCandidate: RootAgentConfig = { rpcConsensusType: RpcConsensusType.Fallback, docker: { repo, - tag: 'cecb0d8-20250411-150743', + tag: 'da3978b-20250414-155929', }, blacklist, // We're temporarily (ab)using the RC relayer as a way to increase @@ -809,7 +815,7 @@ const releaseCandidate: RootAgentConfig = { // whitelist: releaseCandidateHelloworldMatchingList, gasPaymentEnforcement, metricAppContextsGetter, - defaultIsmCacheConfig, + ismCacheConfigs, allowContractCallCaching: true, resources: relayerResources, }, @@ -842,7 +848,7 @@ const neutron: RootAgentConfig = { blacklist, gasPaymentEnforcement, metricAppContextsGetter, - defaultIsmCacheConfig, + ismCacheConfigs, allowContractCallCaching: true, resources: relayerResources, }, diff --git a/typescript/infra/config/environments/mainnet3/funding.ts b/typescript/infra/config/environments/mainnet3/funding.ts index 76b60664658..f47f18b4ea0 100644 --- a/typescript/infra/config/environments/mainnet3/funding.ts +++ b/typescript/infra/config/environments/mainnet3/funding.ts @@ -19,7 +19,7 @@ export const keyFunderConfig: KeyFunderConfig< > = { docker: { repo: 'gcr.io/abacus-labs-dev/hyperlane-monorepo', - tag: '3fc4e3f-20250403-133425', + tag: '4fd2990-20250414-150005', }, 
// We're currently using the same deployer/key funder key as mainnet2. // To minimize nonce clobbering we offset the key funder cron @@ -33,6 +33,7 @@ export const keyFunderConfig: KeyFunderConfig< [Contexts.Hyperlane]: [Role.Relayer, Role.Kathy], [Contexts.ReleaseCandidate]: [Role.Relayer, Role.Kathy], }, + chainsToSkip: [], // desired balance config, must be set for each chain desiredBalancePerChain: desiredRelayerBalancePerChain, // if not set, keyfunder defaults to 0 diff --git a/typescript/infra/config/environments/mainnet3/governance/safe/regular.ts b/typescript/infra/config/environments/mainnet3/governance/safe/regular.ts new file mode 100644 index 00000000000..acb47bbc64f --- /dev/null +++ b/typescript/infra/config/environments/mainnet3/governance/safe/regular.ts @@ -0,0 +1,27 @@ +import { ChainMap } from '@hyperlane-xyz/sdk'; +import { Address } from '@hyperlane-xyz/utils'; + +export const regularSafes: ChainMap
= { + abstract: '0xcd81ccFe7D9306849136Fa96397113345a32ECf3', + arbitrum: '0x7379D7bB2ccA68982E467632B6554fD4e72e9431', + base: '0x890ac177Fe3052B8676A65f32C1589Bc329f3d50', + berachain: '0x7379D7bB2ccA68982E467632B6554fD4e72e9431', + blast: '0x7379D7bB2ccA68982E467632B6554fD4e72e9431', + bsc: '0x7379D7bB2ccA68982E467632B6554fD4e72e9431', + ethereum: '0x562Dfaac27A84be6C96273F5c9594DA1681C0DA7', + fraxtal: '0x890ac177Fe3052B8676A65f32C1589Bc329f3d50', + hyperevm: '0x290Eb7bbf939A36B2c350a668c04815E49757eDC', + linea: '0x7379D7bB2ccA68982E467632B6554fD4e72e9431', + mantapacific: '0x7379D7bB2ccA68982E467632B6554fD4e72e9431', + mode: '0x7379D7bB2ccA68982E467632B6554fD4e72e9431', + optimism: '0x890ac177Fe3052B8676A65f32C1589Bc329f3d50', + sei: '0x7379D7bB2ccA68982E467632B6554fD4e72e9431', + sophon: '0x113d3a19031Fe5DB58884D6aa54545dD4De499c0', + swell: '0x7379D7bB2ccA68982E467632B6554fD4e72e9431', + taiko: '0x890ac177Fe3052B8676A65f32C1589Bc329f3d50', + treasure: '0xcd81ccFe7D9306849136Fa96397113345a32ECf3', + zeronetwork: '0xcd81ccFe7D9306849136Fa96397113345a32ECf3', + zksync: '0xcd81ccFe7D9306849136Fa96397113345a32ECf3', + zklink: '0xcd81ccFe7D9306849136Fa96397113345a32ECf3', + zircuit: '0x7379D7bB2ccA68982E467632B6554fD4e72e9431', +}; diff --git a/typescript/infra/config/environments/mainnet3/governance/safe/safeConfig.ts b/typescript/infra/config/environments/mainnet3/governance/safe/safeConfig.ts new file mode 100644 index 00000000000..277bd56b055 --- /dev/null +++ b/typescript/infra/config/environments/mainnet3/governance/safe/safeConfig.ts @@ -0,0 +1,16 @@ +import { Address } from '@hyperlane-xyz/utils'; + +export const SIGNERS: Address[] = [ + '0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba', // 1 + '0xc3E966E79eF1aA4751221F55fB8A36589C24C0cA', // 2 + '0x2f43Ac3cD6A22E4Ba20d3d18d116b1f9420eD84B', // 3 + '0xfae231524539698f1d136d7b21e3b4144cdbf2a3', // 4 + '0x2C073004A6e4f37377F848193d6433260Ebe9b99', // 5 + '0x9f500df92175b2ac36f8d443382b219d211d354a', // 6 + 
'0x82950a6356316272dF1928C72F5F0A44D9673c88', // 7 + '0x861FC61a961F8AFDf115B8DE274101B9ECea2F26', // 8 + '0x3b548E88BA3259A6f45DEeA91449cdda5cF164b3', // 9 + '0xD5c0D17cCb9071D27a4F7eD8255F59989b9aee0d', // 10 +]; + +export const THRESHOLD = 1; diff --git a/typescript/infra/config/environments/testnet4/agent.ts b/typescript/infra/config/environments/testnet4/agent.ts index bc3b321bf80..d76efb607a4 100644 --- a/typescript/infra/config/environments/testnet4/agent.ts +++ b/typescript/infra/config/environments/testnet4/agent.ts @@ -3,6 +3,7 @@ import { GasPaymentEnforcementPolicyType, IsmCacheConfig, IsmCachePolicy, + IsmCacheSelectorType, ModuleType, RpcConsensusType, } from '@hyperlane-xyz/sdk'; @@ -14,6 +15,7 @@ import { } from '../../../src/config/agent/agent.js'; import { BaseRelayerConfig, + MetricAppContext, routerMatchingList, } from '../../../src/config/agent/relayer.js'; import { ALL_KEY_ROLES, Role } from '../../../src/roles.js'; @@ -228,26 +230,68 @@ const scraperResources = { }, }; -const defaultIsmCacheConfig: IsmCacheConfig = { - // Default ISM Routing ISMs change configs based off message content, - // so they are not specified here. - moduleTypes: [ - ModuleType.AGGREGATION, - ModuleType.MERKLE_ROOT_MULTISIG, - ModuleType.MESSAGE_ID_MULTISIG, - ], - // SVM is explicitly not cached as the default ISM is a multisig ISM - // that routes internally. - chains: ethereumChainNames, - cachePolicy: IsmCachePolicy.IsmSpecific, -}; - -const relayBlacklist: BaseRelayerConfig['blacklist'] = [ +// Kessel is a load test, these are contracts involved in the load +// test that we want to have certain relayers focus on or ignore. +const kesselMatchingList = [ { - // Ignore kessel runner test recipients. - // All 5 test recipients have the same address. 
recipientAddress: '0x492b3653A38e229482Bab2f7De4A094B18017246', }, +]; + +const kesselAppContext = 'kessel'; + +const metricAppContextsGetter = (): MetricAppContext[] => [ + { + name: 'helloworld', + matchingList: routerMatchingList(helloWorld[Contexts.Hyperlane].addresses), + }, + { + name: kesselAppContext, + matchingList: kesselMatchingList, + }, +]; + +const ismCacheConfigs: Array = [ + { + selector: { + type: IsmCacheSelectorType.DefaultIsm, + }, + // Default ISM Routing ISMs change configs based off message content, + // so they are not specified here. + moduleTypes: [ + ModuleType.AGGREGATION, + ModuleType.MERKLE_ROOT_MULTISIG, + ModuleType.MESSAGE_ID_MULTISIG, + ], + // SVM is explicitly not cached as the default ISM is a multisig ISM + // that routes internally. + chains: ethereumChainNames, + cachePolicy: IsmCachePolicy.IsmSpecific, + }, + { + selector: { + type: IsmCacheSelectorType.AppContext, + context: kesselAppContext, + }, + // Default ISM Routing ISMs change configs based off message content, + // so they are not specified here. + moduleTypes: [ + ModuleType.AGGREGATION, + ModuleType.MERKLE_ROOT_MULTISIG, + ModuleType.MESSAGE_ID_MULTISIG, + ModuleType.ROUTING, + ], + // SVM is explicitly not cached as the default ISM is a multisig ISM + // that routes internally. + chains: ethereumChainNames, + cachePolicy: IsmCachePolicy.IsmSpecific, + }, +]; + +const relayBlacklist: BaseRelayerConfig['blacklist'] = [ + // Ignore kessel runner test recipients. + // All 5 test recipients have the same address. 
+ ...kesselMatchingList, { // In an effort to reduce some giant retry queues that resulted // from spam txs to the old TestRecipient before we were charging for @@ -283,15 +327,8 @@ const hyperlane: RootAgentConfig = { }, blacklist: [...releaseCandidateHelloworldMatchingList, ...relayBlacklist], gasPaymentEnforcement, - metricAppContextsGetter: () => [ - { - name: 'helloworld', - matchingList: routerMatchingList( - helloWorld[Contexts.Hyperlane].addresses, - ), - }, - ], - defaultIsmCacheConfig, + metricAppContextsGetter, + ismCacheConfigs, allowContractCallCaching: true, resources: relayerResources, }, @@ -327,7 +364,8 @@ const releaseCandidate: RootAgentConfig = { }, blacklist: relayBlacklist, gasPaymentEnforcement, - defaultIsmCacheConfig, + metricAppContextsGetter, + ismCacheConfigs, allowContractCallCaching: true, resources: relayerResources, }, @@ -362,17 +400,19 @@ const neutron: RootAgentConfig = { rpcConsensusType: RpcConsensusType.Fallback, docker: { repo, - tag: 'ef039ae-20250411-104801', + tag: '8e87bb6-20250416-174849', }, - whitelist: [ - { - recipientAddress: '0x492b3653A38e229482Bab2f7De4A094B18017246', - }, - ], + whitelist: kesselMatchingList, gasPaymentEnforcement, - defaultIsmCacheConfig, + metricAppContextsGetter, + ismCacheConfigs, allowContractCallCaching: true, - resources: relayerResources, + resources: { + requests: { + cpu: '20000m', + memory: '32Gi', + }, + }, }, }; diff --git a/typescript/infra/config/environments/testnet4/funding.ts b/typescript/infra/config/environments/testnet4/funding.ts index 98f470dae8d..99d5d362a9e 100644 --- a/typescript/infra/config/environments/testnet4/funding.ts +++ b/typescript/infra/config/environments/testnet4/funding.ts @@ -10,7 +10,7 @@ export const keyFunderConfig: KeyFunderConfig< > = { docker: { repo: 'gcr.io/abacus-labs-dev/hyperlane-monorepo', - tag: '8d76c56-20250328-185250', + tag: '4fd2990-20250414-150005', }, // We're currently using the same deployer key as testnet2. 
// To minimize nonce clobbering we offset the key funder cron @@ -24,6 +24,7 @@ export const keyFunderConfig: KeyFunderConfig< [Contexts.Hyperlane]: [Role.Relayer, Role.Kathy], [Contexts.ReleaseCandidate]: [Role.Relayer, Role.Kathy], }, + chainsToSkip: ['hyperliquidevmtestnet'], // desired balance config desiredBalancePerChain: { abstracttestnet: '0.1', diff --git a/typescript/infra/config/warp.ts b/typescript/infra/config/warp.ts index c2a4a22b70c..7dc05c485cb 100644 --- a/typescript/infra/config/warp.ts +++ b/typescript/infra/config/warp.ts @@ -162,6 +162,32 @@ async function getConfigFromMergedRegistry( return populateWarpRouteMailboxAddresses(warpRoute, registry); } +/** + * Retrieves all Warp configurations for the specified Warp route ID by fetching it from the MergedRegistry + * Also, populates their mailbox + * Will return in the form { [warRouteId]: { ...config } } + */ +export async function getWarpConfigMapFromMergedRegistry( + registryUris: string[], +): Promise>> { + const registry = getRegistry({ + registryUris, + enableProxy: true, + }); + const warpRouteMap = await registry.getWarpDeployConfigs(); + assert( + warpRouteMap, + `Warp route Configs not found for registry URIs: ${registryUris.join( + ', ', + )}`, + ); + return promiseObjAll( + objMap(warpRouteMap, async (_, warpRouteConfig) => + populateWarpRouteMailboxAddresses(warpRouteConfig, registry), + ), + ); +} + /** * Populates warp route configuration by filling in mailbox addresses for each chain entry * @param warpRoute The warp route configuration diff --git a/typescript/infra/helm/key-funder/templates/cron-job.yaml b/typescript/infra/helm/key-funder/templates/cron-job.yaml index a94884272b6..33479c1c1f8 100644 --- a/typescript/infra/helm/key-funder/templates/cron-job.yaml +++ b/typescript/infra/helm/key-funder/templates/cron-job.yaml @@ -43,6 +43,12 @@ spec: {{- range $chain, $balance := .Values.hyperlane.igpClaimThresholdPerChain }} - --igp-claim-threshold-per-chain - {{ $chain }}={{ 
$balance }} +{{- end }} +{{- if .Values.hyperlane.chainsToSkip }} + - --chain-skip-override +{{- range $index, $chain := .Values.hyperlane.chainsToSkip }} + - {{ $chain }} +{{- end }} {{- end }} env: - name: PROMETHEUS_PUSH_GATEWAY diff --git a/typescript/infra/helm/key-funder/values.yaml b/typescript/infra/helm/key-funder/values.yaml index 0fd652b97e0..6006dd3c544 100644 --- a/typescript/infra/helm/key-funder/values.yaml +++ b/typescript/infra/helm/key-funder/values.yaml @@ -5,6 +5,7 @@ hyperlane: runEnv: testnet2 # Used for fetching secrets chains: [] + chainsToSkip: [] contextFundingFrom: hyperlane # key = context, value = array of roles to fund contextsAndRolesToFund: diff --git a/typescript/infra/scripts/agent-utils.ts b/typescript/infra/scripts/agent-utils.ts index 459222620cd..2939c72a339 100644 --- a/typescript/infra/scripts/agent-utils.ts +++ b/typescript/infra/scripts/agent-utils.ts @@ -345,6 +345,13 @@ export function withSkipReview(args: Argv) { .default('skipReview', false); } +export function withPropose(args: Argv) { + return args + .describe('propose', 'Propose') + .boolean('propose') + .default('propose', false); +} + // Interactively gets a single warp route ID export async function getWarpRouteIdInteractive() { const choices = Object.values(WarpRouteIds) diff --git a/typescript/infra/scripts/check/check-warp-deploy.ts b/typescript/infra/scripts/check/check-warp-deploy.ts index 6fc578bb819..27d1f38d281 100644 --- a/typescript/infra/scripts/check/check-warp-deploy.ts +++ b/typescript/infra/scripts/check/check-warp-deploy.ts @@ -1,11 +1,13 @@ import chalk from 'chalk'; import { Gauge, Registry } from 'prom-client'; +import { DEFAULT_GITHUB_REGISTRY } from '@hyperlane-xyz/registry'; import { ChainName } from '@hyperlane-xyz/sdk'; +import { assert } from '@hyperlane-xyz/utils'; import { WarpRouteIds } from '../../config/environments/mainnet3/warp/warpIds.js'; -import { getWarpAddresses } from '../../config/registry.js'; -import { warpConfigGetterMap } 
from '../../config/warp.js'; +import { DEFAULT_REGISTRY_URI } from '../../config/registry.js'; +import { getWarpConfigMapFromMergedRegistry } from '../../config/warp.js'; import { submitMetrics } from '../../src/utils/metrics.js'; import { Modules, getWarpRouteIdsInteractive } from '../agent-utils.js'; import { getEnvironmentConfig } from '../core-utils.js'; @@ -40,6 +42,11 @@ async function main() { WarpRouteIds.ArbitrumBaseBlastBscEthereumGnosisLiskMantleModeOptimismPolygonScrollZeroNetworkZoraMainnet, ]; + const registries = [DEFAULT_GITHUB_REGISTRY, DEFAULT_REGISTRY_URI]; + const warpCoreConfigMap = await getWarpConfigMapFromMergedRegistry( + registries, + ); + let warpIdsToCheck: string[]; if (interactive) { warpIdsToCheck = await getWarpRouteIdsInteractive(); @@ -47,22 +54,21 @@ async function main() { console.log(chalk.yellow('Skipping the following warp routes:')); routesToSkip.forEach((route) => console.log(chalk.yellow(`- ${route}`))); - warpIdsToCheck = Object.keys(warpConfigGetterMap).filter( + warpIdsToCheck = Object.keys(warpCoreConfigMap).filter( (warpRouteId) => !routesToSkip.includes(warpRouteId), ); } - // Determine which chains have warp configs - const chainsWithWarpConfigs = warpIdsToCheck.reduce((chains, warpRouteId) => { - const warpAddresses = getWarpAddresses(warpRouteId); - Object.keys(warpAddresses).forEach((chain) => chains.add(chain)); + // Get all the chains from warpCoreConfigMap. Used to initialize the MultiProvider. 
+ const warpConfigChains = warpIdsToCheck.reduce((chains, warpRouteId) => { + const warpConfigs = warpCoreConfigMap[warpRouteId]; + assert(warpConfigs, `Config not found in registry for ${warpRouteId}`); + Object.keys(warpConfigs).forEach((chain) => chains.add(chain)); return chains; }, new Set()); console.log( - `Found warp configs for chains: ${Array.from(chainsWithWarpConfigs) - .sort() - .join(', ')}`, + `Found warp configs for chains: ${Array.from(warpConfigChains).join(', ')}`, ); // Get the multiprovider once to avoid recreating it for each warp route @@ -74,7 +80,7 @@ async function main() { undefined, undefined, undefined, - Array.from(chainsWithWarpConfigs), + Array.from(warpConfigChains), ); // TODO: consider retrying this if check throws an error @@ -93,6 +99,7 @@ async function main() { fork, false, multiProvider, + registries, ); await governor.check(); diff --git a/typescript/infra/scripts/funding/fund-keys-from-deployer.ts b/typescript/infra/scripts/funding/fund-keys-from-deployer.ts index 247cddf0844..b36934564e2 100644 --- a/typescript/infra/scripts/funding/fund-keys-from-deployer.ts +++ b/typescript/infra/scripts/funding/fund-keys-from-deployer.ts @@ -152,7 +152,11 @@ async function main() { .boolean('skip-igp-claim') .describe('skip-igp-claim', 'If true, never claims funds from the IGP') - .default('skip-igp-claim', false).argv; + .default('skip-igp-claim', false) + + .array('chain-skip-override') + .describe('chain-skip-override', 'Array of chains to skip funding for') + .default('chain-skip-override', []).argv; constMetricLabels.hyperlane_deployment = environment; const config = getEnvironmentConfig(environment); @@ -170,6 +174,7 @@ async function main() { multiProvider, argv.contextsAndRoles, argv.skipIgpClaim, + argv.chainSkipOverride, argv.desiredBalancePerChain, argv.desiredKathyBalancePerChain ?? {}, argv.igpClaimThresholdPerChain ?? 
{}, @@ -186,6 +191,7 @@ async function main() { context, argv.contextsAndRoles[context]!, argv.skipIgpClaim, + argv.chainSkipOverride, argv.desiredBalancePerChain, argv.desiredKathyBalancePerChain ?? {}, argv.igpClaimThresholdPerChain ?? {}, @@ -238,6 +244,7 @@ class ContextFunder { public readonly context: Contexts, public readonly rolesToFund: FundableRole[], public readonly skipIgpClaim: boolean, + public readonly chainSkipOverride: ChainName[], public readonly desiredBalancePerChain: KeyFunderConfig< ChainName[] >['desiredBalancePerChain'], @@ -290,6 +297,7 @@ class ContextFunder { multiProvider: MultiProvider, contextsAndRolesToFund: ContextAndRolesMap, skipIgpClaim: boolean, + chainSkipOverride: ChainName[], desiredBalancePerChain: KeyFunderConfig< ChainName[] >['desiredBalancePerChain'], @@ -365,6 +373,7 @@ class ContextFunder { context, contextsAndRolesToFund[context]!, skipIgpClaim, + chainSkipOverride, desiredBalancePerChain, desiredKathyBalancePerChain, igpClaimThresholdPerChain, @@ -378,6 +387,7 @@ class ContextFunder { context: Contexts, rolesToFund: FundableRole[], skipIgpClaim: boolean, + chainSkipOverride: ChainName[], desiredBalancePerChain: KeyFunderConfig< ChainName[] >['desiredBalancePerChain'], @@ -430,6 +440,7 @@ class ContextFunder { context, rolesToFund, skipIgpClaim, + chainSkipOverride, desiredBalancePerChain, desiredKathyBalancePerChain, igpClaimThresholdPerChain, @@ -450,6 +461,14 @@ class ContextFunder { } private async fundChain(chain: string, keys: BaseAgentKey[]): Promise { + if (this.chainSkipOverride.includes(chain)) { + logger.warn( + { chain }, + `Configured to skip funding operations for chain ${chain}, skipping`, + ); + return; + } + const { promise, cleanup } = createTimeoutPromise( CHAIN_FUNDING_TIMEOUT_MS, `Timed out funding chain ${chain} after ${ diff --git a/typescript/infra/scripts/safes/governance/update-signers.ts b/typescript/infra/scripts/safes/governance/update-signers.ts new file mode 100644 index 
00000000000..c2cd9da8402 --- /dev/null +++ b/typescript/infra/scripts/safes/governance/update-signers.ts @@ -0,0 +1,91 @@ +import Safe from '@safe-global/protocol-kit'; +import yargs from 'yargs'; + +import { ChainName } from '@hyperlane-xyz/sdk'; +import { rootLogger } from '@hyperlane-xyz/utils'; + +import { Contexts } from '../../../config/contexts.js'; +import { regularSafes } from '../../../config/environments/mainnet3/governance/safe/regular.js'; +import { + SIGNERS, + THRESHOLD, +} from '../../../config/environments/mainnet3/governance/safe/safeConfig.js'; +import { AnnotatedCallData } from '../../../src/govern/HyperlaneAppGovernor.js'; +import { SafeMultiSend } from '../../../src/govern/multisend.js'; +import { Role } from '../../../src/roles.js'; +import { getSafeAndService, updateSafeOwner } from '../../../src/utils/safe.js'; +import { withPropose } from '../../agent-utils.js'; +import { getEnvironmentConfig } from '../../core-utils.js'; + +async function main() { + const { propose } = await withPropose(yargs(process.argv.slice(2))).argv; + + const envConfig = getEnvironmentConfig('mainnet3'); + const multiProvider = await envConfig.getMultiProvider( + Contexts.Hyperlane, + Role.Deployer, + true, + Object.keys(regularSafes), + ); + + for (const [chain, safeAddress] of Object.entries(regularSafes)) { + let safeSdk: Safe.default; + try { + ({ safeSdk } = await getSafeAndService( + chain, + multiProvider, + safeAddress, + )); + } catch (error) { + rootLogger.error(`[${chain}] could not get safe: ${error}`); + continue; + } + + let safeMultiSend: SafeMultiSend; + try { + safeMultiSend = new SafeMultiSend( + multiProvider, + chain as ChainName, + safeAddress, + ); + } catch (error) { + rootLogger.error(`[${chain}] could not get safe multi send: ${error}`); + continue; + } + + let transactions: AnnotatedCallData[]; + try { + transactions = await updateSafeOwner(safeSdk, SIGNERS, THRESHOLD); + } catch (error) { + rootLogger.error(`[${chain}] could not update 
safe owner: ${error}`); + continue; + } + + rootLogger.info(`[${chain}] Generated transactions for updating signers`); + rootLogger.info(`[${chain}] ${JSON.stringify(transactions, null, 2)}`); + + if (propose) { + try { + await safeMultiSend.sendTransactions( + transactions.map((call) => ({ + to: call.to, + data: call.data, + value: call.value, + })), + ); + rootLogger.info(`[${chain}] Successfully sent transactions`); + } catch (error) { + rootLogger.error(`[${chain}] could not send transactions: ${error}`); + } + } + } + + if (!propose) { + rootLogger.info('Skipping sending transactions, pass --propose to send'); + } +} + +main().catch((error) => { + rootLogger.error(error); + process.exit(1); +}); diff --git a/typescript/infra/src/agents/index.ts b/typescript/infra/src/agents/index.ts index 871e6a348e9..4a0d921fa9c 100644 --- a/typescript/infra/src/agents/index.ts +++ b/typescript/infra/src/agents/index.ts @@ -112,6 +112,7 @@ export abstract class AgentHelmManager extends HelmManager protocol: metadata.protocol, blocks: { reorgPeriod }, maxBatchSize: 32, + bypassBatchSimulation: false, priorityFeeOracle, transactionSubmitter, }; @@ -185,7 +186,7 @@ export class RelayerHelmManager extends OmniscientAgentHelmManager { addressBlacklist: config.addressBlacklist, metricAppContexts: config.metricAppContexts, gasPaymentEnforcement: config.gasPaymentEnforcement, - defaultIsmCacheConfig: config.defaultIsmCacheConfig, + ismCacheConfigs: config.ismCacheConfigs, }; const envConfig = objOmitKeys( config, diff --git a/typescript/infra/src/config/agent/relayer.ts b/typescript/infra/src/config/agent/relayer.ts index 2d5f71d8d2f..56fc06f513b 100644 --- a/typescript/infra/src/config/agent/relayer.ts +++ b/typescript/infra/src/config/agent/relayer.ts @@ -54,7 +54,7 @@ export interface BaseRelayerConfig { transactionGasLimit?: BigNumberish; skipTransactionGasLimitFor?: string[]; metricAppContextsGetter?: () => MetricAppContext[]; - defaultIsmCacheConfig?: IsmCacheConfig; + 
ismCacheConfigs?: Array; allowContractCallCaching?: boolean; } @@ -67,7 +67,7 @@ export type RelayerConfigMapConfig = Pick< | 'addressBlacklist' | 'gasPaymentEnforcement' | 'metricAppContexts' - | 'defaultIsmCacheConfig' + | 'ismCacheConfigs' >; // The rest of the config is intended to be set as env vars. export type RelayerEnvConfig = Omit< @@ -137,8 +137,8 @@ export class RelayerConfigHelper extends AgentConfigHelper { baseConfig.metricAppContextsGetter(), ); } - if (baseConfig.defaultIsmCacheConfig) { - relayerConfig.defaultIsmCacheConfig = baseConfig.defaultIsmCacheConfig; + if (baseConfig.ismCacheConfigs) { + relayerConfig.ismCacheConfigs = baseConfig.ismCacheConfigs; } relayerConfig.allowContractCallCaching = baseConfig.allowContractCallCaching; diff --git a/typescript/infra/src/config/funding.ts b/typescript/infra/src/config/funding.ts index 279a6842ad7..d656ac724f9 100644 --- a/typescript/infra/src/config/funding.ts +++ b/typescript/infra/src/config/funding.ts @@ -27,6 +27,7 @@ export interface KeyFunderConfig desiredBalancePerChain: Record; desiredKathyBalancePerChain: ChainMap; igpClaimThresholdPerChain: ChainMap; + chainsToSkip: ChainName[]; } export interface CheckWarpDeployConfig extends CronJobConfig {} diff --git a/typescript/infra/src/funding/key-funder.ts b/typescript/infra/src/funding/key-funder.ts index ad63128ecf7..4281edf4318 100644 --- a/typescript/infra/src/funding/key-funder.ts +++ b/typescript/infra/src/funding/key-funder.ts @@ -46,6 +46,7 @@ export class KeyFunderHelmManager extends HelmManager { desiredBalancePerChain: this.config.desiredBalancePerChain, desiredKathyBalancePerChain: this.config.desiredKathyBalancePerChain, igpClaimThresholdPerChain: this.config.igpClaimThresholdPerChain, + chainsToSkip: this.config.chainsToSkip, }, image: { repository: this.config.docker.repo, diff --git a/typescript/infra/src/utils/safe.ts b/typescript/infra/src/utils/safe.ts index 258fc48ca0c..04fea612177 100644 --- a/typescript/infra/src/utils/safe.ts 
+++ b/typescript/infra/src/utils/safe.ts @@ -310,15 +310,20 @@ export async function deleteSafeTx( export async function updateSafeOwner( safeSdk: Safe.default, + owners?: Address[], + threshold?: number, ): Promise { - const threshold = await safeSdk.getThreshold(); - const owners = await safeSdk.getOwners(); - const newOwners = safeSigners.signers; - const ownersToRemove = owners.filter( + const currentThreshold = await safeSdk.getThreshold(); + const newThreshold = threshold ?? currentThreshold; + + const currentOwners = await safeSdk.getOwners(); + const newOwners = owners ?? safeSigners.signers; + + const ownersToRemove = currentOwners.filter( (owner) => !newOwners.some((newOwner) => eqAddress(owner, newOwner)), ); const ownersToAdd = newOwners.filter( - (newOwner) => !owners.some((owner) => eqAddress(newOwner, owner)), + (newOwner) => !currentOwners.some((owner) => eqAddress(newOwner, owner)), ); rootLogger.info(chalk.magentaBright('Owners to remove:', ownersToRemove)); @@ -329,7 +334,7 @@ export async function updateSafeOwner( for (const ownerToRemove of ownersToRemove) { const { data: removeTxData } = await safeSdk.createRemoveOwnerTx({ ownerAddress: ownerToRemove, - threshold, + threshold: newThreshold, }); transactions.push({ to: removeTxData.to, @@ -342,7 +347,7 @@ export async function updateSafeOwner( for (const ownerToAdd of ownersToAdd) { const { data: addTxData } = await safeSdk.createAddOwnerTx({ ownerAddress: ownerToAdd, - threshold, + threshold: newThreshold, }); transactions.push({ to: addTxData.to, diff --git a/typescript/sdk/src/deploy/proxy.ts b/typescript/sdk/src/deploy/proxy.ts index 674bd32d849..ecd00215977 100644 --- a/typescript/sdk/src/deploy/proxy.ts +++ b/typescript/sdk/src/deploy/proxy.ts @@ -1,4 +1,5 @@ import { ethers } from 'ethers'; +import { Provider as ZKSyncProvider } from 'zksync-ethers'; import { ProxyAdmin__factory } from '@hyperlane-xyz/core'; import { Address, ChainId, eqAddress } from '@hyperlane-xyz/utils'; @@ -7,6 +8,8 
@@ import { transferOwnershipTransactions } from '../contracts/contracts.js'; import { AnnotatedEV5Transaction } from '../providers/ProviderType.js'; import { DeployedOwnableConfig } from '../types.js'; +type EthersLikeProvider = ethers.providers.Provider | ZKSyncProvider; + export type UpgradeConfig = { timelock: { delay: number; @@ -19,7 +22,7 @@ export type UpgradeConfig = { }; export async function proxyImplementation( - provider: ethers.providers.Provider, + provider: EthersLikeProvider, proxy: Address, ): Promise
{ // Hardcoded storage slot for implementation per EIP-1967 @@ -31,7 +34,7 @@ export async function proxyImplementation( } export async function isInitialized( - provider: ethers.providers.Provider, + provider: EthersLikeProvider, contract: Address, ): Promise { // Using OZ's Initializable 4.9 which keeps it at the 0x0 slot @@ -43,7 +46,7 @@ export async function isInitialized( } export async function proxyAdmin( - provider: ethers.providers.Provider, + provider: EthersLikeProvider, proxy: Address, ): Promise
{ // Hardcoded storage slot for admin per EIP-1967 @@ -72,7 +75,7 @@ export function proxyConstructorArgs( } export async function isProxy( - provider: ethers.providers.Provider, + provider: EthersLikeProvider, proxy: Address, ): Promise { const admin = await proxyAdmin(provider, proxy); diff --git a/typescript/sdk/src/deploy/proxyFactoryUtils.ts b/typescript/sdk/src/deploy/proxyFactoryUtils.ts new file mode 100644 index 00000000000..b4a27c709eb --- /dev/null +++ b/typescript/sdk/src/deploy/proxyFactoryUtils.ts @@ -0,0 +1,18 @@ +import { ethers } from 'ethers'; + +import { objMap } from '@hyperlane-xyz/utils'; + +import { proxyFactoryFactories } from './contracts.js'; +import { ProxyFactoryFactoriesAddresses } from './types.js'; + +/** + * Creates a default ProxyFactoryFactoriesAddresses object with all values set to ethers.constants.AddressZero. + * @returns {ProxyFactoryFactoriesAddresses} An object with all factory addresses set to AddressZero. + */ +export function createDefaultProxyFactoryFactories(): ProxyFactoryFactoriesAddresses { + const defaultAddress = ethers.constants.AddressZero; + return objMap( + proxyFactoryFactories, + () => defaultAddress, + ) as ProxyFactoryFactoriesAddresses; +} diff --git a/typescript/sdk/src/hook/types.ts b/typescript/sdk/src/hook/types.ts index ef02e2bfa58..170071a4d3a 100644 --- a/typescript/sdk/src/hook/types.ts +++ b/typescript/sdk/src/hook/types.ts @@ -46,6 +46,24 @@ export enum HookType { CCIP = 'ccipHook', } +export const HookTypeToContractNameMap: Record< + Exclude, + string +> = { + [HookType.MERKLE_TREE]: 'merkleTreeHook', + [HookType.INTERCHAIN_GAS_PAYMASTER]: 'interchainGasPaymaster', + [HookType.AGGREGATION]: 'staticAggregationHook', + [HookType.PROTOCOL_FEE]: 'protocolFee', + [HookType.OP_STACK]: 'opStackHook', + [HookType.ROUTING]: 'domainRoutingHook', + [HookType.FALLBACK_ROUTING]: 'fallbackDomainRoutingHook', + [HookType.AMOUNT_ROUTING]: 'amountRoutingHook', + [HookType.PAUSABLE]: 'pausableHook', + 
[HookType.ARB_L2_TO_L1]: 'arbL2ToL1Hook', + [HookType.MAILBOX_DEFAULT]: 'defaultHook', + [HookType.CCIP]: 'ccipHook', +}; + export type MerkleTreeHookConfig = z.infer; export type IgpHookConfig = z.infer; export type ProtocolFeeHookConfig = z.infer; diff --git a/typescript/sdk/src/index.ts b/typescript/sdk/src/index.ts index 077d8fee8bb..37e567bfd45 100644 --- a/typescript/sdk/src/index.ts +++ b/typescript/sdk/src/index.ts @@ -217,7 +217,12 @@ export { WeightedMultisigIsmConfig, WeightedMultisigIsmConfigSchema, } from './ism/types.js'; -export { collectValidators, moduleCanCertainlyVerify } from './ism/utils.js'; +export { + collectValidators, + moduleCanCertainlyVerify, + isStaticDeploymentSupported, + isIsmCompatible, +} from './ism/utils.js'; export { AgentChainMetadata, AgentChainMetadataSchema, @@ -242,6 +247,7 @@ export { GasPaymentEnforcementPolicyType, IsmCacheConfig, IsmCachePolicy, + IsmCacheSelectorType, RelayerConfig, RpcConsensusType, ScraperConfig, diff --git a/typescript/sdk/src/ism/types.ts b/typescript/sdk/src/ism/types.ts index bb7baf455d7..cf40129c9f9 100644 --- a/typescript/sdk/src/ism/types.ts +++ b/typescript/sdk/src/ism/types.ts @@ -75,6 +75,19 @@ export const MUTABLE_ISM_TYPE = [ IsmType.PAUSABLE, ]; +/** + * @notice Statically deployed ISM types + * @dev ISM types with immutable config embedded in contract bytecode via MetaProxy + */ +export const STATIC_ISM_TYPES = [ + IsmType.AGGREGATION, + IsmType.MERKLE_ROOT_MULTISIG, + IsmType.MESSAGE_ID_MULTISIG, + IsmType.WEIGHTED_MERKLE_ROOT_MULTISIG, + IsmType.WEIGHTED_MESSAGE_ID_MULTISIG, + IsmType.ICA_ROUTING, +]; + // mapping between the two enums export function ismTypeToModuleType(ismType: IsmType): ModuleType { switch (ismType) { diff --git a/typescript/sdk/src/ism/utils.ts b/typescript/sdk/src/ism/utils.ts index 30486035498..b93db8858a8 100644 --- a/typescript/sdk/src/ism/utils.ts +++ b/typescript/sdk/src/ism/utils.ts @@ -27,6 +27,7 @@ import { import { getChainNameFromCCIPSelector } from 
'../ccip/utils.js'; import { HyperlaneContracts } from '../contracts/types.js'; import { ProxyFactoryFactories } from '../deploy/contracts.js'; +import { ChainTechnicalStack } from '../metadata/chainMetadataTypes.js'; import { MultiProvider } from '../providers/MultiProvider.js'; import { ChainName } from '../types.js'; @@ -37,6 +38,7 @@ import { ModuleType, RoutingIsmConfig, RoutingIsmDelta, + STATIC_ISM_TYPES, ismTypeToModuleType, } from './types.js'; @@ -586,3 +588,36 @@ export function collectValidators( return new Set(validators); } + +/** + * Determines if static ISM deployment is supported on a given chain's technical stack + * @dev Currently, only ZkSync does not support static deployments + * @param chainTechnicalStack - The technical stack of the target chain + * @returns boolean - true if static deployment is supported, false for ZkSync + */ +export function isStaticDeploymentSupported( + chainTechnicalStack: ChainTechnicalStack | undefined, +): boolean { + return chainTechnicalStack !== ChainTechnicalStack.ZkSync; +} + +/** + * Checks if the given ISM type is compatible with the chain's technical stack. 
+ * + * @param {Object} params - The parameters object + * @param {ChainTechnicalStack | undefined} params.chainTechnicalStack - The technical stack of the chain + * @param {IsmType} params.ismType - The type of Interchain Security Module (ISM) + * @returns {boolean} True if the ISM type is compatible with the chain, false otherwise + */ +export function isIsmCompatible({ + chainTechnicalStack, + ismType, +}: { + chainTechnicalStack: ChainTechnicalStack | undefined; + ismType: IsmType; +}): boolean { + // Skip compatibility check for non-static ISMs as they're always supported + if (!STATIC_ISM_TYPES.includes(ismType)) return true; + + return isStaticDeploymentSupported(chainTechnicalStack); +} diff --git a/typescript/sdk/src/metadata/agentConfig.ts b/typescript/sdk/src/metadata/agentConfig.ts index b2f4b406435..9eb673408e2 100644 --- a/typescript/sdk/src/metadata/agentConfig.ts +++ b/typescript/sdk/src/metadata/agentConfig.ts @@ -355,7 +355,25 @@ export enum IsmCachePolicy { IsmSpecific = 'ismSpecific', } +export enum IsmCacheSelectorType { + DefaultIsm = 'defaultIsm', + AppContext = 'appContext', +} + +const IsmCacheSelector = z.discriminatedUnion('type', [ + z.object({ + type: z.literal(IsmCacheSelectorType.DefaultIsm), + }), + z.object({ + type: z.literal(IsmCacheSelectorType.AppContext), + context: z.string(), + }), +]); + const IsmCacheConfigSchema = z.object({ + selector: IsmCacheSelector.describe( + 'The selector to use for the ISM cache policy', + ), moduleTypes: z .array(z.nativeEnum(ModuleType)) .describe('The ISM module types to use the cache policy for.'), @@ -420,11 +438,11 @@ export const RelayerAgentConfigSchema = AgentConfigSchema.extend({ .describe( 'A list of app contexts and their matching lists to use for metrics. 
A message will be classified as the first matching app context.', ), - defaultIsmCacheConfig: z - .union([IsmCacheConfigSchema, z.string().min(1)]) + ismCacheConfigs: z + .union([z.array(IsmCacheConfigSchema), z.string().min(1)]) .optional() .describe( - 'The default ISM cache config to use for all chains. If not specified, default caching will be used.', + 'The ISM cache configs to be used. If not specified, default caching will be used.', ), allowContractCallCaching: z .boolean() diff --git a/typescript/sdk/src/metadata/chainMetadataConversion.ts b/typescript/sdk/src/metadata/chainMetadataConversion.ts index cbe50446a10..15cd34ee466 100644 --- a/typescript/sdk/src/metadata/chainMetadataConversion.ts +++ b/typescript/sdk/src/metadata/chainMetadataConversion.ts @@ -12,14 +12,15 @@ import { import { PROTOCOL_TO_DEFAULT_NATIVE_TOKEN } from '../token/nativeTokenMetadata.js'; export function chainMetadataToViemChain(metadata: ChainMetadata): Chain { + const rpcUrls = metadata.rpcUrls.map((rpcUrl) => rpcUrl.http); return defineChain({ id: getChainIdNumber(metadata), name: metadata.displayName || metadata.name, network: metadata.name, nativeCurrency: metadata.nativeToken || test1.nativeToken!, rpcUrls: { - public: { http: [metadata.rpcUrls[0].http] }, - default: { http: [metadata.rpcUrls[0].http] }, + public: { http: rpcUrls }, + default: { http: rpcUrls }, }, blockExplorers: metadata.blockExplorers?.length ? 
{ diff --git a/typescript/sdk/src/metadata/chainMetadataTypes.ts b/typescript/sdk/src/metadata/chainMetadataTypes.ts index 332c483d260..76d9415eff3 100644 --- a/typescript/sdk/src/metadata/chainMetadataTypes.ts +++ b/typescript/sdk/src/metadata/chainMetadataTypes.ts @@ -23,6 +23,7 @@ export enum ExplorerFamily { Blockscout = 'blockscout', Routescan = 'routescan', Voyager = 'voyager', + ZkSync = 'zksync', Other = 'other', } @@ -198,6 +199,11 @@ export const ChainMetadataSchemaObject = z.object({ .optional() .describe('Block settings for the chain/deployment.'), + bypassBatchSimulation: z + .boolean() + .optional() + .describe('Whether to bypass batch simulation for this chain.'), + chainId: z .union([ZNzUint, z.string()]) .describe(`The chainId of the chain. Uses EIP-155 for EVM chains`), diff --git a/typescript/sdk/src/router/ProxiedRouterChecker.ts b/typescript/sdk/src/router/ProxiedRouterChecker.ts index a297329ea43..5ae5c16c4b4 100644 --- a/typescript/sdk/src/router/ProxiedRouterChecker.ts +++ b/typescript/sdk/src/router/ProxiedRouterChecker.ts @@ -13,11 +13,17 @@ export abstract class ProxiedRouterChecker< getOwnableOverrides(chain: ChainName): AddressesMap | undefined { const config = this.configMap[chain]; let ownableOverrides = config?.ownerOverrides; + // timelock and proxyAdmin are mutually exclusive if (config?.timelock) { ownableOverrides = { ...ownableOverrides, proxyAdmin: this.app.getAddresses(chain).timelockController, }; + } else if (config?.proxyAdmin) { + ownableOverrides = { + ...ownableOverrides, + proxyAdmin: config.proxyAdmin.owner, + }; } return ownableOverrides; } diff --git a/typescript/sdk/src/utils/zksync.ts b/typescript/sdk/src/utils/zksync.ts new file mode 100644 index 00000000000..0b97e4330c2 --- /dev/null +++ b/typescript/sdk/src/utils/zksync.ts @@ -0,0 +1,32 @@ +import { ZKSyncArtifact, loadAllZKSyncArtifacts } from '@hyperlane-xyz/core'; + +/** + * @dev Retrieves a ZkSync artifact by its contract name or qualified name. 
+ * @param name The name of the contract or qualified name in the format "sourceName:contractName". + * @return The corresponding ZKSyncArtifact if found, or undefined if not found. + */ +export const getZKSyncArtifactByContractName = async ( + name: string, +): Promise => { + // Load all ZkSync artifacts + const allArtifacts = loadAllZKSyncArtifacts(); + + // Find the artifact that matches the contract name or qualified name + const artifact = Object.values(allArtifacts).find( + ({ contractName, sourceName }: ZKSyncArtifact) => { + const lowerCaseContractName = contractName.toLowerCase(); + const lowerCaseName = name.toLowerCase(); + + // Check if the contract name matches + if (lowerCaseContractName === lowerCaseName) { + return true; + } + + // Check if the qualified name matches + const qualifiedName = `${sourceName}:${contractName}`; + return qualifiedName === name; // Return true if qualified name matches + }, + ); + + return artifact; +}; diff --git a/typescript/sdk/src/zksync/ZKSyncDeployer.ts b/typescript/sdk/src/zksync/ZKSyncDeployer.ts new file mode 100644 index 00000000000..92069263115 --- /dev/null +++ b/typescript/sdk/src/zksync/ZKSyncDeployer.ts @@ -0,0 +1,185 @@ +import { BigNumber, BytesLike, Overrides, utils } from 'ethers'; +import { + Contract, + ContractFactory, + Wallet, + types as zksyncTypes, +} from 'zksync-ethers'; + +import { ZKSyncArtifact } from '@hyperlane-xyz/core'; +import { assert } from '@hyperlane-xyz/utils'; + +import { defaultZKProviderBuilder } from '../providers/providerBuilders.js'; +import { getZKSyncArtifactByContractName } from '../utils/zksync.js'; + +/** + * Class for deploying contracts to the ZKSync network. + */ +export class ZKSyncDeployer { + public zkWallet: Wallet; + public deploymentType?: zksyncTypes.DeploymentType; + + constructor(zkWallet: Wallet, deploymentType?: zksyncTypes.DeploymentType) { + this.deploymentType = deploymentType; + + this.zkWallet = zkWallet.connect( + zkWallet.provider ?? 
+ defaultZKProviderBuilder([{ http: 'http://127.0.0.1:8011' }], 260), + ); + } + + /** + * Loads and validates a ZKSync contract artifact by name + * @param contractTitle - Contract name or qualified name (sourceName:contractName) + * + * @returns The ZKSync artifact + */ + private async loadArtifact(contractTitle: string): Promise { + const artifact = await getZKSyncArtifactByContractName(contractTitle); + assert(artifact, `No ZKSync artifact for contract ${contractTitle} found!`); + return artifact; + } + + /** + * Estimates the price of calling a deploy transaction in ETH. + * + * @param artifact The previously loaded artifact object. + * @param constructorArguments List of arguments to be passed to the contract constructor. + * + * @returns Calculated fee in ETH wei + */ + public async estimateDeployFee( + artifact: ZKSyncArtifact, + constructorArguments: any[], + ): Promise { + const gas = await this.estimateDeployGas(artifact, constructorArguments); + const gasPrice = await this.zkWallet.provider.getGasPrice(); + return gas.mul(gasPrice); + } + + /** + * Estimates the amount of gas needed to execute a deploy transaction. + * + * @param artifact The previously loaded artifact object. + * @param constructorArguments List of arguments to be passed to the contract constructor. + * + * @returns Calculated amount of gas. + */ + public async estimateDeployGas( + artifact: ZKSyncArtifact, + constructorArguments: any[], + ): Promise { + const factoryDeps = await this.extractFactoryDeps(artifact); + + const factory = new ContractFactory( + artifact.abi, + artifact.bytecode, + this.zkWallet, + this.deploymentType, + ); + + // Encode deploy transaction so it can be estimated. + const deployTx = factory.getDeployTransaction(...constructorArguments, { + customData: { + factoryDeps, + }, + }); + deployTx.from = this.zkWallet.address; + + return this.zkWallet.provider.estimateGas(deployTx); + } + + /** + * Sends a deploy transaction to the zkSync network. 
+ * For now, it will use defaults for the transaction parameters: + * - fee amount is requested automatically from the zkSync server. + * + * @param artifact The previously loaded artifact object. + * @param constructorArguments List of arguments to be passed to the contract constructor. + * @param overrides Optional object with additional deploy transaction parameters. + * @param additionalFactoryDeps Additional contract bytecodes to be added to the factory dependencies list. + * + * @returns A contract object. + */ + public async deploy( + artifact: ZKSyncArtifact, + constructorArguments: any[] = [], + overrides?: Overrides, + additionalFactoryDeps?: BytesLike[], + ): Promise { + const baseDeps = await this.extractFactoryDeps(artifact); + const additionalDeps = additionalFactoryDeps + ? additionalFactoryDeps.map((val) => utils.hexlify(val)) + : []; + const factoryDeps = [...baseDeps, ...additionalDeps]; + + const factory = new ContractFactory( + artifact.abi, + artifact.bytecode, + this.zkWallet, + this.deploymentType, + ); + + const { customData, ..._overrides } = overrides ?? {}; + + // Encode and send the deploy transaction providing factory dependencies. + const contract = await factory.deploy(...constructorArguments, { + ..._overrides, + customData: { + ...customData, + factoryDeps, + }, + }); + + await contract.deployed(); + + return contract; + } + + /** + * Extracts factory dependencies from the artifact. + * + * @param artifact Artifact to extract dependencies from + * + * @returns Factory dependencies in the format expected by SDK. + */ + async extractFactoryDeps(artifact: ZKSyncArtifact): Promise { + const visited = new Set(); + + visited.add(`${artifact.sourceName}:${artifact.contractName}`); + return this.extractFactoryDepsRecursive(artifact, visited); + } + + private async extractFactoryDepsRecursive( + artifact: ZKSyncArtifact, + visited: Set, + ): Promise { + // Load all the dependency bytecodes. + // We transform it into an array of bytecodes. 
+ const factoryDeps: string[] = []; + for (const dependencyHash in artifact.factoryDeps) { + if ( + Object.prototype.hasOwnProperty.call( + artifact.factoryDeps, + dependencyHash, + ) + ) { + const dependencyContract = artifact.factoryDeps[dependencyHash]; + if (!visited.has(dependencyContract)) { + const dependencyArtifact = await this.loadArtifact( + dependencyContract, + ); + factoryDeps.push(dependencyArtifact.bytecode); + visited.add(dependencyContract); + const transitiveDeps = await this.extractFactoryDepsRecursive( + dependencyArtifact, + visited, + ); + factoryDeps.push(...transitiveDeps); + } + } + } + + return factoryDeps; + } +} diff --git a/typescript/utils/src/validator.ts b/typescript/utils/src/validator.ts index 63f135909d6..4876bc3869e 100644 --- a/typescript/utils/src/validator.ts +++ b/typescript/utils/src/validator.ts @@ -2,6 +2,7 @@ import { ethers } from 'ethers'; import { eqAddress } from './addresses.js'; import { domainHash } from './domains.js'; +import { fromHexString, toHexString } from './strings.js'; import { Address, Checkpoint, @@ -106,3 +107,34 @@ export class BaseValidator { throw new Error('Not implemented'); } } + +/** + * Create signature for validator announce + */ +export const createAnnounce = async ( + validatorPrivKey: string, + storageLocation: string, + mailboxId: string, + localDomain: number, +) => { + const domainIdBytes = Buffer.alloc(4); + domainIdBytes.writeUInt32BE(localDomain); + + const domainHashBytes = toHexString( + Buffer.concat([ + domainIdBytes, + fromHexString(mailboxId), + Buffer.from('HYPERLANE_ANNOUNCEMENT'), + ]), + ); + const domainHash = ethers.utils.keccak256(domainHashBytes); + + const announcementDigestBytes = toHexString( + Buffer.concat([fromHexString(domainHash), Buffer.from(storageLocation)]), + ); + const announcementDigest = ethers.utils.keccak256(announcementDigestBytes); + + return new ethers.Wallet(validatorPrivKey).signMessage( + fromHexString(announcementDigest), + ); +}; diff --git 
a/yarn.lock b/yarn.lock index 3cdc7c371b3..57d4dcdc282 100644 --- a/yarn.lock +++ b/yarn.lock @@ -7745,12 +7745,15 @@ __metadata: "@cosmjs/stargate": "npm:^0.32.4" "@eslint/js": "npm:^9.15.0" "@hyperlane-xyz/cosmos-types": "npm:11.0.0" + "@types/mocha": "npm:^10.0.1" "@typescript-eslint/eslint-plugin": "npm:^8.1.6" "@typescript-eslint/parser": "npm:^8.1.6" eslint: "npm:^9.15.0" eslint-config-prettier: "npm:^9.1.0" eslint-import-resolver-typescript: "npm:^3.6.3" eslint-plugin-import: "npm:^2.31.0" + mocha: "npm:^10.2.0" + mocha-steps: "npm:^1.3.0" prettier: "npm:^2.8.8" typescript: "npm:5.3.3" typescript-eslint: "npm:^8.23.0" @@ -30136,6 +30139,13 @@ __metadata: languageName: node linkType: hard +"mocha-steps@npm:^1.3.0": + version: 1.3.0 + resolution: "mocha-steps@npm:1.3.0" + checksum: 10/ca36de467293b0c36290001cd0305df4a3e161fa52c20aff62fbca78566ec200610ff7b0f48eb720e76a3ffdbf91b04d478c27e15d31b1de7f0de787d63e774e + languageName: node + linkType: hard + "mocha@npm:7.1.2": version: 7.1.2 resolution: "mocha@npm:7.1.2"