diff --git a/.github/workflows/cli.yml b/.github/workflows/cli.yml new file mode 100644 index 000000000..edf7cb883 --- /dev/null +++ b/.github/workflows/cli.yml @@ -0,0 +1,21 @@ +name: CLI + +on: + workflow_dispatch: + pull_request: + push: + branches: + - main + +jobs: + test-evm: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - run: docker build -f Dockerfile.cli --target cli-local-test . --progress=plain + + test-solana: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - run: docker build -f Dockerfile.cli --target cli-local-test-solana . --progress=plain diff --git a/Dockerfile.cli b/Dockerfile.cli new file mode 100644 index 000000000..2fccaa2b2 --- /dev/null +++ b/Dockerfile.cli @@ -0,0 +1,68 @@ +# NOTE: we use the backpackapp base image so we can use solana. You would think +# that the solana installer script just works, but you would be wrong. It seems +# to have been broken recently since the migration to the anza url. +# The old installer url returns a 403. So we instead rely on solana binaries cached on docker hub. Everything is fine. +FROM backpackapp/build:v0.30.1@sha256:c160fe32cba7c463981110b2aac2924de4833c06a0af6473a830ead880c4ef3b as base + +RUN apt update + +RUN apt install -y python3 +RUN apt install -y build-essential +RUN apt install -y git +RUN apt install -y curl +RUN apt install -y unzip + +RUN curl -fsSL https://bun.sh/install | bash + +RUN curl -L https://foundry.paradigm.xyz | bash +RUN bash -ci "foundryup" + +RUN apt install -y jq + +FROM base as base-solana + +RUN cargo install --git https://github.com/coral-xyz/anchor avm --locked --force +RUN avm install 0.29.0 +RUN avm use 0.29.0 + +FROM base as cli-remote +# NOTE: when invoking the installer outside of the source tree, it clones the +# repo and installs that way. +# This build stage tests that path. +COPY cli/install.sh cli/install.sh +RUN bash -ci "./cli/install.sh" +RUN bash -ci "which ntt" + +FROM base-solana as cli-local +# NOTE: when invoking the installer inside of the source tree, it installs from +# the local source tree. +# This build stage tests that path. 
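+# The COPY commands below pull in only the subset of the workspace that the
+# installer needs to build with bun. Because cli/package.json (named
+# "@wormhole-foundation/ntt-cli") is present in the tree, cli/install.sh takes
+# the local-install path instead of cloning the repository.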
+WORKDIR /app +COPY tsconfig.json tsconfig.json +COPY tsconfig.esm.json tsconfig.esm.json +COPY tsconfig.cjs.json tsconfig.cjs.json +COPY package.json package.json +COPY package-lock.json package-lock.json +COPY sdk sdk +COPY solana/package.json solana/package.json +COPY solana/ts solana/ts +COPY evm/ts evm/ts +COPY solana/tsconfig.*.json solana/ +COPY cli/package.json cli/package.json +COPY cli/package-lock.json cli/package-lock.json +COPY cli/src cli/src +COPY cli/install.sh cli/install.sh +RUN bash -ci "./cli/install.sh" +RUN bash -ci "which ntt" + +FROM cli-local as cli-local-test +COPY cli/test cli/test +COPY evm evm +RUN bash -ci "./cli/test/sepolia-bsc.sh" + +FROM cli-local as cli-local-test-solana +COPY cli/test cli/test +# evm/script needed for the cli, at least for now +COPY evm/script evm/script +COPY solana solana +RUN bash -ci "./cli/test/solana.sh --use-tmp-dir" diff --git a/cli/example-overrides.json b/cli/example-overrides.json new file mode 100644 index 000000000..1c53ed49c --- /dev/null +++ b/cli/example-overrides.json @@ -0,0 +1,13 @@ +{ + "chains": { + "Bsc": { + "rpc": "http://127.0.0.1:8545" + }, + "Sepolia": { + "rpc": "http://127.0.0.1:8546" + }, + "Solana": { + "rpc": "http://127.0.0.1:8899" + } + } +} diff --git a/cli/install.sh b/cli/install.sh new file mode 100755 index 000000000..f545df203 --- /dev/null +++ b/cli/install.sh @@ -0,0 +1,164 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# check that 'bun' is installed + +if ! command -v bun > /dev/null; then + echo "bun is not installed. Follow the instructions at https://bun.sh/docs/installation" + exit 1 +fi + +REPO="https://github.com/wormhole-foundation/example-native-token-transfers.git" + +function main { + branch="" + + while [[ $# -gt 0 ]]; do + key="$1" + + case $key in + -b|--branch) + branch="$2" + shift + shift + ;; + -r|--repo) + REPO="$2" + shift + shift + ;; + *) + echo "Unknown option $key" + exit 1 + ;; + esac + done + + path="" + mkdir -p "$HOME/.ntt-cli" + + # check if there's a package.json in the parent directory, with "name": "@wormhole-foundation/ntt-cli" + if [ -f "$(dirname $0)/package.json" ] && grep -q '"name": "@wormhole-foundation/ntt-cli"' "$(dirname $0)/package.json"; then + path="$(dirname $0)/.." + version=$(git -C "$path" rev-parse HEAD 2>/dev/null || echo "unknown") + dirty=$(git -C "$path" diff --quiet 2>/dev/null || echo "-dirty") + echo "$version$dirty" > "$HOME/.ntt-cli/version" + else + check_commit_included_in_main="false" + # if branch is set, use it. otherwise use the latest tag of the form "vX.Y.Z+cli" + if [ -z "$branch" ]; then + branch="$(select_branch)" + # if the branch was not set, we want to check that the default is included + # in the main branch, i.e. it has been reviewed + check_commit_included_in_main="true" + else + branch="origin/$branch" + fi + + # clone to $HOME/.ntt-cli if it doesn't exist, otherwise update it + echo "Cloning $REPO $branch" + + path="$HOME/.ntt-cli/.checkout" + + if [ ! -d "$path" ]; then + git clone "$REPO" "$path" + fi + pushd "$path" + # update origin url to REPO + git remote set-url origin "$REPO" + git fetch origin + if [ "$check_commit_included_in_main" = "true" ]; then + # check that the commit is included in the main branch + if ! 
git merge-base --is-ancestor "$branch" "origin/main"; then + echo "ref '$branch' is not included in the main branch" + exit 1 + fi + fi + # reset hard + git reset --hard "$branch" + version=$(git rev-parse HEAD) + dirty=$(git diff --quiet || echo "-dirty") + echo "$version$dirty" > "$HOME/.ntt-cli/version" + popd + fi + + absolute_path="$(cd $path && pwd)" + echo $absolute_path >> "$HOME/.ntt-cli/version" + + # jq would be nicer but it's not portable + # here we make the assumption that the file uses 2 spaces for indentation. + # this is a bit fragile, but we don't want to catch further nested objects + # (there might be a "version" in the scripts section, for example) + version=$(cat "$path/cli/package.json" | grep '^ "version":' | cut -d '"' -f 4) + echo "$version" >> "$HOME/.ntt-cli/version" + + remote_url=$(git -C "$path" remote get-url origin 2>/dev/null || echo "unknown") + echo "$remote_url" >> "$HOME/.ntt-cli/version" + + echo "Installing ntt CLI version $version" + install_cli "$path" +} + +# function that determines which branch/tag to clone +function select_branch { + # if the repo has a tag of the form "vX.Y.Z+cli", use that (the latest one) + branch="" + regex="refs/tags/v[0-9]*\.[0-9]*\.[0-9]*+cli" + if git ls-remote --tags "$REPO" | grep -q "$regex"; then + branch="$(git ls-remote --tags "$REPO" | grep "$regex" | sort -V | tail -n 1 | awk '{print $2}')" + else + # otherwise error + echo "No tag of the form vX.Y.Z+cli found" >&2 + exit 1 + fi + + echo "$branch" +} + +function install_cli { + cd "$1" + + # if 'ntt' is already installed, uninstall it + # just check with 'which' + if which ntt > /dev/null; then + echo "Removing existing ntt CLI" + rm $(which ntt) + fi + + # swallow the output of the first install + # TODO: figure out why it fails the first time. + bun install > /dev/null 2>&1 || true + bun install + + # make a temporary directory + + tmpdir="$(mktemp -d)" + + # create a temporary symlink 'npm' to 'bun' + + ln -s "$(command -v bun)" "$tmpdir/npm" + + # add the temporary directory to the PATH + + export PATH="$tmpdir:$PATH" + + # swallow the output of the first build + # TODO: figure out why it fails the first time. + bun --bun run --filter '*' build > /dev/null 2>&1 || true + bun --bun run --filter '*' build + + # remove the temporary directory + + rm -r "$tmpdir" + + # now link the CLI + + cd cli + + bun link + + bun link @wormhole-foundation/ntt-cli +} + +main "$@" diff --git a/cli/package.json b/cli/package.json index 3252ba0bb..a1926fc5a 100644 --- a/cli/package.json +++ b/cli/package.json @@ -1,5 +1,6 @@ { - "name": "cli", + "name": "@wormhole-foundation/ntt-cli", + "version": "1.0.0-beta", "module": "src/index.ts", "type": "module", "devDependencies": { @@ -13,7 +14,7 @@ "ntt": "src/index.ts" }, "dependencies": { + "chalk": "^5.3.0", "yargs": "^17.7.2" - }, - "version": "0.2.0" -} \ No newline at end of file + } +} diff --git a/cli/src/configuration.ts b/cli/src/configuration.ts new file mode 100644 index 000000000..77ea49783 --- /dev/null +++ b/cli/src/configuration.ts @@ -0,0 +1,200 @@ +import { assertChain, chains, type Chain } from "@wormhole-foundation/sdk"; +import * as yargs from "yargs"; +import fs from "fs"; +import { ensureNttRoot } from "."; +import chalk from "chalk"; + +// We support project-local and global configuration. +// The configuration is stored in JSON files in $HOME/.ntt-cli/config.json (global) and .ntt-cli/config.json (local). +// These can further be overridden by environment variables of the form CHAIN_KEY=value. 
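+// Illustrative usage (the module is imported as `configuration` elsewhere in
+// the CLI): resolving the Sepolia explorer API key checks the
+// SEPOLIA_SCAN_API_KEY environment variable first, then .ntt-cli/config.json,
+// then $HOME/.ntt-cli/config.json, and prints an error if nothing is set.
+//
+//   // one-time setup:  ntt config set-chain Sepolia scan_api_key <key>
+//   const apiKey = configuration.get("Sepolia", "scan_api_key", { reportError: true });
+//   if (!apiKey) process.exit(1);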
+type Scope = "global" | "local"; + +type Config = { + chains: Partial<{ + [C in Chain]: ChainConfig; + }> +} + +type ChainConfig = Partial; + +// TODO: per-network configuration? (i.e. mainnet, testnet, etc) +const configTemplate = { + scan_api_key: "", +}; + +function assertChainConfigKey(key: string): asserts key is keyof ChainConfig { + const validKeys = Object.keys(configTemplate); + if (!validKeys.includes(key)) { + throw new Error(`Invalid key: ${key}`); + } +} + +const options = { + chain: { + describe: "Chain", + type: "string", + choices: chains, + demandOption: true, + }, + key: { + describe: "Key", + type: "string", + choices: Object.keys(configTemplate), + demandOption: true, + }, + value: { + describe: "Value", + type: "string", + demandOption: true, + }, + local: { + describe: "Use local configuration", + type: "boolean", + default: false, + }, + global: { + describe: "Use global configuration", + type: "boolean", + default: true, + } +} as const; +export const command = (args: yargs.Argv<{}>) => args + .command("set-chain ", + "set a configuration value for a chain", + (yargs) => yargs + .positional("chain", options.chain) + .positional("key", options.key) + .positional("value", options.value) + .option("local", options.local) + .option("global", options.global), + (argv) => { + const scope = resolveScope(argv.local, argv.global); + assertChain(argv.chain); + assertChainConfigKey(argv.key); + setChainConfig(scope, argv.chain, argv.key, argv.value); + }) + .command("unset-chain ", + "unset a configuration value for a chain", + (yargs) => yargs + .positional("chain", options.chain) + .positional("key", options.key) + .option("local", options.local) + .option("global", options.global), + (argv) => { + const scope = resolveScope(argv.local, argv.global); + assertChainConfigKey(argv.key); + assertChain(argv.chain); + setChainConfig(scope, argv.chain, argv.key, undefined); + }) + .command("get-chain ", + "get a configuration value", + (yargs) => yargs + .positional("chain", options.chain) + .positional("key", options.key) + .option("local", options.local) + .option("global", options.global), + (argv) => { + const scope = resolveScope(argv.local, argv.global); + assertChainConfigKey(argv.key); + assertChain(argv.chain); + const val = getChainConfig(argv.scope as Scope, argv.chain, argv.key); + if (!val) { + console.error("undefined"); + } else { + console.log(val); + } + }) + .demandCommand() + +function findOrCreateConfigFile(scope: Scope): string { + // if scope is global, touch $HOME/.ntt-cli/config.json + // if scope is local, touch .ntt-cli/config.json. 
In the latter case, make sure we're in an ntt project (call ensureNttRoot()) + + // if the file doesn't exist, write an empty object + let configDir; + + switch (scope) { + case "global": + if (!process.env.HOME) { + throw new Error("Could not determine home directory"); + } + configDir = `${process.env.HOME}/.ntt-cli`; + break; + case "local": + ensureNttRoot(); + configDir = ".ntt-cli"; + break; + } + + const emptyConfig: Config = { + chains: {}, + }; + + if (!fs.existsSync(configDir)) { + fs.mkdirSync(configDir); + } + const configFile = `${configDir}/config.json`; + if (!fs.existsSync(configFile)) { + fs.writeFileSync(configFile, JSON.stringify(emptyConfig, null, 2)); + } + return configFile; +} + +function setChainConfig(scope: Scope, chain: Chain, key: keyof ChainConfig, value: string | undefined) { + const configFile = findOrCreateConfigFile(scope); + const config = JSON.parse(fs.readFileSync(configFile, "utf-8")) as Config; + if (!config.chains[chain]) { + config.chains[chain] = {}; + } + config.chains[chain]![key] = value; + fs.writeFileSync(configFile, JSON.stringify(config, null, 2)); +} + +function getChainConfig(scope: Scope, chain: Chain, key: keyof ChainConfig): string | undefined { + const configFile = findOrCreateConfigFile(scope); + const config = JSON.parse(fs.readFileSync(configFile, "utf-8")) as Config; + return config.chains[chain]?.[key]; +} + +function envVarName(chain: Chain, key: keyof ChainConfig): string { + return `${chain.toUpperCase()}_${key.toUpperCase()}`; +} + +export function get( + chain: Chain, + key: keyof ChainConfig, + { reportError = false } +): string | undefined { + const varName = envVarName(chain, key); + const env = process.env[varName]; + if (env) { + console.info(chalk.yellow(`Using ${varName} for ${chain} ${key}`)); + return env; + } + const local = getChainConfig("local", chain, key); + if (local) { + console.info(chalk.yellow(`Using local configuration for ${chain} ${key} (in .ntt-cli/config.json)`)); + return local; + } + const global = getChainConfig("global", chain, key); + if (global) { + console.info(chalk.yellow(`Using global configuration for ${chain} ${key} (in $HOME/.ntt-cli/config.json)`)); + return global; + } + if (reportError) { + console.error(`Could not find configuration for ${chain} ${key}`); + console.error(`Please set it using 'ntt config set-chain ${chain} ${key} ' or by setting the environment variable ${varName}`); + } +} +function resolveScope(local: boolean, global: boolean) { + if (local && global) { + throw new Error("Cannot specify both --local and --global"); + } + if (local) { + return "local"; + } + if (global) { + return "global"; + } + throw new Error("Must specify either --local or --global"); +} diff --git a/cli/src/diff.ts b/cli/src/diff.ts new file mode 100644 index 000000000..f0ba286f4 --- /dev/null +++ b/cli/src/diff.ts @@ -0,0 +1,88 @@ +import chalk from "chalk"; + +export type Diff = { + push?: T; + pull?: T; +}; + + +// type that maps over the keys of an object (recursively), mapping each leaf type to Diff +type DiffMap = { + [K in keyof T]: T[K] extends object ? 
Partial> : Diff +} + +function isObject(obj: any): obj is Record { + return obj && typeof obj === 'object' && !Array.isArray(obj); +} + +export function diffObjects>(obj1: T, obj2: T): Partial> { + const result: Partial> = {}; + + for (const key in obj1) { + if (obj1.hasOwnProperty(key)) { + if (obj2.hasOwnProperty(key)) { + if (isObject(obj1[key]) && isObject(obj2[key])) { + result[key] = diffObjects(obj1[key], obj2[key]); + } else if (obj1[key] === obj2[key]) { + // result[key] = obj1[key] as any; + } else { + result[key] = { pull: obj2[key] , push: obj1[key]} as any; + } + } else { + result[key] = { push: obj1[key] } as any; + } + } + } + + for (const key in obj2) { + if (obj2.hasOwnProperty(key) && !obj1.hasOwnProperty(key)) { + result[key] = { pull: obj2[key] } as any; + } + } + + // prune empty objects + for (const key in result) { + if (isObject(result[key])) { + if (Object.keys(result[key]).length === 0) { + delete result[key]; + } + } + } + + return result; +} + +export function colorizeDiff(diff: any, indent = 2): string { + if (!isObject(diff)) return JSON.stringify(diff, null, indent); + + const jsonString = JSON.stringify(diff, null, indent); + let result = ''; + const lines = jsonString.split('\n'); + + for (const line of lines) { + const trimmedLine = line.trim(); + if (trimmedLine.startsWith('"') && trimmedLine.endsWith(': {')) { + const key = trimmedLine.slice(1, trimmedLine.indexOf('": {')); + if (isObject(diff[key]) && ('push' in diff[key] || 'pull' in diff[key])) { + const push = diff[key].push; + const pull = diff[key].pull; + if (push !== undefined && pull !== undefined) { + result += `${line}\n`; + } else if (push !== undefined) { + result += line.replace(trimmedLine, chalk.red(trimmedLine)) + '\n'; + } else if (pull !== undefined) { + result += line.replace(trimmedLine, chalk.green(trimmedLine)) + '\n'; + } + } else { + result += line + '\n'; + } + } else if (trimmedLine.startsWith('"push"') || trimmedLine.startsWith('"pull"')) { + const color = trimmedLine.startsWith('"push"') ? chalk.green : chalk.red; + result += line.replace(trimmedLine, color(trimmedLine)) + '\n'; + } else { + result += line + '\n'; + } + } + + return result; +} diff --git a/cli/src/evmsigner.ts b/cli/src/evmsigner.ts new file mode 100644 index 000000000..65a698e0f --- /dev/null +++ b/cli/src/evmsigner.ts @@ -0,0 +1,205 @@ +// NOTE: This file is a copy of the file from the wormhole-sdk package. The only +// change is messing with the gas parameters, because the original hardcoded +// values underpriced BSC testnet transactions, and they would get stuck in the mempool. +// +// Obviously this is a very short term stopgap. At the least, the sdk should +// probably support overriding the default gas parameters, but ideally it should +// be able to estimate the gas price and set it dynamically. (is that possible? 
idk) +// +// NOTE: we should now be able to use https://github.com/wormhole-foundation/wormhole-sdk-ts/pull/583 (thanks @ben) +import type { + Network, + SignOnlySigner, + SignedTx, + Signer, + UnsignedTransaction, +} from '@wormhole-foundation/sdk-connect'; +import { + PlatformNativeSigner, + chainToPlatform, + isNativeSigner, +} from '@wormhole-foundation/sdk-connect'; +import { + EvmPlatform, + type EvmChains, + _platform +} from '@wormhole-foundation/sdk-evm'; +import type { + Signer as EthersSigner, + Provider, + TransactionRequest, +} from 'ethers'; +import { NonceManager, Wallet } from 'ethers'; + +export async function getEvmSigner( + rpc: Provider, + key: string | EthersSigner, + opts?: { + maxGasLimit?: bigint; + chain?: EvmChains; + debug?: boolean; + }, +): Promise { + const signer: EthersSigner = + typeof key === 'string' ? new Wallet(key, rpc) : key; + + const chain = opts?.chain ?? (await EvmPlatform.chainFromRpc(rpc))[1]; + const managedSigner = new NonceManager(signer); + + if (managedSigner.provider === null) { + try { + managedSigner.connect(rpc); + } catch (e) { + console.error('Cannot connect to network for signer', e); + } + } + + return new EvmNativeSigner( + chain, + await signer.getAddress(), + managedSigner, + opts, + ); +} + +// Get a SignOnlySigner for the EVM platform +export async function getEvmSignerForKey( + rpc: Provider, + privateKey: string, +): Promise { + return getEvmSigner(rpc, privateKey); +} + +// Get a SignOnlySigner for the EVM platform +export async function getEvmSignerForSigner( + signer: EthersSigner, +): Promise { + if (!signer.provider) throw new Error('Signer must have a provider'); + return getEvmSigner(signer.provider!, signer, {}); +} + +export class EvmNativeSigner + extends PlatformNativeSigner + implements SignOnlySigner +{ + constructor( + _chain: C, + _address: string, + _signer: EthersSigner, + readonly opts?: { maxGasLimit?: bigint; debug?: boolean }, + ) { + super(_chain, _address, _signer); + } + + chain(): C { + return this._chain; + } + + address(): string { + return this._address; + } + + async sign(tx: UnsignedTransaction[]): Promise { + const chain = this.chain(); + + const signed = []; + + // default gas limit + const gasLimit = chain === 'ArbitrumSepolia' + ? 4_000_000n + : this.opts?.maxGasLimit ?? 500_000n; + + + // TODO: DIFF STARTS HERE + + let gasPrice = 200_000_000_000n; // 200gwei + let maxFeePerGas = 6_000_000_000n; // 6gwei + let maxPriorityFeePerGas = 1000_000_000n; // 1gwei + + // Celo does not support this call + if (chain !== 'Celo') { + const feeData = await this._signer.provider!.getFeeData(); + gasPrice = feeData.gasPrice ?? gasPrice; + maxFeePerGas = feeData.maxFeePerGas ?? maxFeePerGas; + maxPriorityFeePerGas = + feeData.maxPriorityFeePerGas ?? maxPriorityFeePerGas; + } + + // Oasis throws malformed errors unless we + // set it to use legacy transaction parameters + const gasOpts = + chain === 'Oasis' + ? 
{ + gasLimit, + gasPrice: gasPrice, + // Hardcode type + type: 0, + } + : { + gasPrice, + maxFeePerGas, + maxPriorityFeePerGas, + gasLimit, + }; + + // TODO: DIFF ENDS HERE + + for (const txn of tx) { + const { transaction, description } = txn; + if (this.opts?.debug) + console.log(`Signing: ${description} for ${this.address()}`); + + const t: TransactionRequest = { + ...transaction, + ...gasOpts, + from: this.address(), + nonce: await this._signer.getNonce(), + }; + + // try { + // const estimate = await this._signer.provider!.estimateGas(t); + // t.gasLimit = estimate + estimate / 10n; // Add 10% buffer + // if (this.opts?.maxGasLimit && t.gasLimit > this.opts?.maxGasLimit) { + // throw new Error( + // `Gas limit ${t.gasLimit} exceeds maxGasLimit ${this.opts?.maxGasLimit}`, + // ); + // } + // } catch (e) { + // console.info('Failed to estimate gas for transaction: ', e); + // console.info('Using gas limit: ', t.gasLimit); + // } + + signed.push(await this._signer.signTransaction(t)); + } + return signed; + } +} + +export function isEvmNativeSigner( + signer: Signer, +): signer is EvmNativeSigner { + return ( + isNativeSigner(signer) && + chainToPlatform(signer.chain()) === _platform && + isEthersSigner(signer.unwrap()) + ); +} + +// No type guard provided by ethers, instanceof checks will fail on even slightly different versions of ethers +function isEthersSigner(thing: any): thing is EthersSigner { + return ( + 'provider' in thing && + typeof thing.connect === 'function' && + typeof thing.getAddress === 'function' && + typeof thing.getNonce === 'function' && + typeof thing.populateCall === 'function' && + typeof thing.populateTransaction === 'function' && + typeof thing.estimateGas === 'function' && + typeof thing.call === 'function' && + typeof thing.resolveName === 'function' && + typeof thing.signTransaction === 'function' && + typeof thing.sendTransaction === 'function' && + typeof thing.signMessage === 'function' && + typeof thing.signTypedData === 'function' + ); +} diff --git a/cli/src/getSigner.ts b/cli/src/getSigner.ts new file mode 100644 index 000000000..67504f72a --- /dev/null +++ b/cli/src/getSigner.ts @@ -0,0 +1,106 @@ +import solana from "@wormhole-foundation/sdk/platforms/solana"; +import * as myEvmSigner from "./evmsigner.js"; +import { ChainContext, Wormhole, chainToPlatform, type Chain, type ChainAddress, type Network, type Signer } from "@wormhole-foundation/sdk"; +import { Keypair } from "@solana/web3.js"; +import fs from "fs"; +import { encoding } from '@wormhole-foundation/sdk-connect'; + +export type SignerType = "privateKey" | "ledger"; + +export type SignerSource = { + type: SignerType; + source: string; +}; + +// TODO: copied these from the examples. do they exist in the sdk? 
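+// SignerStuff bundles everything a caller needs once a key has been loaded:
+// the chain context, the sdk Signer, the parsed ChainAddress, and the original
+// SignerSource (so forgeSignerArgs can rebuild the matching forge CLI flags).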
+export interface SignerStuff { + chain: ChainContext; + signer: Signer; + address: ChainAddress; + source: SignerSource; +} + +// arguments to pass to `forge` +export function forgeSignerArgs( + source: SignerSource, +): string { + let signerArgs + switch (source.type) { + case "privateKey": + signerArgs = `--private-key ${source.source}`; + break; + case "ledger": + signerArgs = `--ledger --mnemonic-derivation-paths "${source.source}"`; + break; + default: + throw new Error("Unsupported signer type"); + } + return signerArgs; +} + +export async function getSigner( + chain: ChainContext, + type: SignerType, + source?: string, + filePath?: string +): Promise> { + let signer: Signer; + const platform = chainToPlatform(chain.chain); + switch (platform) { + case "Solana": + switch (type) { + case "privateKey": + let privateKey: string; + if (filePath) { + // Read the private key from the file if filePath is provided + const keyPair = Keypair.fromSecretKey(new Uint8Array(JSON.parse(fs.readFileSync(filePath, 'utf8')))); + privateKey = encoding.b58.encode(keyPair.secretKey); + } else { + const privateKeySource = source ?? process.env.SOLANA_PRIVATE_KEY; + if (privateKeySource === undefined) { + throw new Error("Private key not provided and SOLANA_PRIVATE_KEY env var not set"); + } + privateKey = privateKeySource; + } + signer = await solana.getSigner( + await chain.getRpc(), + privateKey, + { debug: false } + ); + break; + case "ledger": + throw new Error("Ledger not yet supported on Solana"); + default: + throw new Error("Unsupported signer type"); + } + break; + case "Evm": + switch (type) { + case "privateKey": + source = source ?? process.env.ETH_PRIVATE_KEY; + if (source === undefined) { + throw new Error("ETH_PRIVATE_KEY env var not set"); + } + signer = await myEvmSigner.getEvmSigner( + await chain.getRpc(), + source, + { debug: true } + ); + break; + case "ledger": + throw new Error("Ledger not yet supported on Evm"); + default: + throw new Error("Unsupported signer type"); + } + break; + default: + throw new Error("Unrecognized platform: " + platform); + } + + return { + chain, + signer: signer as Signer, + address: Wormhole.chainAddress(chain.chain, signer.address()), + source: { type, source } + }; +} diff --git a/cli/src/index.ts b/cli/src/index.ts index dbd95ec8e..e5a57d599 100755 --- a/cli/src/index.ts +++ b/cli/src/index.ts @@ -1,68 +1,881 @@ #!/usr/bin/env bun +import "./side-effects"; // doesn't quite work for silencing the bigint error message. why? 
+import evm from "@wormhole-foundation/sdk/platforms/evm"; +import solana from "@wormhole-foundation/sdk/platforms/solana"; +import { encoding } from '@wormhole-foundation/sdk-connect'; +import { execSync } from "child_process"; + +import evmDeployFile from "../../evm/script/DeployWormholeNtt.s.sol" with { type: "file" }; +import evmDeployFileHelper from "../../evm/script/helpers/DeployWormholeNttBase.sol" with { type: "file" }; + +import chalk from "chalk"; import yargs from "yargs"; import { $ } from "bun"; import { hideBin } from "yargs/helpers"; +import { Connection, Keypair, PublicKey } from "@solana/web3.js"; +import * as spl from "@solana/spl-token"; +import fs from "fs"; +import readline from "readline"; +import { ChainContext, UniversalAddress, Wormhole, assertChain, canonicalAddress, chainToPlatform, chains, isNetwork, networks, platforms, signSendWait, toUniversal, type AccountAddress, type Chain, type ChainAddress, type ConfigOverrides, type Network, type Platform } from "@wormhole-foundation/sdk"; +import "@wormhole-foundation/sdk-evm-ntt"; +import "@wormhole-foundation/sdk-solana-ntt"; +import "@wormhole-foundation/sdk-definitions-ntt"; +import type { Ntt, NttTransceiver } from "@wormhole-foundation/sdk-definitions-ntt"; + +import { type SolanaChains, SolanaAddress } from "@wormhole-foundation/sdk-solana"; -type Network = "mainnet" | "testnet" | "devnet"; +import { colorizeDiff, diffObjects } from "./diff"; +import { forgeSignerArgs, getSigner, type SignerType } from "./getSigner"; +import { NTT, SolanaNtt } from "@wormhole-foundation/sdk-solana-ntt"; +import type { EvmNtt, EvmNttWormholeTranceiver } from "@wormhole-foundation/sdk-evm-ntt"; +import type { EvmChains } from "@wormhole-foundation/sdk-evm"; +import { getAvailableVersions, getGitTagName } from "./tag"; +import * as configuration from "./configuration"; +import { ethers } from "ethers"; + +// TODO: contract upgrades on solana +// TODO: set special relaying? +// TODO: currently, we just default all evm chains to standard relaying. should we not do that? what's a good way to configure this? + +// TODO: check if manager can mint the token in burning mode (on solana it's +// simple. 
on evm we need to simulate with prank) +const overrides: ConfigOverrides = (function () { + // read overrides.json file if exists + if (fs.existsSync("overrides.json")) { + console.error(chalk.yellow("Using overrides.json")); + return JSON.parse(fs.readFileSync("overrides.json").toString()); + } else { + return {}; + } +})(); + +export type Deployment = { + ctx: ChainContext, + ntt: Ntt, + whTransceiver: NttTransceiver, + decimals: number, + manager: ChainAddress, + config: { + remote?: ChainConfig, + local?: ChainConfig, + }, +} + +// TODO: rename +export type ChainConfig = { + version: string, + mode: Ntt.Mode, + paused: boolean, + owner: string, + pauser?: string, + manager: string, + token: string, + transceivers: { + threshold: number, + wormhole: { address: string, pauser?: string }, + }, + limits: { + outbound: string, + inbound: Partial<{ [C in Chain]: string }>, + } +} -// TODO: grab this from sdkv2 -export function assertNetwork(n: string): asserts n is Network { - if (n !== "mainnet" && n !== "testnet" && n !== "devnet") { - throw Error(`Unknown network: ${n}`); +export type Config = { + network: Network, + chains: Partial<{ + [C in Chain]: ChainConfig + }>, + defaultLimits?: { + outbound: string, } } -export const NETWORK_OPTIONS = { - alias: "n", - describe: "Network", - choices: ["mainnet", "testnet", "devnet"], - demandOption: true, +const options = { + network: { + alias: "n", + describe: "Network", + choices: networks, + demandOption: true, + }, + deploymentPath: { + alias: "p", + describe: "Path to the deployment file", + default: "deployment.json", + type: "string", + }, + yes: { + alias: "y", + describe: "Skip confirmation", + type: "boolean", + default: false, + }, + signerType: { + alias: "s", + describe: "Signer type", + type: "string", + choices: ["privateKey", "ledger"], + default: "privateKey", + }, + verbose: { + alias: "v", + describe: "Verbose output", + type: "boolean", + default: false, + }, + chain: { + describe: "Chain", + type: "string", + choices: chains, + demandOption: true, + }, + address: { + describe: "Address", + type: "string", + demandOption: true, + }, + local: { + describe: "Use the current local version for deployment (advanced).", + type: "boolean", + default: false, + }, + version: { + describe: "Version of NTT to deploy", + type: "string", + demandOption: false, + }, + latest: { + describe: "Use the latest version", + type: "boolean", + default: false, + }, + platform: { + describe: "Platform", + type: "string", + choices: platforms, + demandOption: true, + }, + skipVerify: + { + describe: "Skip contract verification", + type: "boolean", + default: false, + }, + payer: { + describe: "Path to the payer json file (Solana)", + type: "string", + }, } as const; + +// TODO: this is a temporary hack to allow deploying from main (as we only need +// the changes to the evm script) +async function withCustomEvmDeployerScript(pwd: string, then: () => Promise): Promise { + ensureNttRoot(pwd); + const overrides = [ + { path: `${pwd}/evm/script/DeployWormholeNtt.s.sol`, with: evmDeployFile }, + { path: `${pwd}/evm/script/helpers/DeployWormholeNttBase.sol`, with: evmDeployFileHelper }, + ] + for (const { path, with: withFile } of overrides) { + const old = `${path}.old`; + if (fs.existsSync(path)) { + fs.copyFileSync(path, old); + } + fs.copyFileSync(withFile, path); + } + try { + return await then() + } finally { + // restore old files + for (const { path } of overrides) { + const old = `${path}.old`; + if (fs.existsSync(old)) { + fs.copyFileSync(old, 
path); + fs.unlinkSync(old); + } + } + } +} + yargs(hideBin(process.argv)) + .wrap(Math.min(process.stdout.columns || 120, 160)) // Use terminal width, but no more than 160 characters .scriptName("ntt") - .command( - "solana", + .version((() => { + const ver = nttVersion(); + if (!ver) { + return "unknown"; + } + const { version, commit, path, remote } = ver; + const defaultPath = `${process.env.HOME}/.ntt-cli/.checkout`; + const remoteString = remote.includes("wormhole-foundation") ? "" : `${remote}@`; + if (path === defaultPath) { + return `ntt v${version} (${remoteString}${commit})`; + } else { + return `ntt v${version} (${remoteString}${commit}) from ${path}`; + } + })()) + // config group of commands + .command("config", + "configuration commands", + configuration.command + ) + .command("update", + "update the NTT CLI", + (yargs) => yargs + .option("path", { + describe: "Path to a local NTT repo to install from. If not specified, the latest version will be installed.", + type: "string", + }) + .option("branch", { + describe: "Git branch to install from", + type: "string", + }) + .option("repo", { + describe: "Git repository to install from", + type: "string", + }) + .example("$0 update", "Update the NTT CLI to the latest version") + .example("$0 update --path /path/to/ntt", "Update the NTT CLI from a local repo") + .example("$0 update --branch cli", "Update the NTT CLI to the cli branch"), + async (argv) => { + const localPath = argv["path"]; + if (localPath) { + if (argv["ref"]) { + console.error("Cannot specify both --path and --ref"); + process.exit(1); + } + if (argv["repo"]) { + console.error("Cannot specify both --path and --repo"); + process.exit(1); + } + await $`${localPath}/cli/install.sh`; + } else { + let branchArg = ""; + let repoArg = ""; + if (argv["branch"]) { + branchArg = `--branch ${argv["branch"]}`; + } + if (argv["repo"]) { + repoArg = `--repo ${argv["repo"]}`; + } + const installScript = "https://raw.githubusercontent.com/wormhole-foundation/example-native-token-transfers/cli/cli/install.sh"; + // save it to "$HOME/.ntt-cli/install.sh" + const nttDir = `${process.env.HOME}/.ntt-cli`; + const installer = `${nttDir}/install.sh`; + execSync(`mkdir -p ${nttDir}`); + execSync(`curl -s ${installScript} > ${installer}`); + execSync(`chmod +x ${installer}`); + execSync(`${installer} ${branchArg} ${repoArg}`, { stdio: "inherit" }); + } + }) + .command("new ", + "create a new NTT project", + (yargs) => yargs + .positional("path", { + describe: "Path to the project", + type: "string", + demandOption: true, + }) + .example("$0 new my-ntt-project", "Create a new NTT project in the 'my-ntt-project' directory"), + async (argv) => { + const git = execSync("git rev-parse --is-inside-work-tree || echo false", { + stdio: ["inherit", null, null] + }); + if (git.toString().trim() === "true") { + console.error("Already in a git repository"); + process.exit(1); + } + const path = argv["path"]; + await $`git clone -b main https://github.com/wormhole-foundation/example-native-token-transfers.git ${path}`; + }) + .command("add-chain ", + "add a chain to the deployment file", + (yargs) => yargs + .positional("chain", options.chain) + // TODO: add ability to specify manager address (then just pull the config) + // .option("manager", { + // describe: "Manager address", + // type: "string", + // }) + .option("program-key", { + describe: "Path to program key json (Solana)", + type: "string", + }) + .option("payer", { + describe: "Path to payer key json (Solana)", + type: "string", + }) + 
.option("binary", { + describe: "Path to program binary (.so file -- Solana)", + type: "string", + }) + .option("token", { + describe: "Token address", + type: "string", + }) + .option("mode", { + alias: "m", + describe: "Mode", + type: "string", + choices: ["locking", "burning"], + }) + .option("solana-priority-fee", { + describe: "Priority fee for Solana deployment (in microlamports)", + type: "number", + default: 50000, + }) + .option("signer-type", options.signerType) + .option("skip-verify", options.skipVerify) + .option("ver", options.version) + .option("latest", options.latest) + .option("local", options.local) + .option("path", options.deploymentPath) + .option("yes", options.yes) + .example("$0 add-chain Ethereum --token 0x1234... --mode burning --latest", "Add Ethereum chain with the latest contract version in burning mode") + .example("$0 add-chain Solana --token Sol1234... --mode locking --ver 1.0.0", "Add Solana chain with a specific contract version in locking mode") + .example("$0 add-chain Avalanche --token 0xabcd... --mode burning --local", "Add Avalanche chain using the local contract version"), + async (argv) => { + const path = argv["path"]; + const deployments: Config = loadConfig(path); + const chain: Chain = argv["chain"]; + const version = resolveVersion(argv["latest"], argv["ver"], argv["local"], chainToPlatform(chain)); + let mode = argv["mode"] as Ntt.Mode | undefined; + const signerType = argv["signer-type"] as SignerType; + const token = argv["token"]; + const network = deployments.network as Network; + + if (chain in deployments.chains) { + console.error(`Chain ${chain} already exists in ${path}`); + process.exit(1); + } + + validateChain(network, chain); + + const existsLocking = Object.values(deployments.chains).some((c) => c.mode === "locking"); + + if (existsLocking) { + if (mode && mode === "locking") { + console.error("Only one locking chain is allowed"); + process.exit(1); + } + mode = "burning"; + } + + if (!mode) { + console.error("Mode is required (use --mode)"); + process.exit(1); + } + + if (!token) { + console.error("Token is required (use --token)"); + process.exit(1); + } + + // let's deploy + + // TODO: factor out to function to get chain context + const wh = new Wormhole(network, [solana.Platform, evm.Platform], overrides); + const ch = wh.getChain(chain); + + // TODO: make manager configurable + const deployedManager = await deploy(version, mode, ch, token, signerType, !argv["skip-verify"], argv["yes"], argv["payer"], argv["program-key"], argv["binary"], argv["solana-priority-fee"]); + + const [config, _ctx, _ntt, decimals] = + await pullChainConfig(network, deployedManager, overrides); + + console.log("token decimals:", chalk.yellow(decimals)); + + deployments.chains[chain] = config; + fs.writeFileSync(path, JSON.stringify(deployments, null, 2)); + console.log(`Added ${chain} to ${path}`); + }) + .command("upgrade ", + "upgrade the contract on a specific chain", + (yargs) => yargs + .positional("chain", options.chain) + .option("ver", options.version) + .option("latest", { + describe: "Use the latest version", + type: "boolean", + default: false, + }) + .option("local", options.local) + .option("signer-type", options.signerType) + .option("skip-verify", options.skipVerify) + .option("path", options.deploymentPath) + .option("yes", options.yes) + .option("payer", { + describe: "Path to payer key json (Solana)", + type: "string", + }) + .option("program-key", { + describe: "Path to program key json (Solana)", + type: "string", + }) + 
.option("binary", { + describe: "Path to program binary (.so file -- Solana)", + type: "string", + }) + .example("$0 upgrade Ethereum --latest", "Upgrade the Ethereum contract to the latest version") + .example("$0 upgrade Solana --ver 1.1.0", "Upgrade the Solana contract to version 1.1.0") + .example("$0 upgrade Polygon --local --skip-verify", "Upgrade the Polygon contract using the local version, skipping explorer bytecode verification"), + async (argv) => { + const path = argv["path"]; + const deployments: Config = loadConfig(path); + const chain: Chain = argv["chain"]; + const signerType = argv["signer-type"] as SignerType; + const network = deployments.network as Network; + + if (!(chain in deployments.chains)) { + console.error(`Chain ${chain} not found in ${path}`); + process.exit(1); + } + + const chainConfig = deployments.chains[chain]!; + const currentVersion = chainConfig.version; + const platform = chainToPlatform(chain); + + const toVersion = resolveVersion(argv["latest"], argv["ver"], argv["local"], platform); + + if (argv["local"]) { + await warnLocalDeployment(argv["yes"]); + } + + if (toVersion === currentVersion && !argv["local"]) { + console.log(`Chain ${chain} is already at version ${currentVersion}`); + process.exit(0); + } + + console.log(`Upgrading ${chain} from version ${currentVersion} to ${toVersion || 'local version'}`); + + if (!argv["yes"]) { + await askForConfirmation(); + } + + const wh = new Wormhole(network, [solana.Platform, evm.Platform], overrides); + const ch = wh.getChain(chain); + + const [_, ctx, ntt] = await pullChainConfig( + network, + { chain, address: toUniversal(chain, chainConfig.manager) }, + overrides + ); + + await upgrade( + currentVersion, + toVersion, + ntt, + ctx, + signerType, + !argv["skip-verify"], + argv["payer"], + argv["program-key"], + argv["binary"] + ); + + // reinit the ntt object to get the new version + // TODO: is there an easier way to do this? + const { ntt: upgraded } = await nttFromManager(ch, chainConfig.manager); + + chainConfig.version = getVersion(chain, upgraded) + fs.writeFileSync(path, JSON.stringify(deployments, null, 2)); + + console.log(`Successfully upgraded ${chain} to version ${toVersion || 'local version'}`); + } + ) + .command("clone
", + "initialize a deployment file from an existing contract", + (yargs) => yargs + .positional("network", options.network) + .positional("chain", options.chain) + .positional("address", options.address) + .option("path", options.deploymentPath) + .option("verbose", options.verbose) + .example("$0 clone Testnet Ethereum 0x5678...", "Clone an existing Ethereum deployment on Testnet") + .example("$0 clone Mainnet Solana Sol5678... --path custom-clone.json", "Clone an existing Solana deployment on Mainnet to a custom file"), + async (argv) => { + if (!isNetwork(argv["network"])) { + console.error("Invalid network"); + process.exit(1); + } + + const path = argv["path"]; + const verbose = argv["verbose"]; + // check if the file exists + if (fs.existsSync(path)) { + console.error(`Deployment file already exists at ${path}`); + process.exit(1); + } + + // step 1. grab the config + // step 2. discover registrations + // step 3. grab registered peer configs + // + // NOTE: we don't recursively grab peer configs. This means the + // discovered peers will be the ones that are directly registered with + // the starting manager (the one we're cloning). + // For example, if we're cloning manager A, and it's registered with + // B, and B is registered with C, but C is not registered with A, then + // C will not be included in the cloned deployment. + // We could do peer discovery recursively but that would be a lot + // slower, since peer discovery is already O(n) in the number of + // supported chains (50+), because there is no way to enumerate the peers, so we + // need to query all possible chains to see if they're registered. + + const chain = argv["chain"]; + assertChain(chain) + + const manager = argv["address"]; + const network = argv["network"]; + + const universalManager = toUniversal(chain, manager); + + const ntts: Partial<{ [C in Chain]: Ntt }> = {}; + + const [config, _ctx, ntt, _decimals] = + await pullChainConfig(network, { chain, address: universalManager }, overrides); + + ntts[chain] = ntt as any; + + const configs: Partial<{ [C in Chain]: ChainConfig }> = { + [chain]: config, + } + + // discover peers + let count = 0; + for (const c of chains) { + process.stdout.write(`[${count}/${chains.length - 1}] Fetching peer config for ${c}`); + await new Promise((resolve) => setTimeout(resolve, 100)); + count++; + + const peer = await retryWithExponentialBackoff(() => ntt.getPeer(c), 5, 5000); + + process.stdout.write(`\n`); + if (peer === null) { + continue; + } + const address: UniversalAddress = peer.address.address.toUniversalAddress() + const [peerConfig, _ctx, peerNtt] = await pullChainConfig(network, { chain: c, address }, overrides); + ntts[c] = peerNtt as any; + configs[c] = peerConfig; + } + + // sort chains by name + const sorted = Object.fromEntries(Object.entries(configs).sort(([a], [b]) => a.localeCompare(b))); + + // sleep for a bit to avoid rate limiting when making the getDecimals call + // this can happen when the last we hit the rate limit just in the last iteration of the loop above. 
+ // (happens more often than you'd think, because the rate limiter + // gets more aggressive after each hit) + await new Promise((resolve) => setTimeout(resolve, 2000)); + + // now loop through the chains, and query their peer information to get the inbound limits + await pullInboundLimits(ntts, sorted, verbose) + + const deployment: Config = { + network: argv["network"], + chains: sorted, + }; + fs.writeFileSync(path, JSON.stringify(deployment, null, 2)); + }) + .command("init ", + "initialize a deployment file", + (yargs) => yargs + .positional("network", options.network) + .option("path", options.deploymentPath) + .example("$0 init Testnet", "Initialize a new deployment file for the Testnet network") + .example("$0 init Mainnet --path custom.json", "Initialize a new deployment file for Mainnet with a custom file name"), + async (argv) => { + if (!isNetwork(argv["network"])) { + console.error("Invalid network"); + process.exit(1); + } + const deployment = { + network: argv["network"], + chains: {}, + }; + const path = argv["path"]; + // check if the file exists + if (fs.existsSync(path)) { + console.error(`Deployment file already exists at ${path}. Specify a different path with --path`); + process.exit(1); + } + fs.writeFileSync(path, JSON.stringify(deployment, null, 2)); + }) + .command("pull", + "pull the remote configuration", + (yargs) => yargs + .option("path", options.deploymentPath) + .option("yes", options.yes) + .option("verbose", options.verbose) + .example("$0 pull", "Pull the latest configuration from the blockchain for all chains") + .example("$0 pull --yes", "Pull the latest configuration and apply changes without confirmation"), + async (argv) => { + const deployments: Config = loadConfig(argv["path"]); + const verbose = argv["verbose"]; + const network = deployments.network as Network; + const path = argv["path"]; + const deps: Partial<{ [C in Chain]: Deployment }> = await pullDeployments(deployments, network, verbose); + + let changed = false; + for (const [chain, deployment] of Object.entries(deps)) { + assertChain(chain); + const diff = diffObjects(deployments.chains[chain]!, deployment.config.remote!); + if (Object.keys(diff).length !== 0) { + console.error(chalk.reset(colorizeDiff({ [chain]: diff }))); + changed = true; + deployments.chains[chain] = deployment.config.remote! 
+ } + } + if (!changed) { + console.log(`${path} is already up to date`); + process.exit(0); + } + + if (!argv["yes"]) { + await askForConfirmation(); + } + fs.writeFileSync(path, JSON.stringify(deployments, null, 2)); + console.log(`Updated ${path}`); + }) + .command("push", + "push the local configuration", + (yargs) => yargs + .option("path", options.deploymentPath) + .option("yes", options.yes) + .option("signer-type", options.signerType) + .option("verbose", options.verbose) + .option("skip-verify", options.skipVerify) + .option("payer", options.payer) + .example("$0 push", "Push local configuration changes to the blockchain") + .example("$0 push --signer-type ledger", "Push changes using a Ledger hardware wallet for signing") + .example("$0 push --skip-verify", "Push changes without verifying contracts on EVM chains") + .example("$0 push --payer ", "Path to the payer json file (Solana), instead of setting SOLANA_PRIVATE_KEY env variable"), + async (argv) => { + const deployments: Config = loadConfig(argv["path"]); + const verbose = argv["verbose"]; + const network = deployments.network as Network; + const deps: Partial<{ [C in Chain]: Deployment }> = await pullDeployments(deployments, network, verbose); + const signerType = argv["signer-type"] as SignerType; + const payerPath = argv["payer"]; + + const missing = await missingConfigs(deps, verbose); + + if (checkConfigErrors(deps)) { + console.error("There are errors in the config file. Please fix these before continuing."); + process.exit(1); + } + + for (const [chain, missingConfig] of Object.entries(missing)) { + assertChain(chain); + const ntt = deps[chain]!.ntt; + const ctx = deps[chain]!.ctx; + const signer = await getSigner(ctx, signerType, undefined, payerPath); + for (const manager of missingConfig.managerPeers) { + const tx = ntt.setPeer(manager.address, manager.tokenDecimals, manager.inboundLimit, signer.address.address) + await signSendWait(ctx, tx, signer.signer) + } + for (const transceiver of missingConfig.transceiverPeers) { + const tx = ntt.setWormholeTransceiverPeer(transceiver, signer.address.address) + await signSendWait(ctx, tx, signer.signer) + } + for (const evmChain of missingConfig.evmChains) { + const tx = (await ntt.getTransceiver(0) as EvmNttWormholeTranceiver).setIsEvmChain(evmChain, true) + await signSendWait(ctx, tx, signer.signer) + } + for (const relaying of missingConfig.standardRelaying) { + const tx = (await ntt.getTransceiver(0) as EvmNttWormholeTranceiver).setIsWormholeRelayingEnabled(relaying, true) + await signSendWait(ctx, tx, signer.signer) + } + if (missingConfig.solanaWormholeTransceiver) { + if (chainToPlatform(chain) !== "Solana") { + console.error("Solana wormhole transceiver can only be set on Solana chains"); + continue; + } + const solanaNtt = ntt as SolanaNtt; + const tx = solanaNtt.registerTransceiver({ + payer: signer.address.address as AccountAddress, + owner: signer.address.address as AccountAddress, + transceiver: solanaNtt.program.programId + }) + try { + await signSendWait(ctx, tx, signer.signer) + } catch (e: any) { + console.error(e.logs); + } + } + if (missingConfig.solanaUpdateLUT) { + if (chainToPlatform(chain) !== "Solana") { + console.error("Solana update LUT can only be set on Solana chains"); + continue; + } + const solanaNtt = ntt as SolanaNtt; + const tx = solanaNtt.initializeOrUpdateLUT({ payer: new SolanaAddress(signer.address.address).unwrap() }) + try { + await signSendWait(ctx, tx, signer.signer) + } catch (e: any) { + console.error(e.logs); + } + } + } + + // 
pull deps again + const depsAfterRegistrations: Partial<{ [C in Chain]: Deployment }> = await pullDeployments(deployments, network, verbose); + + for (const [chain, deployment] of Object.entries(depsAfterRegistrations)) { + assertChain(chain); + await pushDeployment(deployment as any, signerType, !argv["skip-verify"], argv["yes"], payerPath); + } + }) + .command("status", + "check the status of the deployment", + (yargs) => yargs + .option("path", options.deploymentPath) + .option("verbose", options.verbose) + .example("$0 status", "Check the status of the deployment across all chains") + .example("$0 status --verbose", "Check the status with detailed output"), + async (argv) => { + const path = argv["path"]; + const verbose = argv["verbose"]; + // TODO: I don't like the variable names here + const deployments: Config = loadConfig(path); + + const network = deployments.network as Network; + + let deps: Partial<{ [C in Chain]: Deployment }> = await pullDeployments(deployments, network, verbose); + + let fixable = 0; + + const extraInfo: any = {}; + + if (checkConfigErrors(deps)) { + console.error("There are errors in the config file. Please fix these before continuing."); + process.exit(1); + } + + // diff remote and local configs + for (const [chain, deployment] of Object.entries(deps)) { + assertChain(chain); + const local = deployment.config.local; + const remote = deployment.config.remote; + const a = { [chain]: local! }; + const b = { [chain]: remote! }; + + const diff = diffObjects(a, b); + if (Object.keys(diff).length !== 0) { + console.error(chalk.reset(colorizeDiff(diff))); + fixable++; + } + + if (verbose) { + const immutables = await getImmutables(chain, deployment.ntt); + if (immutables) { + extraInfo[chain] = immutables; + } + const pdas = await getPdas(chain, deployment.ntt); + if (pdas) { + extraInfo[chain] = pdas; + } + } + } + + if (Object.keys(extraInfo).length > 0) { + console.log(chalk.yellow(JSON.stringify(extraInfo, null, 2))); + } + + // verify peers + const missing = await missingConfigs(deps, verbose); + + if (Object.keys(missing).length > 0) { + fixable++; + } + + for (const [chain, missingConfig] of Object.entries(missing)) { + console.error(`${chain} status:`); + for (const manager of missingConfig.managerPeers) { + console.error(` Missing manager peer: ${manager.address.chain}`); + } + for (const transceiver of missingConfig.transceiverPeers) { + console.error(` Missing transceiver peer: ${transceiver.chain}`); + } + for (const evmChain of missingConfig.evmChains) { + console.error(` ${evmChain} needs to be configured as an EVM chain`); + } + for (const relaying of missingConfig.standardRelaying) { + console.warn(` No standard relaying: ${relaying}`); + } + if (missingConfig.solanaWormholeTransceiver) { + console.error(" Missing Solana wormhole transceiver"); + } + if (missingConfig.solanaUpdateLUT) { + console.error(" Missing or outdated LUT"); + } + } + + if (fixable > 0) { + console.error("Run `ntt pull` to pull the remote configuration (overwriting the local one)"); + console.error("Run `ntt push` to push the local configuration (overwriting the remote one) by executing the necessary transactions"); + process.exit(1); + } else { + console.log(`${path} is up to date with the on-chain configuration.`); + process.exit(0); + } + }) + .command("solana", "Solana commands", (yargs) => { yargs - .command( - "deploy", - "deploy the solana program", - (yargs) => yargs.option("network", NETWORK_OPTIONS), + .command("key-base58 ", + "print private key in base58", + 
(yargs) => yargs + .positional("keypair", { + describe: "Path to keypair.json", + type: "string", + demandOption: true, + }), (argv) => { - throw new Error("Not implemented"); + const keypair = Keypair.fromSecretKey(new Uint8Array(JSON.parse(fs.readFileSync(argv["keypair"]).toString()))); + console.log(encoding.b58.encode(keypair.secretKey)); }) - .command( - "upgrade", - "upgrade the solana program", + .command("token-authority ", + "print the token authority address for a given program ID", (yargs) => yargs - .option("network", NETWORK_OPTIONS) - .option("dir", { - alias: "d", - describe: "Path to the solana workspace", - default: ".", - demandOption: false, + .positional("programId", { + describe: "Program ID", type: "string", + demandOption: true, + }), + (argv) => { + const programId = new PublicKey(argv["programId"]); + const tokenAuthority = NTT.pdas(programId).tokenAuthority(); + console.log(tokenAuthority.toBase58()); + }) + .command("ata ", + "print the token authority address for a given program ID", + (yargs) => yargs + .positional("mint", { + describe: "Mint address", + type: "string", + demandOption: true, }) - .option("keypair", { - alias: "k", - describe: "Path to the keypair", + .positional("owner", { + describe: "Owner address", + type: "string", demandOption: true, + }) + .positional("tokenProgram", { + describe: "Token program ID", type: "string", + choices: ["legacy", "token22"], + demandOption: true, }), - async (argv) => { - // TODO: the hardcoded stuff should be factored out once - // we support other networks and programs - // TODO: currently the keypair is the upgrade authority. we should support governance program too - const network = argv.network; - const keypair = argv.keypair; - const dir = argv.dir; - const objectFile = "example_native_token_transfers.so"; - const programId = "nttiK1SepaQt6sZ4WGW5whvc9tEnGXGxuKeptcQPCcS"; - assertNetwork(network); - await $`cargo build-sbf --manifest-path=${dir}/Cargo.toml --no-default-features --features "${cargoNetworkFeature(network)}"` - await $`solana program deploy --program-id ${programId} ${dir}/target/deploy/${objectFile} --keypair ${keypair} -u ${solanaMoniker(network)}` + (argv) => { + const mint = new PublicKey(argv["mint"]); + const owner = new PublicKey(argv["owner"]); + const tokenProgram = argv["tokenProgram"] === "legacy" + ? spl.TOKEN_PROGRAM_ID + : spl.TOKEN_2022_PROGRAM_ID + const ata = spl.getAssociatedTokenAddressSync(mint, owner, true, tokenProgram); + console.log(ata.toBase58()); }) .demandCommand() } @@ -72,25 +885,1159 @@ yargs(hideBin(process.argv)) .demandCommand() .parse(); +// Implicit configuration that's missing from a contract deployment. These are +// implicit in the sense that they don't need to be explicitly set in the +// deployment file. +// For example, all managers and transceivers need to be registered with each other. +// Additionally, the EVM chains need to be registered as such, and the standard relaying +// needs to be enabled for all chains where this is supported. 
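+// Illustrative example: on a freshly added pair of chains with nothing pushed
+// yet, each chain's entry typically contains a peer entry for the other chain
+// under managerPeers and transceiverPeers, while the Solana-specific flags
+// below indicate whether the wormhole transceiver registration and the LUT
+// still need to be set up on the Solana side.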
+type MissingImplicitConfig = { + managerPeers: Ntt.Peer[]; + transceiverPeers: ChainAddress[]; + evmChains: Chain[]; + standardRelaying: Chain[]; + solanaWormholeTransceiver: boolean; + solanaUpdateLUT: boolean; +} + +function checkConfigErrors(deps: Partial<{ [C in Chain]: Deployment }>): number { + let fatal = 0; + for (const [chain, deployment] of Object.entries(deps)) { + assertChain(chain); + const config = deployment.config.local!; + if (!checkNumberFormatting(config.limits.outbound, deployment.decimals)) { + console.error(`ERROR: ${chain} has an outbound limit (${config.limits.outbound}) with the wrong number of decimals. The number should have ${deployment.decimals} decimals.`); + fatal++; + } + if (config.limits.outbound === formatNumber(0n, deployment.decimals)) { + console.warn(chalk.yellow(`${chain} has an outbound limit of 0`)); + } + for (const [c, limit] of Object.entries(config.limits.inbound)) { + if (!checkNumberFormatting(limit, deployment.decimals)) { + console.error(`ERROR: ${chain} has an inbound limit with the wrong number of decimals for ${c} (${limit}). The number should have ${deployment.decimals} decimals.`); + fatal++; + } + if (limit === formatNumber(0n, deployment.decimals)) { + console.warn(chalk.yellow(`${chain} has an inbound limit of 0 from ${c}`)); + } + } + } + return fatal; +} + +function createWorkTree(platform: Platform, version: string): string { + const tag = getGitTagName(platform, version); + if (!tag) { + console.error(`No tag found matching ${version} for ${platform}`); + process.exit(1); + } + + const worktreeName = `.deployments/${platform}-${version}`; + + if (fs.existsSync(worktreeName)) { + console.log(chalk.yellow(`Worktree already exists at ${worktreeName}. Resetting to ${tag}`)); + execSync(`git -C ${worktreeName} reset --hard ${tag}`, { + stdio: "inherit" + }); + } else { + // create worktree + execSync(`git worktree add ${worktreeName} ${tag}`, { + stdio: "inherit" + }); + } + + // NOTE: we create this symlink whether or not the file exists. + // this way, if it's created later, the symlink will be correct + execSync(`ln -fs $(pwd)/overrides.json $(pwd)/${worktreeName}/overrides.json`, { + stdio: "inherit" + }); + + console.log(chalk.green(`Created worktree at ${worktreeName} from tag ${tag}`)); + return worktreeName; +} + +async function upgrade( + _fromVersion: string, + toVersion: string | null, + ntt: Ntt, + ctx: ChainContext, + signerType: SignerType, + evmVerify: boolean, + solanaPayer?: string, + solanaProgramKeyPath?: string, + solanaBinaryPath?: string +): Promise { + // TODO: check that fromVersion is safe to upgrade to toVersion from + const platform = chainToPlatform(ctx.chain); + const worktree = toVersion ? createWorkTree(platform, toVersion) : "."; + switch (platform) { + case "Evm": + const evmNtt = ntt as EvmNtt; + const evmCtx = ctx as ChainContext; + return upgradeEvm(worktree, evmNtt, evmCtx, signerType, evmVerify); + case "Solana": + if (solanaPayer === undefined || !fs.existsSync(solanaPayer)) { + console.error("Payer not found. 
Specify with --payer"); + process.exit(1); + } + const solanaNtt = ntt as SolanaNtt; + const solanaCtx = ctx as ChainContext; + return upgradeSolana(worktree, toVersion, solanaNtt, solanaCtx, solanaPayer, solanaProgramKeyPath, solanaBinaryPath); + default: + throw new Error("Unsupported platform"); + } +} + +async function upgradeEvm( + pwd: string, + ntt: EvmNtt, + ctx: ChainContext, + signerType: SignerType, + evmVerify: boolean +): Promise { + ensureNttRoot(pwd); + + console.log("Upgrading EVM chain", ctx.chain); + + const signer = await getSigner(ctx, signerType); + const signerArgs = forgeSignerArgs(signer.source); + + console.log("Installing forge dependencies...") + execSync("forge install", { + cwd: `${pwd}/evm`, + stdio: "pipe" + }); + + let verifyArgs: string = ""; + if (evmVerify) { + // TODO: verify etherscan api key? + const etherscanApiKey = configuration.get(ctx.chain, "scan_api_key", { reportError: true }) + if (!etherscanApiKey) { + process.exit(1); + } + verifyArgs = `--verify --etherscan-api-key ${etherscanApiKey}`; + } + + console.log("Upgrading manager..."); + await withCustomEvmDeployerScript(pwd, async () => { + execSync( + `forge script --via-ir script/DeployWormholeNtt.s.sol \ +--rpc-url ${ctx.config.rpc} \ +--sig "upgrade(address)" \ +${ntt.managerAddress} \ +${signerArgs} \ +--broadcast \ +${verifyArgs} | tee last-run.stdout`, { + cwd: `${pwd}/evm`, + stdio: "inherit" + }); + }); + +} + +async function upgradeSolana( + pwd: string, + version: string | null, + ntt: SolanaNtt, + ctx: ChainContext, + payer: string, + programKeyPath?: string, + binaryPath?: string +): Promise { + if (version === null) { + throw new Error("Cannot upgrade Solana to local version"); // TODO: this is not hard to enabled + } + const mint = (await (ntt.getConfig())).mint; + await deploySolana(pwd, version, await ntt.getMode(), ctx, mint.toBase58(), payer, false, programKeyPath, binaryPath); + // TODO: call initializeOrUpdateLUT. currently it's done in the following 'ntt push' step. +} + +async function deploy( + version: string | null, + mode: Ntt.Mode, + ch: ChainContext, + token: string, + signerType: SignerType, + evmVerify: boolean, + yes: boolean, + solanaPayer?: string, + solanaProgramKeyPath?: string, + solanaBinaryPath?: string, + solanaPriorityFee?: number +): Promise> { + if (version === null) { + await warnLocalDeployment(yes); + } + const platform = chainToPlatform(ch.chain); + const worktree = version ? createWorkTree(platform, version) : "."; + switch (platform) { + case "Evm": + return await deployEvm(worktree, mode, ch, token, signerType, evmVerify); + case "Solana": + if (solanaPayer === undefined || !fs.existsSync(solanaPayer)) { + console.error("Payer not found. 
Specify with --payer"); + process.exit(1); + } + const solanaCtx = ch as ChainContext; + return await deploySolana(worktree, version, mode, solanaCtx, token, solanaPayer, true, solanaProgramKeyPath, solanaBinaryPath, solanaPriorityFee) as ChainAddress; + default: + throw new Error("Unsupported platform"); + } +} + +async function deployEvm( + pwd: string, + mode: Ntt.Mode, + ch: ChainContext, + token: string, + signerType: SignerType, + verify: boolean, +): Promise> { + ensureNttRoot(pwd); + + const wormhole = ch.config.contracts.coreBridge; + if (!wormhole) { + console.error("Core bridge not found"); + process.exit(1); + } + const relayer = ch.config.contracts.relayer; + if (!relayer) { + console.error("Relayer not found"); + process.exit(1); + } + + const rpc = ch.config.rpc; + const specialRelayer = "0x63BE47835c7D66c4aA5B2C688Dc6ed9771c94C74"; // TODO: how to configure this? + + const provider = new ethers.JsonRpcProvider(rpc); + const abi = ["function decimals() external view returns (uint8)"]; + const tokenContract = new ethers.Contract(token, abi, provider); + const decimals: number = await tokenContract.decimals(); + + // TODO: should actually make these ENV variables. + const sig = "run(address,address,address,address,uint8,uint8)"; + const modeUint = mode === "locking" ? 0 : 1; + const signer = await getSigner(ch, signerType); + const signerArgs = forgeSignerArgs(signer.source); + + // TODO: verify etherscan api key? + let verifyArgs: string[] = []; + if (verify) { + const etherscanApiKey = configuration.get(ch.chain, "scan_api_key", { reportError: true }) + if (!etherscanApiKey) { + process.exit(1); + } + verifyArgs = ["--verify", "--etherscan-api-key", etherscanApiKey] + } + + console.log("Installing forge dependencies...") + execSync("forge install", { + cwd: `${pwd}/evm`, + stdio: "pipe" + }); + + console.log("Deploying manager..."); + const deploy = async (simulate: boolean): Promise => { + const simulateArg = simulate ? "" : "--skip-simulation"; + await withCustomEvmDeployerScript(pwd, async () => { + try { + execSync(` +forge script --via-ir script/DeployWormholeNtt.s.sol \ +--rpc-url ${rpc} \ +${simulateArg} \ +--sig "${sig}" ${wormhole} ${token} ${relayer} ${specialRelayer} ${decimals} ${modeUint} \ +--broadcast ${verifyArgs.join(' ')} ${signerArgs} 2>&1 | tee last-run.stdout`, { + cwd: `${pwd}/evm`, + encoding: 'utf8', + stdio: 'inherit' + }); + } catch (error) { + console.error("Failed to deploy manager"); + // NOTE: we don't exit here. instead, we check if the manager was + // deployed successfully (below) and proceed if it was. + // process.exit(1); + } + }); + return fs.readFileSync(`${pwd}/evm/last-run.stdout`).toString(); + } + + // we attempt to deploy with simulation first, then without if it fails + let out = await deploy(true); + if (out.includes("Simulated execution failed")) { + if (out.includes("NotActivated")) { + console.error("Simulation failed, likely because the token contract is compiled against a different EVM version. It's probably safe to continue without simulation.") + await askForConfirmation("Do you want to proceed with the deployment without simulation?"); + } else { + console.error("Simulation failed. 
Please read the error message carefully, and proceed with caution."); + await askForConfirmation("Do you want to proceed with the deployment without simulation?"); + } + out = await deploy(false); + } + + if (!out) { + console.error("Failed to deploy manager"); + process.exit(1); + } + const logs = out.split("\n").map((l) => l.trim()).filter((l) => l.length > 0); + const manager = logs.find((l) => l.includes("NttManager: 0x"))?.split(" ")[1]; + if (!manager) { + console.error("Manager not found"); + process.exit(1); + } + const universalManager = toUniversal(ch.chain, manager); + return { chain: ch.chain, address: universalManager }; +} + +async function deploySolana( + pwd: string, + version: string | null, + mode: Ntt.Mode, + ch: ChainContext, + token: string, + payer: string, + initialize: boolean, + managerKeyPath?: string, + binaryPath?: string, + priorityFee?: number +): Promise> { + ensureNttRoot(pwd); + + // TODO: if the binary is provided, we should not check addresses in the source tree. (so we should move around the control flow a bit) + // TODO: factor out some of this into separate functions to help readability of this function (maybe even move to a different file) + + const wormhole = ch.config.contracts.coreBridge; + if (!wormhole) { + console.error("Core bridge not found"); + process.exit(1); + } + + // grep example_native_token_transfers = ".*" + // in solana/Anchor.toml + // TODO: what if they rename the program? + const existingProgramId = fs.readFileSync(`${pwd}/solana/Anchor.toml`).toString().match(/example_native_token_transfers = "(.*)"/)?.[1]; + if (!existingProgramId) { + console.error("Program ID not found in Anchor.toml (looked for example_native_token_transfers = \"(.*)\")"); + process.exit(1); + } + + let programKeypairPath; + let programKeypair; + + if (managerKeyPath) { + if (!fs.existsSync(managerKeyPath)) { + console.error(`Program keypair not found: ${managerKeyPath}`); + process.exit(1); + } + programKeypairPath = managerKeyPath; + programKeypair = Keypair.fromSecretKey(new Uint8Array(JSON.parse(fs.readFileSync(managerKeyPath).toString()))); + } else { + const programKeyJson = `${existingProgramId}.json`; + if (!fs.existsSync(programKeyJson)) { + console.error(`Program keypair not found: ${programKeyJson}`); + console.error("Run `solana-keygen` to create a new keypair (either with 'new', or with 'grind'), and pass it to this command with --program-key"); + console.error("For example: solana-keygen grind --starts-with ntt:1 --ignore-case") + process.exit(1); + } + programKeypairPath = programKeyJson; + programKeypair = Keypair.fromSecretKey(new Uint8Array(JSON.parse(fs.readFileSync(programKeyJson).toString()))); + if (existingProgramId !== programKeypair.publicKey.toBase58()) { + console.error(`The private key in ${programKeyJson} does not match the existing program ID: ${existingProgramId}`); + process.exit(1); + } + } + + // see if the program key matches the existing program ID. if not, we need + // to update the latter in the Anchor.toml file and the lib.rs file(s) + const providedProgramId = programKeypair.publicKey.toBase58(); + if (providedProgramId !== existingProgramId) { + // only ask for confirmation if the current directory is ".". if it's + // something else (a worktree) then it's a fresh checkout and we just + // override the address anyway. 
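// Editor's illustration (not part of the patch): the rewrite below is a plain string
// substitution. Assuming the stock program ID, the Anchor.toml entry
//
//   example_native_token_transfers = "nttiK1SepaQt6sZ4WGW5whvc9tEnGXGxuKeptcQPCcS"
//
// would have its address swapped for the provided keypair's pubkey, and the same ID in
// the program's lib.rs (typically inside Anchor's declare_id! macro) is rewritten to match.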
+ if (pwd === ".") { + console.error(`Program keypair does not match the existing program ID: ${existingProgramId}`); + await askForConfirmation(`Do you want to update the program ID in the Anchor.toml file and the lib.rs file to ${providedProgramId}?`); + } + + const anchorTomlPath = `${pwd}/solana/Anchor.toml`; + const libRsPath = `${pwd}/solana/programs/example-native-token-transfers/src/lib.rs`; + + const anchorToml = fs.readFileSync(anchorTomlPath).toString(); + const newAnchorToml = anchorToml.replace(existingProgramId, providedProgramId); + fs.writeFileSync(anchorTomlPath, newAnchorToml); + const libRs = fs.readFileSync(libRsPath).toString(); + const newLibRs = libRs.replace(existingProgramId, providedProgramId); + fs.writeFileSync(libRsPath, newLibRs); + } + + + // First we check that the provided mint's mint authority is the program's token authority PDA when in burning mode. + // This is checked in the program initialiser anyway, but we can save some + // time by checking it here and failing early (not to mention better + // diagnostics). + + const emitter = NTT.pdas(providedProgramId).emitterAccount().toBase58(); + const payerKeypair = Keypair.fromSecretKey(new Uint8Array(JSON.parse(fs.readFileSync(payer).toString()))); + + // this is not super pretty... I want to initialise the 'ntt' object, but + // because it's not deployed yet, fetching the version will fail, and thus default to whatever the default version is. + // We want to use the correct version (because the sdk's behaviour depends on it), so we first create a dummy ntt instance, + // let that fill in all the necessary fields, and then create a new instance with the correct version. + // It should be possible to avoid this dummy object and just instantiate 'SolanaNtt' directly, but I wasn't + // sure where the various pieces are plugged together and this seemed easier. + // TODO: refactor this to avoid the dummy object + const dummy: SolanaNtt = await ch.getProtocol("Ntt", { + ntt: { + manager: providedProgramId, + token: token, + transceiver: { wormhole: emitter }, + } + }) as SolanaNtt; + + const ntt: SolanaNtt = new SolanaNtt( + dummy.network, + dummy.chain, + dummy.connection, + dummy.contracts, + version ?? undefined + ); + + // get the mint authority of 'token' + const tokenMint = new PublicKey(token); + // const tokenInfo = await ch.connection.getTokenInfo(tokenMint); + const connection: Connection = await ch.getRpc(); + const mintInfo = await connection.getAccountInfo(tokenMint) + if (!mintInfo) { + console.error(`Mint ${token} not found on ${ch.chain} ${ch.network}`); + process.exit(1); + } + const mint = spl.unpackMint(tokenMint, mintInfo, mintInfo.owner); + + if (mode === "burning") { + const expectedMintAuthority = ntt.pdas.tokenAuthority().toBase58(); + const actualMintAuthority: string | null = mint.mintAuthority?.toBase58() ?? 
null;
+ if (actualMintAuthority !== expectedMintAuthority) {
+ console.error(`Mint authority mismatch for ${token}`);
+ console.error(`Expected: ${expectedMintAuthority}`);
+ console.error(`Actual: ${actualMintAuthority}`);
+ console.error(`Set the mint authority to the program's token authority PDA with e.g.:`);
+ console.error(`spl-token authorize ${token} mint ${expectedMintAuthority}`);
+ process.exit(1);
+ }
+ }
+
+ let binary: string;
+
+ const skipDeploy = false;
+
+ if (!skipDeploy) {
+ if (binaryPath) {
+ binary = binaryPath;
+ } else {
+ // build the program
+ // TODO: build with docker
+ checkAnchorVersion();
+ const proc = Bun.spawn(
+ ["anchor",
+ "build",
+ "-p", "example_native_token_transfers",
+ "--", "--no-default-features", "--features", cargoNetworkFeature(ch.network)
+ ], {
+ cwd: `${pwd}/solana`
+ });
+
+ // const _out = await new Response(proc.stdout).text();
+
+ await proc.exited;
+ if (proc.exitCode !== 0) {
+ process.exit(proc.exitCode ?? 1);
+ }
+
+ binary = `${pwd}/solana/target/deploy/example_native_token_transfers.so`;
+ }
+
+
+ await checkSolanaBinary(binary, wormhole, providedProgramId, version ?? undefined)
+
+ // if buffer.json doesn't exist, create it
+ if (!fs.existsSync(`buffer.json`)) {
+ execSync(`solana-keygen new -o buffer.json --no-bip39-passphrase`);
+ } else {
+ console.info("buffer.json already exists.")
+ await askForConfirmation("Do you want to continue an existing deployment? If not, delete the buffer.json file and run the command again.");
+ }
+
+ const deployCommand = [
+ "solana",
+ "program",
+ "deploy",
+ "--program-id", programKeypairPath,
+ "--buffer", `buffer.json`,
+ binary,
+ "--keypair", payer,
+ "-u", ch.config.rpc
+ ];
+
+ if (priorityFee !== undefined) {
+ deployCommand.push("--with-compute-unit-price", priorityFee.toString());
+ }
+
+ const deployProc = Bun.spawn(deployCommand);
+
+ const out = await new Response(deployProc.stdout).text();
+
+ await deployProc.exited;
+
+ if (deployProc.exitCode !== 0) {
+ process.exit(deployProc.exitCode ?? 1);
+ }
+
+ // success. 
remove buffer.json + fs.unlinkSync("buffer.json"); + + console.log(out); + } + + if (initialize) { + // wait 3 seconds + await new Promise((resolve) => setTimeout(resolve, 3000)); + + const tx = ntt.initialize( + toUniversal(ch.chain, payerKeypair.publicKey.toBase58()), + { + mint: new PublicKey(token), + mode, + outboundLimit: 100000000n, + }); + + const signer = await getSigner(ch, "privateKey", encoding.b58.encode(payerKeypair.secretKey)); + + try { + await signSendWait(ch, tx, signer.signer); + } catch (e: any) { + console.error(e.logs); + } + } + + return { chain: ch.chain, address: toUniversal(ch.chain, providedProgramId) }; +} + +async function missingConfigs( + deps: Partial<{ [C in Chain]: Deployment }>, + verbose: boolean, +): Promise> { + const missingConfigs: Partial<{ [C in Chain]: MissingImplicitConfig }> = {}; + + for (const [fromChain, from] of Object.entries(deps)) { + let count = 0; + assertChain(fromChain); + + let missing: MissingImplicitConfig = { + managerPeers: [], + transceiverPeers: [], + evmChains: [], + standardRelaying: [], + solanaWormholeTransceiver: false, + solanaUpdateLUT: false, + }; + + if (chainToPlatform(fromChain) === "Solana") { + const solanaNtt = from.ntt as SolanaNtt; + const selfWormholeTransceiver = solanaNtt.pdas.registeredTransceiver(new PublicKey(solanaNtt.contracts.ntt!.manager)).toBase58(); + const registeredSelfTransceiver = await retryWithExponentialBackoff(() => solanaNtt.connection.getAccountInfo(new PublicKey(selfWormholeTransceiver)), 5, 5000); + if (registeredSelfTransceiver === null) { + count++; + missing.solanaWormholeTransceiver = true; + } + + // here we just check if the LUT update function returns an instruction. + // if it does, it means the LUT is missing or outdated. notice that + // we're not actually updating the LUT here, just checking if it's + // missing, so it's ok to use the 0 pubkey as the payer. + const updateLUT = solanaNtt.initializeOrUpdateLUT({ payer: new PublicKey(0) }); + // check if async generator is non-empty + if (!(await updateLUT.next()).done) { + count++; + missing.solanaUpdateLUT = true; + } + } + + for (const [toChain, to] of Object.entries(deps)) { + assertChain(toChain); + if (fromChain === toChain) { + continue; + } + if (verbose) { + process.stdout.write(`Verifying registration for ${fromChain} -> ${toChain}......\n`); + } + const peer = await retryWithExponentialBackoff(() => from.ntt.getPeer(toChain), 5, 5000); + if (peer === null) { + const configLimit = from.config.local?.limits?.inbound?.[toChain]?.replace(".", ""); + count++; + missing.managerPeers.push({ + address: to.manager, + tokenDecimals: to.decimals, + inboundLimit: BigInt(configLimit ?? 
0), + }); + } else { + // @ts-ignore TODO + if (!Buffer.from(peer.address.address.address).equals(Buffer.from(to.manager.address.address))) { + console.error(`Peer address mismatch for ${fromChain} -> ${toChain}`); + } + if (peer.tokenDecimals !== to.decimals) { + console.error(`Peer decimals mismatch for ${fromChain} -> ${toChain}`); + } + } + + if (chainToPlatform(fromChain) === "Evm") { + const toIsEvm = chainToPlatform(toChain) === "Evm"; + + const remoteToEvm = await (await from.ntt.getTransceiver(0) as EvmNttWormholeTranceiver).isEvmChain(toChain); + if (toIsEvm && !remoteToEvm) { + count++; + missing.evmChains.push(toChain); + } + + const standardRelaying = await (await from.ntt.getTransceiver(0) as EvmNttWormholeTranceiver).isWormholeRelayingEnabled(toChain); + if (toIsEvm && !standardRelaying) { + count++; + missing.standardRelaying.push(toChain); + } + } + + const transceiverPeer = await retryWithExponentialBackoff(() => from.whTransceiver.getPeer(toChain), 5, 5000); + if (transceiverPeer === null) { + count++; + missing.transceiverPeers.push(to.whTransceiver.getAddress()); + } else { + // @ts-ignore TODO + if (!Buffer.from(transceiverPeer.address.address).equals(Buffer.from(to.whTransceiver.getAddress().address.address))) { + console.error(`Transceiver peer address mismatch for ${fromChain} -> ${toChain}`); + } + } + + } + if (count > 0) { + missingConfigs[fromChain] = missing; + } + } + return missingConfigs; +} + +async function pushDeployment(deployment: Deployment, signerType: SignerType, evmVerify: boolean, yes: boolean, filePath?: string): Promise { + const diff = diffObjects(deployment.config.local!, deployment.config.remote!); + if (Object.keys(diff).length === 0) { + return; + } + + const canonical = canonicalAddress(deployment.manager); + console.log(`Pushing changes to ${deployment.manager.chain} (${canonical})`) + + console.log(chalk.reset(colorizeDiff(diff))); + if (!yes) { + await askForConfirmation(); + } + + const ctx = deployment.ctx; + + const signer = await getSigner(ctx, signerType, undefined, filePath); + + let txs = []; + // we perform this last to make sure we don't accidentally lock ourselves out + let updateOwner: ReturnType | undefined = undefined; + let managerUpgrade: { from: string, to: string } | undefined; + for (const k of Object.keys(diff)) { + if (k === "version") { + // TODO: check against existing version, and make sure no major version changes + managerUpgrade = { from: diff[k]!.pull!, to: diff[k]!.push! }; + } else if (k === "owner") { + const address: AccountAddress = toUniversal(deployment.manager.chain, diff[k]?.push!); + updateOwner = deployment.ntt.setOwner(address, signer.address.address); + } else if (k === "pauser") { + const address: AccountAddress = toUniversal(deployment.manager.chain, diff[k]?.push!); + txs.push(deployment.ntt.setPauser(address, signer.address.address)); + } else if (k === "paused") { + if (diff[k]?.push === true) { + txs.push(deployment.ntt.pause(signer.address.address)); + } else { + txs.push(deployment.ntt.unpause(signer.address.address)); + } + } else if (k === "limits") { + const newOutbound = diff[k]?.outbound?.push; + if (newOutbound) { + // TODO: verify amount has correct number of decimals? + // remove "." 
from string and convert to bigint + const newOutboundBigint = BigInt(newOutbound.replace(".", "")); + txs.push(deployment.ntt.setOutboundLimit(newOutboundBigint, signer.address.address)); + } + const inbound = diff[k]?.inbound; + if (inbound) { + for (const chain of Object.keys(inbound)) { + assertChain(chain); + const newInbound = inbound[chain]?.push; + if (newInbound) { + // TODO: verify amount has correct number of decimals? + const newInboundBigint = BigInt(newInbound.replace(".", "")); + txs.push(deployment.ntt.setInboundLimit(chain, newInboundBigint, signer.address.address)); + } + } + } + } else if (k === "transceivers") { + // TODO: refactor this nested loop stuff into separate functions at least + // alternatively we could first recursively collect all the things + // to do into a flattened list (with entries like + // transceivers.wormhole.pauser), and have a top-level mapping of + // these entries to how they should be handled + for (const j of Object.keys(diff[k] as object)) { + if (j === "wormhole") { + for (const l of Object.keys(diff[k]![j] as object)) { + if (l === "pauser") { + const newTransceiverPauser = toUniversal(deployment.manager.chain, diff[k]![j]![l]!.push!); + txs.push(deployment.whTransceiver.setPauser(newTransceiverPauser, signer.address.address)); + } else { + console.error(`Unsupported field: ${k}.${j}.${l}`); + process.exit(1); + } + } + } else { + console.error(`Unsupported field: ${k}.${j}`); + process.exit(1); + + } + } + } else { + console.error(`Unsupported field: ${k}`); + process.exit(1); + } + } + if (managerUpgrade) { + await upgrade(managerUpgrade.from, managerUpgrade.to, deployment.ntt, ctx, signerType, evmVerify); + } + for (const tx of txs) { + await signSendWait(ctx, tx, signer.signer) + } + if (updateOwner) { + await signSendWait(ctx, updateOwner, signer.signer) + } +} + +async function pullDeployments(deployments: Config, network: Network, verbose: boolean): Promise }>> { + let deps: Partial<{ [C in Chain]: Deployment }> = {}; + + for (const [chain, deployment] of Object.entries(deployments.chains)) { + if (verbose) { + process.stdout.write(`Fetching config for ${chain}......\n`); + } + assertChain(chain); + const managerAddress: string | undefined = deployment.manager; + if (managerAddress === undefined) { + console.error(`manager field not found for chain ${chain}`); + // process.exit(1); + continue; + } + const [remote, ctx, ntt, decimals] = await pullChainConfig( + network, + { chain, address: toUniversal(chain, managerAddress) }, + overrides + ); + const local = deployments.chains[chain]; + + // TODO: what if it's not index 0... + // we should check that the address of this transceiver matches the + // address in the config. 
currently we just assume that ix 0 is the wormhole one + const whTransceiver = await ntt.getTransceiver(0); + if (whTransceiver === null) { + console.error(`Wormhole transceiver not found for ${chain}`); + process.exit(1); + } + + deps[chain] = { + ctx, + ntt, + decimals, + manager: { chain, address: toUniversal(chain, managerAddress) }, + whTransceiver, + config: { + remote, + local, + } + }; + } + + const config = Object.fromEntries(Object.entries(deps).map(([k, v]) => [k, v.config.remote])); + const ntts = Object.fromEntries(Object.entries(deps).map(([k, v]) => [k, v.ntt])); + await pullInboundLimits(ntts, config, verbose); + return deps; +} + +async function pullChainConfig( + network: N, + manager: ChainAddress, + overrides?: ConfigOverrides +): Promise<[ChainConfig, ChainContext, Ntt, number]> { + const wh = new Wormhole(network, [solana.Platform, evm.Platform], overrides); + const ch = wh.getChain(manager.chain); + + const nativeManagerAddress = canonicalAddress(manager); + + const { ntt, addresses }: { ntt: Ntt; addresses: Partial; } = + await nttFromManager(ch, nativeManagerAddress); + + const mode = await ntt.getMode(); + const outboundLimit = await ntt.getOutboundLimit(); + const threshold = await ntt.getThreshold(); + + const decimals = await ntt.getTokenDecimals(); + // insert decimal point into number + const outboundLimitDecimals = formatNumber(outboundLimit, decimals); + + const paused = await ntt.isPaused(); + const owner = await ntt.getOwner(); + const pauser = await ntt.getPauser(); + + const version = getVersion(manager.chain, ntt); + + const transceiverPauser = await ntt.getTransceiver(0).then((t) => t?.getPauser() ?? null); + + const config: ChainConfig = { + version, + mode, + paused, + owner: owner.toString(), + manager: nativeManagerAddress, + token: addresses.token!, + transceivers: { + threshold, + wormhole: { address: addresses.transceiver!.wormhole! 
}, + }, + limits: { + outbound: outboundLimitDecimals, + inbound: {}, + }, + }; + if (transceiverPauser) { + config.transceivers.wormhole.pauser = transceiverPauser.toString(); + } + if (pauser) { + config.pauser = pauser.toString(); + } + return [config, ch, ntt, decimals]; +} + +async function getImmutables(chain: C, ntt: Ntt) { + const platform = chainToPlatform(chain); + if (platform !== "Evm") { + return null; + } + const evmNtt = ntt as EvmNtt; + const transceiver = await evmNtt.getTransceiver(0) as EvmNttWormholeTranceiver; + const consistencyLevel = await transceiver.transceiver.consistencyLevel(); + const wormholeRelayer = await transceiver.transceiver.wormholeRelayer(); + const specialRelayer = await transceiver.transceiver.specialRelayer(); + const gasLimit = await transceiver.transceiver.gasLimit(); + + const token = await evmNtt.manager.token(); + const tokenDecimals = await evmNtt.manager.tokenDecimals(); + + const whTransceiverImmutables = { + consistencyLevel, + wormholeRelayer, + specialRelayer, + gasLimit, + }; + return { + manager: { + token, + tokenDecimals, + }, + wormholeTransceiver: whTransceiverImmutables, + }; +} + +async function getPdas(chain: C, ntt: Ntt) { + const platform = chainToPlatform(chain); + if (platform !== "Solana") { + return null; + } + const solanaNtt = ntt as SolanaNtt; + const config = solanaNtt.pdas.configAccount(); + const emitter = solanaNtt.pdas.emitterAccount(); + const outboxRateLimit = solanaNtt.pdas.outboxRateLimitAccount(); + const tokenAuthority = solanaNtt.pdas.tokenAuthority(); + const lutAccount = solanaNtt.pdas.lutAccount(); + const lutAuthority = solanaNtt.pdas.lutAuthority(); + + return { + config, + emitter, + outboxRateLimit, + tokenAuthority, + lutAccount, + lutAuthority, + }; +} + +function getVersion(chain: C, ntt: Ntt): string { + const platform = chainToPlatform(chain); + switch (platform) { + case "Evm": + return (ntt as EvmNtt).version + case "Solana": + return (ntt as SolanaNtt).version + default: + throw new Error("Unsupported platform"); + } +} + +// TODO: there should be a more elegant way to do this, than creating a +// "dummy" NTT, then calling verifyAddresses to get the contract diff, then +// finally reconstructing the "real" NTT object from that +async function nttFromManager( + ch: ChainContext, + nativeManagerAddress: string +): Promise<{ ntt: Ntt; addresses: Partial }> { + const onlyManager = await ch.getProtocol("Ntt", { + ntt: { + manager: nativeManagerAddress, + token: null, + transceiver: { wormhole: null }, + } + }); + const diff = await onlyManager.verifyAddresses(); + + const addresses: Partial = { manager: nativeManagerAddress, ...diff }; + + const ntt = await ch.getProtocol("Ntt", { + ntt: addresses + }); + return { ntt, addresses }; +} + +function formatNumber(num: bigint, decimals: number) { + if (num === 0n) { + return "0." + "0".repeat(decimals); + } + const str = num.toString(); + const formatted = str.slice(0, -decimals) + "." 
+ str.slice(-decimals); + if (formatted.startsWith(".")) { + return "0" + formatted; + } + return formatted; +} + +function checkNumberFormatting(formatted: string, decimals: number): boolean { + // check that the string has the correct number of decimals + const parts = formatted.split("."); + if (parts.length !== 2) { + return false; + } + if (parts[1].length !== decimals) { + return false; + } + return true; +} + function cargoNetworkFeature(network: Network): string { switch (network) { - case "mainnet": + case "Mainnet": return "mainnet"; - case "testnet": + case "Testnet": return "solana-devnet"; - case "devnet": + case "Devnet": return "tilt-devnet"; + default: + throw new Error("Unsupported network"); } } -function solanaMoniker(network: Network): string { - switch (network) { - case "mainnet": - return "m"; - case "testnet": - return "d"; - case "devnet": - return "l"; +async function askForConfirmation(prompt: string = "Do you want to continue?"): Promise { + const rl = readline.createInterface({ + input: process.stdin, + output: process.stdout, + }); + const answer = await new Promise((resolve) => { + rl.question(`${prompt} [y/n]`, resolve); + }); + rl.close(); + + if (answer !== "y") { + console.log("Aborting"); + process.exit(0); + } +} + +// NOTE: modifies the config object in place +// TODO: maybe introduce typestate for having pulled inbound limits? +async function pullInboundLimits(ntts: Partial<{ [C in Chain]: Ntt }>, config: Config["chains"], verbose: boolean) { + for (const [c1, ntt1] of Object.entries(ntts)) { + assertChain(c1); + const chainConf = config[c1]; + if (!chainConf) { + console.error(`Chain ${c1} not found in deployment`); + process.exit(1); + } + const decimals = await ntt1.getTokenDecimals(); + for (const [c2, ntt2] of Object.entries(ntts)) { + assertChain(c2); + if (ntt1 === ntt2) { + continue; + } + if (verbose) { + process.stdout.write(`Fetching inbound limit for ${c1} -> ${c2}.......\n`); + } + const peer = await retryWithExponentialBackoff(() => ntt1.getPeer(c2), 5, 5000); + if (chainConf.limits?.inbound === undefined) { + chainConf.limits.inbound = {}; + } + + const limit = peer?.inboundLimit ?? 0n; + + chainConf.limits.inbound[c2] = formatNumber(limit, decimals) + + } + } +} + +async function checkSolanaBinary(binary: string, wormhole: string, providedProgramId: string, version?: string) { + // ensure binary path exists + if (!fs.existsSync(binary)) { + console.error(`.so file not found: ${binary}`); + process.exit(1); + } + // console.log(`Checking binary ${binary} for wormhole and provided program ID`); + + // convert wormhole and providedProgramId from base58 to hex + const wormholeHex = new PublicKey(wormhole).toBuffer().toString("hex"); + const providedProgramIdHex = new PublicKey(providedProgramId).toBuffer().toString("hex"); + const versionHex = version ? 
Buffer.from(version).toString("hex") : undefined;
+
+ if (!searchHexInBinary(binary, wormholeHex)) {
+ console.error(`Wormhole address not found in binary: ${wormhole}`);
+ process.exit(1);
+ }
+ if (!searchHexInBinary(binary, providedProgramIdHex)) {
+ console.error(`Provided program ID not found in binary: ${providedProgramId}`);
+ process.exit(1);
+ }
+ if (versionHex && !searchHexInBinary(binary, versionHex)) {
+ // TODO: figure out how to search for the version string in the binary
+ // console.error(`Version string not found in binary: ${version}`);
+ // process.exit(1);
+ }
+}
+
+// not the most efficient, but at least it's definitely portable
+function searchHexInBinary(binaryPath: string, searchHex: string) {
+ const buffer = fs.readFileSync(binaryPath);
+ const hexString = buffer.toString('hex');
+ const found = hexString.includes(searchHex);
+
+ return found;
+}
+
+export function ensureNttRoot(pwd: string = ".") {
+ if (!fs.existsSync(`${pwd}/evm/foundry.toml`) || !fs.existsSync(`${pwd}/solana/Anchor.toml`)) {
+ console.error("Run this command from the root of an NTT project.");
+ process.exit(1);
+ }
+}
+
+function checkAnchorVersion() {
+ const expected = "0.29.0";
+ try {
+ execSync("which anchor");
+ } catch {
+ console.error("Anchor CLI is not installed.\nSee https://www.anchor-lang.com/docs/installation")
+ process.exit(1);
+ }
+ const version = execSync("anchor --version").toString().trim();
+ // version looks like "anchor-cli 0.14.0"
+ const [_, v] = version.split(" ");
+ if (v !== expected) {
+ console.error(`Anchor CLI version must be ${expected} but is ${v}`);
+ process.exit(1);
+ }
+}
+function loadConfig(path: string): Config {
+ if (!fs.existsSync(path)) {
+ console.error(`File not found: ${path}`);
+ console.error(`Create with 'ntt init' or specify another file with --path`);
+ process.exit(1);
+ }
+ const deployments: Config = JSON.parse(fs.readFileSync(path).toString());
+ return deployments;
+}
+
+function resolveVersion(latest: boolean, ver: string | undefined, local: boolean, platform: Platform): string | null {
+ if ((latest ? 1 : 0) + (ver ? 1 : 0) + (local ? 1 : 0) !== 1) {
+ console.error("Specify exactly one of --latest, --ver, or --local");
+ const available = getAvailableVersions(platform);
+ console.error(`Available versions for ${platform}:\n${available.join("\n")}`);
+ process.exit(1);
+ }
+ if (latest) {
+ const available = getAvailableVersions(platform);
+ return available.sort().reverse()[0];
+ } else if (ver) {
+ return ver;
+ } else {
+ // local version
+ return null;
+ }
+}
+
+function warnLocalDeployment(yes: boolean): Promise {
+ if (!yes) {
+ console.warn(chalk.yellow("WARNING: You are deploying from your local working directory."));
+ console.warn(chalk.yellow("This bypasses version control and may deploy untested changes."));
+ console.warn(chalk.yellow("Ensure your local changes are thoroughly tested and compatible."));
+ return askForConfirmation("Are you sure you want to continue with the local deployment?");
+ }
+ return Promise.resolve();
+}
+
+function validateChain(network: N, chain: C) {
+ if (network === "Testnet") {
+ if (chain === "Ethereum") {
+ console.error("Ethereum is deprecated on Testnet. Use EthereumSepolia instead.");
+ process.exit(1);
+ }
+ // if on testnet, and the chain has a *Sepolia counterpart, use that instead
+ if (chains.find((c) => c === `${chain}Sepolia`)) {
+ console.error(`Chain ${chain} is deprecated. 
Use ${chain}Sepolia instead.`); + process.exit(1); + } + } +} + +function retryWithExponentialBackoff( + fn: () => Promise, + maxRetries: number, + delay: number, +): Promise { + const backoff = (retry: number) => Math.min(2 ** retry * delay, 10000) + Math.random() * 1000; + const attempt = async (retry: number): Promise => { + try { + return await fn(); + } catch (e) { + if (retry >= maxRetries) { + throw e; + } + const time = backoff(retry); + await new Promise((resolve) => setTimeout(resolve, backoff(time))); + return await attempt(retry + 1); + } + }; + return attempt(0); +} + +function nttVersion(): { version: string, commit: string, path: string, remote: string } | null { + const nttDir = `${process.env.HOME}/.ntt-cli`; + try { + const versionFile = fs.readFileSync(`${nttDir}/version`).toString().trim(); + const [commit, installPath, version, remote] = versionFile.split("\n"); + return { version, commit, path: installPath, remote }; + } catch { + return null; } } diff --git a/cli/src/side-effects.ts b/cli/src/side-effects.ts new file mode 100644 index 000000000..f4f91b0ee --- /dev/null +++ b/cli/src/side-effects.ts @@ -0,0 +1,38 @@ +// +// when the native secp256k1 is missing, the eccrypto library decides TO PRINT A MESSAGE TO STDOUT: +// https://github.com/bitchan/eccrypto/blob/a4f4a5f85ef5aa1776dfa1b7801cad808264a19c/index.js#L23 +// +// do you use a CLI tool that depends on that library and try to pipe the output +// of the tool into another? tough luck +// +// for lack of a better way to stop this, we patch the console.info function to +// drop that particular message... +// +const info = console.info; +console.info = function (x: string) { + if (x !== "secp256k1 unavailable, reverting to browser version") { + info(x); + } +}; + +const warn = console.warn; +globalThis.console.warn = function (x: string) { + if ( + x !== + "bigint: Failed to load bindings, pure JS will be used (try npm run rebuild?)" + ) { + warn(x); + } +}; + +// Ensure BigInt can be serialized to json +// +// eslint-disable-next-line @typescript-eslint/no-redeclare +interface BigInt { + /** Convert to BigInt to string form in JSON.stringify */ + toJSON: () => string; +} +// Without this JSON.stringify() blows up +(BigInt.prototype as any).toJSON = function () { + return this.toString(); +}; diff --git a/cli/src/tag.ts b/cli/src/tag.ts new file mode 100644 index 000000000..ba3201452 --- /dev/null +++ b/cli/src/tag.ts @@ -0,0 +1,16 @@ +import type { Platform } from "@wormhole-foundation/sdk" +import { execSync } from "child_process" + +export function getAvailableVersions
<P extends Platform>
(platform: P): string[] { + const tags = execSync(`git tag --list 'v*+${platform.toLowerCase()}'`, { + stdio: ["ignore", null, null] + }).toString().trim().split("\n") + return tags.map(tag => tag.split("+")[0].slice(1)) +} + +export function getGitTagName
<P extends Platform>
(platform: P, version: string): string | undefined { + const found = execSync(`git tag --list 'v${version}+${platform.toLowerCase()}'`, { + stdio: ["ignore", null, null] + }).toString().trim() + return found +} diff --git a/cli/test/sepolia-bsc.sh b/cli/test/sepolia-bsc.sh new file mode 100755 index 000000000..3bbc8a144 --- /dev/null +++ b/cli/test/sepolia-bsc.sh @@ -0,0 +1,90 @@ +#!/usr/bin/env bash +# This script creates two forks (Bsc and Sepolia) and creates an NTT deployment +# on both of them. +# It's safe to run these tests outside of docker, as we create an isolated temporary +# directory for the tests. + +set -euox pipefail + +BSC_PORT=8545 +SEPOLIA_PORT=8546 + +anvil --silent --rpc-url https://bsc-testnet-rpc.publicnode.com -p "$BSC_PORT" & +pid1=$! +anvil --silent --rpc-url wss://ethereum-sepolia-rpc.publicnode.com -p "$SEPOLIA_PORT" & +pid2=$! + +# check both processes are running +if ! kill -0 $pid1 || ! kill -0 $pid2; then + echo "Failed to start the servers" + exit 1 +fi + +# create tmp directory +dir=$(mktemp -d) + +cleanup() { + kill $pid1 $pid2 + rm -rf $dir +} + +trap "cleanup" INT TERM EXIT + +# devnet private key +export ETH_PRIVATE_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 + +echo "Running tests..." +cd $dir +ntt new test-ntt +cd test-ntt +ntt init Testnet + +# write overrides.json +cat < overrides.json +{ + "chains": { + "Bsc": { + "rpc": "http://127.0.0.1:$BSC_PORT" + }, + "Sepolia": { + "rpc": "http://127.0.0.1:$SEPOLIA_PORT" + } + } +} +EOF + +ntt add-chain Bsc --token 0x0B15635FCF5316EdFD2a9A0b0dC3700aeA4D09E6 --mode locking --skip-verify --latest +ntt add-chain Sepolia --token 0xB82381A3fBD3FaFA77B3a7bE693342618240067b --skip-verify --ver 1.0.0 + +ntt pull --yes +ntt push --yes + +# ugprade Sepolia to 1.1.0 +ntt upgrade Sepolia --ver 1.1.0 --skip-verify --yes +# now upgrade to the local version. +ntt upgrade Sepolia --local --skip-verify --yes + +ntt pull --yes + +# transfer ownership to +NEW_OWNER=0x70997970C51812dc3A010C7d01b50e0d17dc79C8 +NEW_OWNER_SECRET=0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d + +jq '.chains.Bsc.owner = "'$NEW_OWNER'"' deployment.json > deployment.json.tmp && mv deployment.json.tmp deployment.json +jq '.chains.Sepolia.owner = "'$NEW_OWNER'"' deployment.json > deployment.json.tmp && mv deployment.json.tmp deployment.json +ntt push --yes + +# check the owner has been updated +jq '.chains.Bsc.owner == "'$NEW_OWNER'"' deployment.json +jq '.chains.Sepolia.owner == "'$NEW_OWNER'"' deployment.json + +export ETH_PRIVATE_KEY=$NEW_OWNER_SECRET + +jq '.chains.Bsc.paused = true' deployment.json > deployment.json.tmp && mv deployment.json.tmp deployment.json + +ntt push --yes +jq '.chains.Bsc.paused == true' deployment.json + +ntt status + +cat deployment.json diff --git a/cli/test/solana.sh b/cli/test/solana.sh new file mode 100755 index 000000000..01f06602d --- /dev/null +++ b/cli/test/solana.sh @@ -0,0 +1,218 @@ +#!/usr/bin/env bash + +# This script deploys the NTT program to a local Solana test validator and +# upgrades it. +# +# TODO: this script should be separated into +# 1) a general purpose validator startup script +# 2) the actual test script that sets up the NTT program and runs the tests +# +# We could then write multiple tests easily. For now, this will do. 
+# TODO: add better test coverage (registrations, pausing, etc) + +set -euo pipefail + +# Default values +PORT=6000 +FAUCET_PORT=6100 +NETWORK="http://127.0.0.1:$PORT" +KEYS_DIR="keys" +OVERRIDES_FILE="overrides.json" +DEPLOYMENT_FILE="deployment.json" +KEEP_ALIVE=false +USE_TMP_DIR=false + +# Function to display usage information +usage() { + cat << EOF +Usage: $0 [options] + +Options: + -h, --help Show this help message + -p, --port PORT Set the RPC port (default: 6000) + -f, --faucet-port PORT Set the faucet port (default: 6100) + -k, --keys-dir DIR Set the keys directory (default: keys) + -o, --overrides FILE Set the overrides file (default: overrides.json) + -d, --deployment FILE Set the deployment file (default: deployment.json) + --keep-alive Keep the validator running after the script finishes + --use-tmp-dir Use a temporary directory for deployment (useful for testing) +EOF + exit 1 +} + +# Parse command-line options +while [[ $# -gt 0 ]]; do + case $1 in + -h|--help) + usage + ;; + -p|--port) + PORT="$2" + shift 2 + ;; + -f|--faucet-port) + FAUCET_PORT="$2" + shift 2 + ;; + -k|--keys-dir) + KEYS_DIR="$2" + shift 2 + ;; + -o|--overrides) + OVERRIDES_FILE="$2" + shift 2 + ;; + -d|--deployment) + DEPLOYMENT_FILE="$2" + shift 2 + ;; + --keep-alive) + KEEP_ALIVE=true + shift + ;; + --use-tmp-dir) + USE_TMP_DIR=true + shift + ;; + *) + echo "Unknown option: $1" + usage + ;; + esac +done + +# Update NETWORK variable based on potentially changed PORT +NETWORK="http://127.0.0.1:$PORT" + +validator_dir=$(mktemp -d) + +if [ "$USE_TMP_DIR" = true ]; then + tmp_dir=$(mktemp -d) + cd "$tmp_dir" || exit + ntt new test-ntt + cd test-ntt || exit +fi + +# Function to clean up resources +cleanup() { + echo "Cleaning up..." + kill "$pid" 2>/dev/null || true + rm -rf "$validator_dir" + if [ "$USE_TMP_DIR" = true ]; then + rm -rf "$tmp_dir" + fi + if [ -f "${OVERRIDES_FILE}.bak" ]; then + mv "${OVERRIDES_FILE}.bak" "$OVERRIDES_FILE" + else + rm -f "$OVERRIDES_FILE" + fi + solana config set --keypair "$old_default_keypair" > /dev/null +} + +# Set up trap for cleanup +trap cleanup EXIT + +# Prepare directories and files +rm -rf "$KEYS_DIR" +mkdir -p "$KEYS_DIR" + +# Backup and create overrides file +cp "$OVERRIDES_FILE" "${OVERRIDES_FILE}.bak" 2>/dev/null || true +cat << EOF > "$OVERRIDES_FILE" +{ + "chains": { + "Solana": { + "rpc": "$NETWORK" + } + } +} +EOF + +# Start Solana test validator +pushd "$validator_dir" || exit +# TODO: the deployment doesn't fully work, because we need to load in the wormhole program and its associated +# accounts. This is a bit tedious, but would be great to do. +# NOTE: this will not run in an emulated x86 docker environment (on an arm mac +# host), because the binary needs AVX instructions which the emulator doesn't +# support. +solana-test-validator --rpc-port "$PORT" --faucet-port "$FAUCET_PORT" > /dev/null 2>&1 & +pid=$! +popd || exit + +old_default_keypair=$(solana config get keypair | awk '{print $3}') + +# Wait for validator to start +echo "Waiting for Solana test validator to start..." +for _ in {1..30}; do + if solana cluster-version -u "$NETWORK" &>/dev/null; then + echo "Solana test validator started successfully." + break + fi + sleep 1 +done + +# Check if validator started successfully +if ! 
kill -0 "$pid" 2>/dev/null; then + echo "Failed to start solana-test-validator" + exit 1 +fi + +# Initialize NTT +rm -rf "$DEPLOYMENT_FILE" +ntt init Mainnet + +# Generate and configure keypairs +pushd "$KEYS_DIR" || exit +keypair=$(solana-keygen grind --starts-with w:1 --ignore-case | grep 'Wrote keypair' | awk '{print $4}') +keypair=$(realpath "$keypair") +solana config set --keypair "$keypair" + +# Airdrop SOL +solana airdrop 50 -u "$NETWORK" --keypair "$keypair" +# This steps is a bit voodoo -- we airdrop to this special address, which is +# needed for querying the program version. For more info, grep for these pubkeys in the ntt repo. +solana airdrop 1 Hk3SdYTJFpawrvRz4qRztuEt2SqoCG7BGj2yJfDJSFbJ -u "$NETWORK" --keypair "$keypair" > /dev/null +solana airdrop 1 98evdAiWr7ey9MAQzoQQMwFQkTsSR6KkWQuFqKrgwNwb -u "$NETWORK" --keypair "$keypair" > /dev/null + +# Create and configure token +token=$(spl-token create-token --program-id TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb -u "$NETWORK" | grep "Address:" | awk '{print $2}') +echo "Token: $token" + +ntt_keypair=$(solana-keygen grind --starts-with ntt:1 --ignore-case | grep 'Wrote keypair' | awk '{print $4}') +ntt_keypair_without_json=${ntt_keypair%.json} +ntt_keypair=$(realpath "$ntt_keypair") +popd || exit + +# Set token authority +authority=$(ntt solana token-authority "$ntt_keypair_without_json") +echo "Authority: $authority" +spl-token authorize "$token" mint "$authority" -u "$NETWORK" + +# Add chain and upgrade +ntt add-chain Solana --ver 1.0.0 --mode burning --token "$token" --payer "$keypair" --program-key "$ntt_keypair" + +echo "Getting status" +ntt status || true + +solana program extend "$ntt_keypair_without_json" 100000 -u "$NETWORK" +ntt upgrade Solana --ver 2.0.0 --payer "$keypair" --program-key "$ntt_keypair" --yes +ntt status || true + +ntt push --payer "$keypair" --yes + +cat "$DEPLOYMENT_FILE" + +if [ "$KEEP_ALIVE" = true ]; then + # wait for C-c to kill the validator + # print information about the running validator + echo "===============================" + echo "Validator is running on port $PORT" + echo "Faucet is running on port $FAUCET_PORT" + echo "Keys are stored in $KEYS_DIR" + echo "Overrides are stored in $OVERRIDES_FILE" + + echo "Press Ctrl-C to stop the validator..." + while true; do + sleep 1 + done +fi diff --git a/evm/script/DeployWormholeNtt.s.sol b/evm/script/DeployWormholeNtt.s.sol index a2b2018ea..a55e8e52e 100644 --- a/evm/script/DeployWormholeNtt.s.sol +++ b/evm/script/DeployWormholeNtt.s.sol @@ -1,15 +1,82 @@ // SPDX-License-Identifier: Apache 2 pragma solidity >=0.8.8 <0.9.0; -import {Script} from "forge-std/Script.sol"; +import {Script, console} from "forge-std/Script.sol"; import {DeployWormholeNttBase} from "./helpers/DeployWormholeNttBase.sol"; +import {INttManager} from "../src/interfaces/INttManager.sol"; +import {IWormholeTransceiver} from "../src/interfaces/IWormholeTransceiver.sol"; +import "../src/interfaces/IManagerBase.sol"; +import "openzeppelin-contracts/contracts/token/ERC20/ERC20.sol"; +import {NttManager} from "../src/NttManager/NttManager.sol"; + +interface IWormhole { + function chainId() external view returns (uint16); +} contract DeployWormholeNtt is Script, DeployWormholeNttBase { - function run() public { + function run( + address wormhole, + address token, + address wormholeRelayer, + address specialRelayer, + uint8 decimals, + IManagerBase.Mode mode + ) public { vm.startBroadcast(); - // Sanity check deployment parameters. 
- DeploymentParams memory params = _readEnvVariables(); + console.log("Deploying Wormhole Ntt..."); + IWormhole wh = IWormhole(wormhole); + + // sanity check decimals + (bool success, bytes memory queriedDecimals) = + token.staticcall(abi.encodeWithSignature("decimals()")); + + if (success) { + uint8 queriedDecimals = abi.decode(queriedDecimals, (uint8)); + if (queriedDecimals != decimals) { + console.log("Decimals mismatch: ", queriedDecimals, " != ", decimals); + vm.stopBroadcast(); + return; + } + } else { + // NOTE: this might not be a critical error. It could just mean that + // the token contract was compiled against a different EVM version than what the forge script is running on. + // In this case, it's the responsibility of the caller to ensure that the provided decimals are correct + // and that the token contract is valid. + // The best way to ensure that is by calling this script with the queried token decimals (which is what the NTT CLI does). + console.log( + "Failed to query token decimals. Proceeding with provided decimals.", decimals + ); + // the NTT manager initialiser calls the token contract to get the + // decimals as well. We're just going to mock that call to return the provided decimals. + // This is a bit of a hack, but in the worst case (i.e. if the token contract is actually broken), the + // NTT manager initialiser will fail anyway. + vm.mockCall( + token, abi.encodeWithSelector(ERC20.decimals.selector), abi.encode(decimals) + ); + } + + uint16 chainId = wh.chainId(); + + console.log("Chain ID: ", chainId); + + uint256 scale = + decimals > TRIMMED_DECIMALS ? uint256(10 ** (decimals - TRIMMED_DECIMALS)) : 1; + + DeploymentParams memory params = DeploymentParams({ + token: token, + mode: mode, + wormholeChainId: chainId, + rateLimitDuration: 86400, + shouldSkipRatelimiter: false, + wormholeCoreBridge: wormhole, + wormholeRelayerAddr: wormholeRelayer, + specialRelayerAddr: specialRelayer, + consistencyLevel: 202, + gasLimit: 500000, + // the trimming will trim this number to uint64.max + outboundLimit: uint256(type(uint64).max) * scale + }); // Deploy NttManager. 
address manager = deployNttManager(params); @@ -24,4 +91,29 @@ contract DeployWormholeNtt is Script, DeployWormholeNttBase { vm.stopBroadcast(); } + + function upgrade( + address manager + ) public { + vm.startBroadcast(); + + NttManager nttManager = NttManager(manager); + + console.log("Upgrading manager..."); + + uint64 rateLimitDuration = nttManager.rateLimitDuration(); + bool shouldSkipRatelimiter = rateLimitDuration == 0; + + NttManager implementation = new NttManager( + nttManager.token(), + nttManager.mode(), + nttManager.chainId(), + nttManager.rateLimitDuration(), + shouldSkipRatelimiter + ); + + nttManager.upgrade(address(implementation)); + + vm.stopBroadcast(); + } } diff --git a/evm/script/helpers/DeployWormholeNttBase.sol b/evm/script/helpers/DeployWormholeNttBase.sol index 78da53461..848b3cfda 100644 --- a/evm/script/helpers/DeployWormholeNttBase.sol +++ b/evm/script/helpers/DeployWormholeNttBase.sol @@ -49,8 +49,7 @@ contract DeployWormholeNttBase is ParseNttConfig { nttManagerProxy.initialize(); - console2.log("NttManager deployed at: "); - console2.logBytes32(toUniversalAddress(address(nttManagerProxy))); + console2.log("NttManager:", address(nttManagerProxy)); return address(nttManagerProxy); } @@ -74,8 +73,7 @@ contract DeployWormholeNttBase is ParseNttConfig { transceiverProxy.initialize(); - console2.log("Wormhole Transceiver deployed at: "); - console2.logBytes32(toUniversalAddress(address(transceiverProxy))); + console2.log("WormholeTransceiver:", address(transceiverProxy)); return address(transceiverProxy); } diff --git a/package-lock.json b/package-lock.json index 457b61ca6..9c85e56cc 100644 --- a/package-lock.json +++ b/package-lock.json @@ -30,8 +30,10 @@ } }, "cli": { - "version": "0.2.0", + "name": "@wormhole-foundation/ntt-cli", + "version": "1.0.0-beta", "dependencies": { + "chalk": "^5.3.0", "yargs": "^17.7.2" }, "bin": { @@ -45,6 +47,17 @@ "typescript": "^5.0.0" } }, + "cli/node_modules/chalk": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.3.0.tgz", + "integrity": "sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w==", + "engines": { + "node": "^12.17.0 || ^14.13 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, "cli/node_modules/cliui": { "version": "8.0.1", "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", @@ -3747,6 +3760,10 @@ "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==", "dev": true }, + "node_modules/@wormhole-foundation/ntt-cli": { + "resolved": "cli", + "link": true + }, "node_modules/@wormhole-foundation/sdk": { "version": "0.10.2", "resolved": "https://registry.npmjs.org/@wormhole-foundation/sdk/-/sdk-0.10.2.tgz", @@ -6515,10 +6532,6 @@ "dev": true, "peer": true }, - "node_modules/cli": { - "resolved": "cli", - "link": true - }, "node_modules/cliui": { "version": "7.0.4", "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", diff --git a/sdk/__tests__/utils.ts b/sdk/__tests__/utils.ts index 579a9bf2f..657a131ae 100644 --- a/sdk/__tests__/utils.ts +++ b/sdk/__tests__/utils.ts @@ -566,8 +566,8 @@ async function deploySolana(ctx: Ctx): Promise { await new Promise((resolve) => setTimeout(resolve, 400)); const registrTxs = manager.registerTransceiver({ - payer: keypair, - owner: keypair, + payer: Wormhole.chainAddress("Solana", keypair.publicKey.toBase58()).address, + owner: Wormhole.chainAddress("Solana", 
keypair.publicKey.toBase58()).address, transceiver: manager.program.programId, }); await signSendWait(ctx.context, registrTxs, signer); diff --git a/solana/scripts/regenerateIdl.ts b/solana/scripts/regenerateIdl.ts index 175c2eb21..dbcc42d5d 100644 --- a/solana/scripts/regenerateIdl.ts +++ b/solana/scripts/regenerateIdl.ts @@ -20,9 +20,17 @@ const idl: Idl = JSON.parse(fs.readFileSync(jsonPath, "utf8")); const name = titleCase(idl["name"]); idl.accounts?.forEach((account) => { - account.name = account.name.replace(/^[A-Z]+/, (match) => - match.toLowerCase() - ); + // NOTE: here we translate PascalCase to camelCase, with the exception of all + // uppercase account names, such as 'LUT', which we want to preserve. + // + // The translation needs to be done because anchor generates an invalid IDL file, so we patch it. + // Anchor handles all uppercase account names specially (when generating the account discriminator), + // so we need to preserve them. + if (!account.name.match(/^[A-Z]+$/)) { + account.name = account.name.replace(/^[A-Z]+/, (match) => + match.toLowerCase() + ); + } }); // heredoc diff --git a/solana/tests/anchor.test.ts b/solana/tests/anchor.test.ts index 8f29f61f2..c9587a143 100644 --- a/solana/tests/anchor.test.ts +++ b/solana/tests/anchor.test.ts @@ -225,8 +225,8 @@ describe("example-native-token-transfers", () => { // register const registerTxs = ntt.registerTransceiver({ - payer, - owner: payer, + payer: new SolanaAddress(payer.publicKey), + owner: new SolanaAddress(payer.publicKey), transceiver: ntt.program.programId, }); await signSendWait(ctx, registerTxs, signer); diff --git a/solana/ts/idl/2_0_0/ts/example_native_token_transfers.ts b/solana/ts/idl/2_0_0/ts/example_native_token_transfers.ts index 9f013f092..85cb39f66 100644 --- a/solana/ts/idl/2_0_0/ts/example_native_token_transfers.ts +++ b/solana/ts/idl/2_0_0/ts/example_native_token_transfers.ts @@ -1246,7 +1246,7 @@ export type ExampleNativeTokenTransfers = { } }, { - "name": "lut", + "name": "LUT", "type": { "kind": "struct", "fields": [ @@ -3178,7 +3178,7 @@ export const IDL: ExampleNativeTokenTransfers = { } }, { - "name": "lut", + "name": "LUT", "type": { "kind": "struct", "fields": [ diff --git a/solana/ts/lib/ntt.ts b/solana/ts/lib/ntt.ts index 7ab6fde0d..5384ef025 100644 --- a/solana/ts/lib/ntt.ts +++ b/solana/ts/lib/ntt.ts @@ -875,7 +875,7 @@ export namespace NTT { ); const broadcastIx = await program.methods .broadcastWormholeId() - .accounts({ + .accountsStrict({ payer: args.payer, config: pdas.configAccount(), mint: config.mint, @@ -886,6 +886,9 @@ export namespace NTT { feeCollector: whAccs.wormholeFeeCollector, sequence: whAccs.wormholeSequence, program: args.wormholeId, + systemProgram: SystemProgram.programId, + clock: web3.SYSVAR_CLOCK_PUBKEY, + rent: web3.SYSVAR_RENT_PUBKEY, }, }) .instruction(); @@ -1045,6 +1048,10 @@ export namespace NTT { if (major < 2) return null; pdas = pdas ?? 
NTT.pdas(program.programId); + // @ts-ignore + // NOTE: lut is 'LUT' in the IDL, but 'lut' in the generated code + // It needs to be upper-cased in the IDL to compute the anchor + // account discriminator correctly const lut = await program.account.lut.fetchNullable(pdas.lutAccount()); if (!lut) return null; diff --git a/solana/ts/sdk/ntt.ts b/solana/ts/sdk/ntt.ts index e80873ee1..e3da5786d 100644 --- a/solana/ts/sdk/ntt.ts +++ b/solana/ts/sdk/ntt.ts @@ -369,18 +369,20 @@ export class SolanaNtt } async *registerTransceiver(args: { - payer: Keypair; - owner: Keypair; + payer: AccountAddress; + owner: AccountAddress; transceiver: PublicKey; }) { const config = await this.getConfig(); + const payer = new SolanaAddress(args.payer).unwrap(); + const owner = new SolanaAddress(args.owner).unwrap(); if (config.paused) throw new Error("Contract is paused"); const ix = await this.program.methods .registerTransceiver() .accountsStrict({ - payer: args.payer.publicKey, - owner: args.owner.publicKey, + payer, + owner, config: this.pdas.configAccount(), transceiver: args.transceiver, registeredTransceiver: this.pdas.registeredTransceiver( @@ -398,7 +400,7 @@ export class SolanaNtt const broadcastIx = await this.program.methods .broadcastWormholeId() .accountsStrict({ - payer: args.payer.publicKey, + payer, config: this.pdas.configAccount(), mint: config.mint, wormholeMessage: wormholeMessage.publicKey, @@ -416,7 +418,7 @@ export class SolanaNtt .instruction(); const tx = new Transaction(); - tx.feePayer = args.payer.publicKey; + tx.feePayer = payer; tx.add(ix, broadcastIx); yield this.createUnsignedTx( { transaction: tx, signers: [wormholeMessage] },