diff --git a/frontend/.env.sample b/frontend/.env.sample
index 94ced985..667db5ed 100644
--- a/frontend/.env.sample
+++ b/frontend/.env.sample
@@ -28,5 +28,8 @@ DISCORD_HASH_SALT=[177,24,94,160,230,142,37,36,253,97,48,93,127,154,119,45,22,10
 # Dispenser guard private key for Discord verification
 DISPENSER_GUARD=[128,47,82,29,168,187,15,46,104,227,206,117,172,71,34,235,23,50,248,80,128,0,97,123,11,98,101,163,119,14,167,57,42,97,234,25,212,67,158,180,116,19,127,93,129,233,27,64,49,229,212,43,238,163,218,145,158,47,48,12,235,58,153,212]
 FUNDER_KEYPAIR=[145,197,43,77,224,103,196,174,132,195,48,31,177,97,237,163,15,196,217,142,181,204,104,107,98,82,213,0,155,140,218,180,30,119,201,38,51,176,207,221,193,222,235,244,163,250,125,66,68,196,45,208,212,201,232,178,100,163,24,21,106,83,66,174]
+DEPLOYER_WALLET=[145,197,43,77,224,103,196,174,132,195,48,31,177,97,237,163,15,196,217,142,181,204,104,107,98,82,213,0,155,140,218,180,30,119,201,38,51,176,207,221,193,222,235,244,163,250,125,66,68,196,45,208,212,201,232,178,100,163,24,21,106,83,66,174]
+
 PROGRAM_ID=Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS
+CSV_DIR=/tmp
diff --git a/frontend/claim_sdk/merkleTree.ts b/frontend/claim_sdk/merkleTree.ts
index fbaaf648..b2133e84 100644
--- a/frontend/claim_sdk/merkleTree.ts
+++ b/frontend/claim_sdk/merkleTree.ts
@@ -8,6 +8,7 @@ export const HASH_SIZE = 20
 export class MerkleTree {
   public nodes: Buffer[]
+  public indices = new Map<string, number>()
 
   static hash(buffer: Buffer) {
     const bytes = keccak256(buffer)
@@ -32,6 +33,7 @@ export class MerkleTree {
     for (let i = 0; i < 1 << depth; i++) {
       if (i < leaves.length) {
         this.nodes[(1 << depth) + i] = MerkleTree.hashLeaf(leaves[i])
+        this.indices.set(leaves[i].toString('hex'), (1 << depth) + i)
       } else {
         this.nodes[(1 << depth) + i] = MerkleTree.hash(NULL_PREFIX)
       }
@@ -52,8 +54,9 @@ export class MerkleTree {
   prove(leaf: Buffer): Buffer | undefined {
     const leafHash = MerkleTree.hashLeaf(leaf)
-    let index = this.nodes.findIndex((value) => value.equals(leafHash))
-    if (index == -1) {
+    const index = this.indices.get(leaf.toString('hex'))
+
+    if (index === undefined) {
       return undefined
     }
 
diff --git a/frontend/claim_sdk/testWallets.ts b/frontend/claim_sdk/testWallets.ts
index f458041d..96ef9b07 100644
--- a/frontend/claim_sdk/testWallets.ts
+++ b/frontend/claim_sdk/testWallets.ts
@@ -28,7 +28,7 @@ const KEY_DIR = './integration/keys/'
 export const TEST_DISCORD_USERNAME =
   process.env.DISCORD_USER_ID ?? 'a_discord_user' // For development add your discord username to .env
 
-const DISCORD_HASH_SALT: Buffer = process.env.DISCORD_HASH_SALT
+export const DISCORD_HASH_SALT: Buffer = process.env.DISCORD_HASH_SALT
   ?
Buffer.from(new Uint8Array(JSON.parse(process.env.DISCORD_HASH_SALT))) : Buffer.alloc(64) diff --git a/frontend/package-lock.json b/frontend/package-lock.json index b6530227..051c2ea1 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -37,6 +37,7 @@ "react-hot-toast": "^2.2.0", "serve": "13.0.2", "siwe": "^2.1.4", + "sql": "^0.78.0", "tweetnacl": "^1.0.3", "wagmi": "^1.2.0" }, @@ -26912,6 +26913,11 @@ "node": ">=4" } }, + "node_modules/sliced": { + "version": "0.0.5", + "resolved": "https://registry.npmjs.org/sliced/-/sliced-0.0.5.tgz", + "integrity": "sha512-9bYT917D6H3+q8GlQBJmLVz3bc4OeVGfZ2BB12wvLnluTGfG6/8UdOUbKJDW1EEx9SZMDbjnatkau5/XcUeyOw==" + }, "node_modules/snake-case": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/snake-case/-/snake-case-3.0.4.tgz", @@ -27294,6 +27300,23 @@ "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", "peer": true }, + "node_modules/sql": { + "version": "0.78.0", + "resolved": "https://registry.npmjs.org/sql/-/sql-0.78.0.tgz", + "integrity": "sha512-+If27aF3UQxmnuF5JgTJKd/X6TiELa1f58Zo5TX9ULbPnqs1fBDODwwbtU0WwM5H3lSiwx2En4m2lB1rCoIhvQ==", + "dependencies": { + "lodash": "4.1.x", + "sliced": "0.0.x" + }, + "engines": { + "node": "*" + } + }, + "node_modules/sql/node_modules/lodash": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.1.0.tgz", + "integrity": "sha512-B9sgtKUlz0xe7lkYb80BcOpwwJJw5iOiz4HkBDzF0+i5nJLiwfBnL08m7bBkCOPBfi+0aqvrJDMdZDfAvs8vYg==" + }, "node_modules/stack-utils": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz", diff --git a/frontend/package.json b/frontend/package.json index 189a8773..1b7ea129 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -9,7 +9,8 @@ "serve": "serve out", "start": "next start", "migrate": "node-pg-migrate up", - "populate": "ts-node ./scripts/populate.ts" + "populate": "ts-node ./scripts/populate.ts", + "populate:csv": "ts-node ./scripts/populate_from_csv.ts" }, "dependencies": { "@aptos-labs/wallet-adapter-react": "^1.2.2", @@ -44,6 +45,7 @@ "react-hot-toast": "^2.2.0", "serve": "13.0.2", "siwe": "^2.1.4", + "sql": "^0.78.0", "tweetnacl": "^1.0.3", "wagmi": "^1.2.0" }, diff --git a/frontend/scripts/populate_from_csv.ts b/frontend/scripts/populate_from_csv.ts index 830e4123..c4aae58b 100644 --- a/frontend/scripts/populate_from_csv.ts +++ b/frontend/scripts/populate_from_csv.ts @@ -4,20 +4,31 @@ import { envOrErr } from '../claim_sdk/index' import { EVM_CHAINS, EvmChains, + SolanaBreakdownRow, addClaimInfosToDatabase, addEvmBreakdownsToDatabase, + addSolanaBreakdownsToDatabase, clearDatabase, getDatabasePool, } from '../utils/db' import fs from 'fs' import Papa from 'papaparse' -import { ClaimInfo, Ecosystem, getMaxAmount } from '../claim_sdk/claim' +import { + ClaimInfo, + Ecosystem, + Ecosystems, + getMaxAmount, +} from '../claim_sdk/claim' import BN from 'bn.js' import NodeWallet from '@coral-xyz/anchor/dist/cjs/nodewallet' import { EvmBreakdownRow } from '../utils/db' import assert from 'assert' +import path from 'path' +import { hashDiscordUserId } from '../utils/hashDiscord' +import { DISCORD_HASH_SALT, loadFunderWallet } from '../claim_sdk/testWallets' +const DEBUG = true const pool = getDatabasePool() // The config is read from these env variables @@ -29,21 +40,34 @@ const DISPENSER_GUARD = Keypair.fromSecretKey( const FUNDER_KEYPAIR = Keypair.fromSecretKey( new 
Uint8Array(JSON.parse(envOrErr('FUNDER_KEYPAIR'))) ) -const CLUSTER = envOrErr('CLUSTER') const DEPLOYER_WALLET = Keypair.fromSecretKey( - new Uint8Array( - JSON.parse(fs.readFileSync(envOrErr('DEPLOYER_WALLET'), 'utf-8')) - ) + new Uint8Array(JSON.parse(envOrErr('DEPLOYER_WALLET'))) ) const PYTH_MINT = new PublicKey(envOrErr('PYTH_MINT')) const PYTH_TREASURY = new PublicKey(envOrErr('PYTH_TREASURY')) -const CSV_CLAIMS = envOrErr('CSV_CLAIMS') -const CSV_EVM_BREAKDOWNS = envOrErr('CSV_EVM_BREAKDOWNS') + +const CSV_DIR = envOrErr('CSV_DIR') +const DEFI_CLAIMS = 'defi.csv' +const DEFI_DEV_CLAIMS = 'defi_dev.csv' + +const DISCORD_CLAIMS = 'discord.csv' +const DISCORD_DEV_CLAIMS = 'discord_dev.csv' + +const NFT_CLAIMS = 'nft.csv' + +const COSMWASM_CHAIN_LIST = ['neutron', 'osmosis', 'sei'] function checkClaimsMatchEvmBreakdown( claimInfos: ClaimInfo[], evmBreakDowns: EvmBreakdownRow[] ) { + const evmClaimInfos = claimInfos.filter((claimInfo) => { + return claimInfo.ecosystem === 'evm' + }) + const evmClaimInfoAddrSet = new Set( + evmClaimInfos.map((claimInfo) => claimInfo.identity) + ) + const sum: { [identity: string]: BN } = {} for (const evmBreakDownRow of evmBreakDowns) { if (sum[evmBreakDownRow.identity] == undefined) { @@ -53,15 +77,18 @@ function checkClaimsMatchEvmBreakdown( evmBreakDownRow.amount ) } - const evmClaims = claimInfos.filter((claimInfo) => { - return claimInfo.ecosystem === 'evm' - }) + assert( - Object.keys(sum).length === evmClaims.length, - 'Number of evm identities in CSV file does not match number of identities in evm_breakdowns table' + Object.keys(sum).length === evmClaimInfos.length, + ` + Number of evm identities in CSV file does not match number of identities in evm_breakdowns table. + sum: ${Object.keys(sum).length} + evmClaimInfos.length: ${evmClaimInfos.length} + evmClaimInfoAddrSet.length: ${evmClaimInfoAddrSet.size} + ` ) - for (const evmClaim of evmClaims) { + for (const evmClaim of evmClaimInfos) { assert( sum[evmClaim.identity].eq(evmClaim.amount), `Breakdown for ${evmClaim.identity} does not match total amount` @@ -69,86 +96,432 @@ function checkClaimsMatchEvmBreakdown( } } -// Requirements for this script : -// - Two csv files : one for claims and one for evm breakdowns -// - Program has been deployed -// - DB has been migrated - -// Extra steps after running this script : -// - Make sure the tokens are in the treasury account -// - Make sure the treasury account has the config account as its delegate -async function main() { - await clearDatabase(pool) - - // Load claims from csv file - const csvClaims = Papa.parse(fs.readFileSync(CSV_CLAIMS, 'utf-8'), { - header: true, - }) // Assumes ecosystem, identity, amount are the headers - const claimsData = csvClaims.data as { - ecosystem: string - identity: string - amount: string - }[] - assert( - new Set(claimsData.map((row) => row['identity'])).size == claimsData.length, - 'Duplicate addresses in CSV file' +function checkClaimsMatchSolanaBreakdown( + claimInfos: ClaimInfo[], + solanaBreakdownRows: SolanaBreakdownRow[] +) { + const sum: { [identity: string]: BN } = {} + for (const solanaBreakdownRow of solanaBreakdownRows) { + if (sum[solanaBreakdownRow.identity] == undefined) { + sum[solanaBreakdownRow.identity] = new BN(0) + } + sum[solanaBreakdownRow.identity] = sum[solanaBreakdownRow.identity].add( + solanaBreakdownRow.amount + ) + } + const solanaClaims = claimInfos.filter( + (claimInfo) => claimInfo.ecosystem === 'solana' ) assert( - claimsData.every((row) => { - return [ - 'solana', - 'evm', - 
'discord', - 'cosmwasm', - 'aptos', - 'sui', - 'injective', - ].includes(row['ecosystem']) - }), - 'A row has an unexisting ecosystem' + Object.keys(sum).length === solanaClaims.length, + 'Number of solana identities in CSV file does not match number of identities in solana_breakdowns table' ) - const claimInfos = claimsData.map( - (row) => - new ClaimInfo( - row['ecosystem'] as Ecosystem, - row['identity'], - new BN(row['amount']) + + for (const solanaClaim of solanaClaims) { + assert( + sum[solanaClaim.identity].eq(solanaClaim.amount), + `Breakdown for ${solanaClaim.identity} does not match total amount` + ) + } +} + +function parseCsvs() { + // parse defi csvs + const groupedDefiAddresses = parseDefiCsv(DEFI_CLAIMS) + const groupedDefiDevAddresses = parseDefiCsv(DEFI_DEV_CLAIMS) + + groupedDefiDevAddresses.forEach((devChainsAndAllocs, key) => { + const curValues = groupedDefiAddresses.get(key) + if (curValues) { + // skip duplicate identity + chain from defi_dev.csv + const curChainsForAddr = curValues.map((row) => row[0]) + const deduped = devChainsAndAllocs.filter(([chain, alloc]) => { + const isUniqueDevAddr = !curChainsForAddr.includes(chain) + if (!isUniqueDevAddr) { + console.log( + `skipping dev claim for ${chain} address ${key} because it is already in defi.csv` + ) + } + return isUniqueDevAddr + }) + groupedDefiAddresses.set(key, [...curValues, ...deduped]) + } else { + groupedDefiAddresses.set(key, devChainsAndAllocs) + } + }) + + // for each grouped address, if multiple values then all must be in evm chainlist + const evmBreakdownAddresses = new Map() + + const claimInfos: ClaimInfo[] = [] + const solanaBreakdownData: Map = new Map() + + groupedDefiAddresses.forEach((chainsAndAllocs, key) => { + // only evm chains should have multiple values from defi csv files + if (chainsAndAllocs.length > 1) { + assert( + chainsAndAllocs.every(([chain, alloc]) => + EVM_CHAINS.includes(chain as EvmChains) + ), + `Address ${key} has multiple values but not all are in evmChainList. 
chains: ${JSON.stringify( + chainsAndAllocs.map((row) => row[0]) + )}` ) - ) // Cast for ecosystem ok because of assert above - const maxAmount = getMaxAmount(claimInfos) - // Load evmBreakdowns from csv file - const csvEvmBreakdowns = Papa.parse( - fs.readFileSync(CSV_EVM_BREAKDOWNS, 'utf-8'), - { header: true } - ) // Assumes chain, identity, amount are the headers - const evmBreakdownsData = csvEvmBreakdowns.data as { + evmBreakdownAddresses.set(key, chainsAndAllocs) + } else if (EVM_CHAINS.includes(chainsAndAllocs[0][0] as EvmChains)) { + evmBreakdownAddresses.set(key, chainsAndAllocs) + } else if (COSMWASM_CHAIN_LIST.includes(chainsAndAllocs[0][0])) { + claimInfos.push( + new ClaimInfo( + 'cosmwasm', + key, + truncateAllocation(chainsAndAllocs[0][1]) + ) + ) + } else { + assert( + Ecosystems.includes(chainsAndAllocs[0][0] as Ecosystem), + `Unknown ecosystem detected for identity ${key} - ${chainsAndAllocs[0]}` + ) + if (chainsAndAllocs[0][0] === 'solana') { + solanaBreakdownData.set(key, [ + { + source: 'defi', + identity: key, + amount: truncateAllocation(chainsAndAllocs[0][1]), + }, + ]) + } else { + claimInfos.push( + new ClaimInfo( + chainsAndAllocs[0][0] as Ecosystem, + key, + truncateAllocation(chainsAndAllocs[0][1]) + ) + ) + } + } + }) + + // for each evm address, sum up the allocs and add to ecosystemAddresses + evmBreakdownAddresses.forEach((value, key) => { + const totalAmount = value.reduce((acc, row) => { + return acc.add(truncateAllocation(row[1])) + }, new BN(0)) + claimInfos.push(new ClaimInfo('evm', key, totalAmount)) + }) + + // convert into breakdown rows + const evmBreakdownRows: EvmBreakdownRow[] = [] + evmBreakdownAddresses.forEach((chainsAndAllocs, identity) => { + chainsAndAllocs.forEach(([chain, alloc]) => { + evmBreakdownRows.push({ + chain, + identity, + amount: truncateAllocation(alloc), + }) + }) + }) + + // need solana breakdown between nft & defi + const nftClaims = parseNftCsv() + + nftClaims.forEach((row) => { + if (solanaBreakdownData.has(row.address)) { + solanaBreakdownData.get(row.address)?.push({ + source: 'nft', + identity: row.address, + amount: truncateAllocation(row.alloc), + }) + } else { + solanaBreakdownData.set(row.address, [ + { + source: 'nft', + identity: row.address, + amount: truncateAllocation(row.alloc), + }, + ]) + } + }) + + // sum up all the solana breakdowns for each identity and add to ecosystemAddresses + solanaBreakdownData.forEach((value, key) => { + const totalAmount = value.reduce((acc, row) => { + return acc.add(row.amount) + }, new BN(0)) + claimInfos.push(new ClaimInfo('solana', key, totalAmount)) + }) + + // flatten into breakdown rows + const solanaBreakdownRows: SolanaBreakdownRow[] = [] + solanaBreakdownData.forEach((breakdowns, identity) => { + breakdowns.forEach((breakdown) => { + solanaBreakdownRows.push(breakdown) + }) + }) + + // read all discord claims and add to ecosystemAddresses + const discordClaims = parseDiscordClaims() + discordClaims.forEach((row) => { + claimInfos.push( + new ClaimInfo('discord', row.address, truncateAllocation(row.alloc)) + ) + }) + + return { + claimInfos, + evmBreakdownRows, + solanaBreakdownRows, + } +} + +function hasColumns( + csvClaims: Papa.ParseResult, + columns: string[] +): void { + columns.forEach((column) => { + assert( + csvClaims.meta.fields?.includes(column), + `CSV file does not have required '${column}' column` + ) + }) +} + +function parseDefiCsv(defi_csv: string) { + const defiCsvClaims = Papa.parse( + fs.readFileSync(path.resolve(CSV_DIR, defi_csv), 'utf-8'), + { 
+ header: true, + } + ) + + hasColumns(defiCsvClaims, ['address', 'chain', 'alloc']) + + const claimsData = defiCsvClaims.data as { + address: string chain: string - identity: string - amount: string + alloc: string }[] + + // group by address + // only evm addresses should have multiple values + return claimsData.reduce((acc, row) => { + const curValues = acc.get(row.address) + if (curValues) { + acc.set(row.address, [...curValues, [row.chain, row.alloc]]) + } else { + acc.set(row.address, [[row.chain, row.alloc]]) + } + return acc + }, new Map()) +} + +function parseNftCsv() { + const nftCsvClaims = Papa.parse( + fs.readFileSync(path.resolve(CSV_DIR, NFT_CLAIMS), 'utf-8'), + { + header: true, + } + ) + hasColumns(nftCsvClaims, ['address', 'alloc']) + + const nftClaims = nftCsvClaims.data as { + address: string + alloc: string + }[] + return nftClaims +} + +function parseDiscordClaims(): { address: string; alloc: string }[] { + const discordCsvClaims = Papa.parse( + fs.readFileSync(path.resolve(CSV_DIR, DISCORD_CLAIMS), 'utf-8'), + { + header: true, + } + ) + hasColumns(discordCsvClaims, ['address', 'alloc']) + + const discordClaims = discordCsvClaims.data as { + address: string + alloc: string + }[] + + const discordClaimsAddrSet = new Set(discordClaims.map((row) => row.address)) assert( - evmBreakdownsData.every((row) => { - return EVM_CHAINS.includes(row['chain'] as EvmChains) - }) + discordClaims.length === discordClaimsAddrSet.size, + 'Discord claims has duplicate addresses' + ) + + const discordDevCsvClaims = Papa.parse( + fs.readFileSync(path.resolve(CSV_DIR, DISCORD_DEV_CLAIMS), 'utf-8'), + { + header: true, + } ) - const evmBreakDowns: EvmBreakdownRow[] = evmBreakdownsData.map((row) => { + + hasColumns(discordDevCsvClaims, ['address', 'alloc']) + + // filter out addresses that are already in discordClaims + const discordDevClaims = ( + discordDevCsvClaims.data as { + address: string + alloc: string + }[] + ).filter((row) => { + const isUniqueDevAddress = !discordClaimsAddrSet.has(row.address) + if (!isUniqueDevAddress) { + console.log( + `skipping discord dev claim for ${row.address} because it is already in discord.csv` + ) + } + return isUniqueDevAddress + }) + + return discordClaims.concat(discordDevClaims).map((addrAndAlloc) => { + const hashedDiscordId = hashDiscordUserId( + DISCORD_HASH_SALT, + addrAndAlloc.address + ) return { - chain: row['chain'], - identity: row['identity'], - amount: new BN(row['amount']), + address: hashedDiscordId, + alloc: addrAndAlloc.alloc, } }) +} - checkClaimsMatchEvmBreakdown(claimInfos, evmBreakDowns) +function truncateAllocation(allocation: string): BN { + if (allocation.indexOf('.') === -1) { + return new BN(allocation + '000000') + } + const allocationParts = allocation.split('.') + assert(allocationParts.length === 2) + const allocationInt = allocationParts[0] + const allocationNormalized = allocationInt + '000000' + const allocationBn = new BN(allocationNormalized) + return allocationBn +} + +function getMaxUserAndAmount(claimInfos: ClaimInfo[]): [string, BN] { + let maxUser = '' + const maxAmount = claimInfos.reduce((prev, curr) => { + if (curr.amount.gt(prev)) { + maxUser = curr.identity + } + return BN.max(prev, curr.amount) + }, new BN(0)) + return [maxUser, maxAmount] +} + +function getTotalByEcosystems(claimInfos: ClaimInfo[]): Map { + const ecosystemMap = new Map() + claimInfos.forEach((claimInfo) => { + if (ecosystemMap.has(claimInfo.ecosystem)) { + ecosystemMap.set( + claimInfo.ecosystem, + 
ecosystemMap.get(claimInfo.ecosystem)?.add(claimInfo.amount) as BN + ) + } else { + ecosystemMap.set(claimInfo.ecosystem, claimInfo.amount) + } + }) + return ecosystemMap +} + +// Requirements for this script : +// - Airdrop allocation repo has been downloaded and path to repo set in .env +// - DB has been migrated + +// Extra steps after running this script : +// - Make sure the tokens are in the treasury account +// - Make sure the treasury account has the config account as its delegate + +async function main() { + const mainStart = Date.now() + await clearDatabase(pool) + const parseCsvStart = Date.now() + const { claimInfos, evmBreakdownRows, solanaBreakdownRows } = parseCsvs() + const parseCsvEnd = Date.now() + if (DEBUG) { + const [maxUser, maxAmount] = getMaxUserAndAmount(claimInfos) + console.log(`maxUser: ${maxUser} maxAmount: ${maxAmount.toString()}`) + + Ecosystems.forEach((ecosystem) => { + const [maxEcoUser, maxEcoAmount] = getMaxUserAndAmount( + claimInfos.filter((claimInfo) => claimInfo.ecosystem === ecosystem) + ) + console.log( + `ecosystem: ${ecosystem} maxEcoUser: ${maxEcoUser} maxEcoAmount: ${maxEcoAmount + .div(new BN(1000000)) + .toString()}` + ) + }) + const ecosystemMap = getTotalByEcosystems(claimInfos) + let totalAirdrop = new BN(0) + ecosystemMap.forEach((amount, ecosystem) => { + totalAirdrop = totalAirdrop.add(amount) + }) + ecosystemMap.forEach((amount, ecosystem) => { + console.log( + `ecosystem: ${ecosystem} amount: ${amount + .div(new BN(1000000)) + .toString()} - ${amount + .mul(new BN(100)) + .div(totalAirdrop) + .toString()}% of total airdrop` + ) + }) + assert( + evmBreakdownRows.every((row) => + EVM_CHAINS.includes(row.chain as EvmChains) + ) + ) + } + const maxAmount = getMaxAmount(claimInfos) + + checkClaimsMatchEvmBreakdown(claimInfos, evmBreakdownRows) + + checkClaimsMatchSolanaBreakdown(claimInfos, solanaBreakdownRows) + + // sort by amount & identity + claimInfos.sort((a, b) => { + const amountCmp = b.amount.cmp(a.amount) + return amountCmp != 0 ? 
amountCmp : a.identity.localeCompare(b.identity) + }) // Add data to database + const addClaimInfosStart = Date.now() const root = await addClaimInfosToDatabase(pool, claimInfos) - await addEvmBreakdownsToDatabase(pool, evmBreakDowns) + const addClaimInfoEnd = Date.now() + console.log( + `\n\nadded claim infos to database time: ${ + addClaimInfoEnd - addClaimInfosStart + } ms` + ) + const addEvmStart = Date.now() + await addEvmBreakdownsToDatabase(pool, evmBreakdownRows) + const addEvmEnd = Date.now() + console.log(`added evm breakdowns time : ${addEvmEnd - addEvmStart} ms`) + const addSolStart = Date.now() + await addSolanaBreakdownsToDatabase(pool, solanaBreakdownRows) + const addSolEnd = Date.now() + console.log( + `added solana breakdowns to db time: ${addSolEnd - addSolStart} ms` + ) + + console.log(` + \n\n + parseCsvTime: ${parseCsvEnd - parseCsvStart} + addClaimInfoTime: ${addClaimInfoEnd - addClaimInfosStart} + addEvmTime: ${addEvmEnd - addEvmStart} + addSolTime: ${addSolEnd - addSolStart} + \n\n`) - // Intialize the token dispenser + // Initialize the token dispenser const tokenDispenserProvider = new TokenDispenserProvider( ENDPOINT, new NodeWallet(DEPLOYER_WALLET), + // for local testing + // loadFunderWallet(), new PublicKey(PROGRAM_ID), { skipPreflight: true, @@ -156,6 +529,7 @@ async function main() { commitment: 'processed', } ) + await tokenDispenserProvider.initialize( root, PYTH_MINT, @@ -164,6 +538,35 @@ async function main() { FUNDER_KEYPAIR.publicKey, maxAmount ) + + // for local testing + // const mintAndTreasury = await tokenDispenserProvider.setupMintAndTreasury() + // await tokenDispenserProvider.initialize( + // root, + // mintAndTreasury.mint.publicKey, + // mintAndTreasury.treasury, + // DISPENSER_GUARD.publicKey, + // FUNDER_KEYPAIR.publicKey, + // maxAmount + // ) + const mainEnd = Date.now() + console.log(`\n\ninitialized token dispenser\n\n`) + + console.log(` + \n\n + totalTime: ${mainEnd - mainStart} + parseCsvTime: ${parseCsvEnd - parseCsvStart} + addClaimInfoTime: ${addClaimInfoEnd - addClaimInfosStart} + addEvmTime: ${addEvmEnd - addEvmStart} + addSolTime: ${addSolEnd - addSolStart} + \n\n`) } -main() +;(async () => { + try { + await main() + } catch (e) { + console.error(`error from populate_from_csv: ${e}`) + process.exit(1) + } +})() diff --git a/frontend/scripts/setup.sh b/frontend/scripts/setup.sh index bd1d0c2c..5ffbcd49 100755 --- a/frontend/scripts/setup.sh +++ b/frontend/scripts/setup.sh @@ -5,6 +5,7 @@ command -v shellcheck >/dev/null && shellcheck "$0" # initialize variables dev=0 test=0 +csv=0 verbose=0 postgres=1; @@ -14,10 +15,11 @@ TOKEN_DISPENSER_DIR="$DIR/../../token-dispenser"; usage() { cat < { + const merkleTreeStart = Date.now() const merkleTree = new MerkleTree( claimInfos.map((claimInfo) => { return claimInfo.toBuffer() }) ) + const merkleTreeEnd = Date.now() - for (const claimInfo of claimInfos) { - const proof = merkleTree.prove(claimInfo.toBuffer()) + console.log( + `\n\nbuilt merkle tree time: ${merkleTreeEnd - merkleTreeStart}\n\n` + ) - await pool.query( - 'INSERT INTO claims VALUES($1::ecosystem_type, $2, $3, $4)', - [ - claimInfo.ecosystem, - claimInfo.identity, - claimInfo.amount.toString(), - proof, - ] - ) + let claimInfoChunks = [] + const chunkSize = 100 + const chunkCounts = [...Array(Math.ceil(claimInfos.length / chunkSize))] + + const claimInfoChunksStart = Date.now() + + claimInfoChunks = chunkCounts.map((_, i) => { + if (i % 100 === 0) { + console.log(`\n\n making claimInfo chunk ${i}/${chunkCounts.length}\n\n`) 
+    }
+    let chunk = claimInfos.splice(0, chunkSize)
+    return chunk.map((claimInfo) => {
+      return {
+        ecosystem: claimInfo.ecosystem,
+        identity: claimInfo.identity,
+        amount: claimInfo.amount.toString(),
+        proof_of_inclusion: merkleTree.prove(claimInfo.toBuffer()),
+      }
+    })
+  })
+  const claimInfoChunksEnd = Date.now()
+
+  console.log(
+    `\n\nclaiminfoChunks time: ${claimInfoChunksEnd - claimInfoChunksStart}\n\n`
+  )
+
+  let Claims = sql.define({
+    name: 'claims',
+    columns: ['ecosystem', 'identity', 'amount', 'proof_of_inclusion'],
+  })
+  const claimsInsertStart = Date.now()
+  let chunkCount = 0
+  for (const claimInfoChunk of claimInfoChunks) {
+    let query = Claims.insert(claimInfoChunk).toQuery()
+    await pool.query(query)
+    chunkCount++
+    if (chunkCount % 10 === 0) {
+      console.log(
+        `\n\ninserted ${chunkCount}/${claimInfoChunks.length} chunks\n\n`
+      )
+    }
   }
+  const claimsInsertEnd = Date.now()
+  console.log(
+    `\n\nclaimsInsert time: ${claimsInsertEnd - claimsInsertStart}\n\n`
+  )
   return merkleTree.root
 }
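
For reference, a minimal sketch of how the new CSV-based populate flow is meant to be run, assuming the five CSV files named in the script above (defi.csv, defi_dev.csv, discord.csv, discord_dev.csv, nft.csv) are present in CSV_DIR and the .env values added to .env.sample (DEPLOYER_WALLET, CSV_DIR, PYTH_MINT, PYTH_TREASURY, DISCORD_HASH_SALT, etc.) are filled in with real values:

    # run database migrations first ("DB has been migrated" requirement above)
    npm run migrate
    # parse the CSVs, populate the claims/breakdown tables, and initialize the token dispenser
    npm run populate:csv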