From 1cb3af70822aca2e3c160191c0ddedaf77f5874d Mon Sep 17 00:00:00 2001 From: Juan Carlos Claridad Date: Sun, 5 Jan 2025 02:18:13 +0800 Subject: [PATCH 01/19] added plugin-twilio this can send message using sms or whatsapp --- packages/plugin-twilio/.npmignore | 6 + packages/plugin-twilio/ReadMe.txt | 78 ++++++++ packages/plugin-twilio/eslint.config.mjs | 3 + packages/plugin-twilio/package.json | 20 ++ packages/plugin-twilio/src/actions/index.ts | 3 + packages/plugin-twilio/src/actions/sendSms.ts | 173 ++++++++++++++++++ .../src/actions/sendWhatsAppMessage.ts | 140 ++++++++++++++ packages/plugin-twilio/src/index.ts | 12 ++ packages/plugin-twilio/tsconfig.json | 13 ++ packages/plugin-twilio/tsup.config.ts | 20 ++ 10 files changed, 468 insertions(+) create mode 100644 packages/plugin-twilio/.npmignore create mode 100644 packages/plugin-twilio/ReadMe.txt create mode 100644 packages/plugin-twilio/eslint.config.mjs create mode 100644 packages/plugin-twilio/package.json create mode 100644 packages/plugin-twilio/src/actions/index.ts create mode 100644 packages/plugin-twilio/src/actions/sendSms.ts create mode 100644 packages/plugin-twilio/src/actions/sendWhatsAppMessage.ts create mode 100644 packages/plugin-twilio/src/index.ts create mode 100644 packages/plugin-twilio/tsconfig.json create mode 100644 packages/plugin-twilio/tsup.config.ts diff --git a/packages/plugin-twilio/.npmignore b/packages/plugin-twilio/.npmignore new file mode 100644 index 00000000000..078562eceab --- /dev/null +++ b/packages/plugin-twilio/.npmignore @@ -0,0 +1,6 @@ +* + +!dist/** +!package.json +!readme.md +!tsup.config.ts \ No newline at end of file diff --git a/packages/plugin-twilio/ReadMe.txt b/packages/plugin-twilio/ReadMe.txt new file mode 100644 index 00000000000..c32b9723e69 --- /dev/null +++ b/packages/plugin-twilio/ReadMe.txt @@ -0,0 +1,78 @@ +# Your .env file should contain the following information + +# Cache Configs +CACHE_STORE=database + +# Discord Configuration +DISCORD_APPLICATION_ID= +DISCORD_API_TOKEN= + +# AI Model API Keys +OPENAI_API_KEY= + +# Twitter/X Configuration +TWITTER_USERNAME= +TWITTER_PASSWORD= +TWITTER_EMAIL= +TWITTER_POLL_INTERVAL=120 # How often (in seconds) the bot should check for interactions +TWITTER_SEARCH_ENABLE=FALSE # Enable timeline search, WARNING this greatly increases your chance of getting banned +#TWITTER_TARGET_USERS= + +# Twilio Part +TWILIO_ACCOUNT_SID= +TWILIO_AUTH_TOKEN= +TWILIO_PHONE_NUMBER= +TWILIO_WHATSAPP_PHONE_NUMBER= + +# Server Configuration +SERVER_PORT=3000 + +# How to use +1. Create your .env file if you don't have one yet, then populate it with the information above; make sure every value is filled in. +2. Add this project to your ElizaOS project under packages. +3. In a terminal, go inside plugin-twilio and run pnpm install twilio. +4. In your agent folder, update package.json and add "@elizaos/plugin-twilio": "workspace:*". +5. In your agent's index.ts, add import { twilioPlugin } from "@elizaos/plugin-twilio"; +6. Add twilioPlugin to the Agent Runtime, still inside the agent's index.ts. +7. pnpm install +8. pnpm build +9. 
pnpm start --character="characters/nameofyouragentcharacterfile.character.json" + +# Note: Make sure you have a verified Twilio developer account with a verified phone number and enough credits; Twilio provides a small amount of free credit for new accounts +Visit Twilio: https://www.twilio.com +Twilio quick start guides: https://www.twilio.com/docs/messaging/quickstart +Twilio documentation: https://www.twilio.com/docs/messaging +Free Trial Account: https://www.twilio.com/docs/messaging/guides/how-to-use-your-free-trial-account + +# For WhatsApp, follow the guides below; you need a separate Twilio WhatsApp-enabled phone number +https://www.twilio.com/docs/whatsapp +https://www.twilio.com/docs/whatsapp/quickstart/node +https://www.twilio.com/docs/whatsapp/getting-started#registering-a-whatsapp-sender +https://www.twilio.com/docs/verify/whatsapp + + +# Some other WhatsApp info that you might need +https://www.twilio.com/docs/whatsapp + +# Twilio phone number guidelines +https://www.twilio.com/en-us/guidelines + + +# Clarification: this plugin is intended to be placed inside the ElizaOS packages directory; it cannot work on its own + +# A sample implementation can be found here +https://github.com/juanc07/AgentSoulSpark + +# Usage Sample + +The message that you want to send must be wrapped in single or double quotes + +Example 1: +Please send sms to [phone number], and my message is '[your message here]' +Please send WhatsApp message to [phone number], and my message is '[your message here]' + +Example 2: +Please send sms to [phone number], and my message is "[your message here]" +Please send WhatsApp message to [phone number], and my message is "[your message here]" + +# Note: I haven't tested more complex strings or sentences yet; this could of course be improved, but for now it works this way \ No newline at end of file diff --git a/packages/plugin-twilio/eslint.config.mjs b/packages/plugin-twilio/eslint.config.mjs new file mode 100644 index 00000000000..92fe5bbebef --- /dev/null +++ b/packages/plugin-twilio/eslint.config.mjs @@ -0,0 +1,3 @@ +import eslintGlobalConfig from "../../eslint.config.mjs"; + +export default [...eslintGlobalConfig]; diff --git a/packages/plugin-twilio/package.json b/packages/plugin-twilio/package.json new file mode 100644 index 00000000000..db5033dea2e --- /dev/null +++ b/packages/plugin-twilio/package.json @@ -0,0 +1,20 @@ +{ + "name": "@elizaos/plugin-twilio", + "version": "0.1.7-alpha.2", + "main": "dist/index.js", + "type": "module", + "types": "dist/index.d.ts", + "dependencies": { + "@elizaos/core": "workspace:*", + "tsup": "8.3.5", + "twilio": "^5.4.0" + }, + "scripts": { + "build": "tsup --format esm --dts", + "dev": "tsup --format esm --dts --watch", + "lint": "eslint --fix --cache ." 
+ }, + "peerDependencies": { + "whatwg-url": "7.1.0" + } +} diff --git a/packages/plugin-twilio/src/actions/index.ts b/packages/plugin-twilio/src/actions/index.ts new file mode 100644 index 00000000000..25a79e9a3a6 --- /dev/null +++ b/packages/plugin-twilio/src/actions/index.ts @@ -0,0 +1,3 @@ +export * from "./helloWorld.ts"; +export * from "./sendSms.ts"; +export * from "./sendWhatsAppMessage.ts"; diff --git a/packages/plugin-twilio/src/actions/sendSms.ts b/packages/plugin-twilio/src/actions/sendSms.ts new file mode 100644 index 00000000000..c3b8c6311ff --- /dev/null +++ b/packages/plugin-twilio/src/actions/sendSms.ts @@ -0,0 +1,173 @@ +import { + ActionExample, + generateText, + HandlerCallback, + IAgentRuntime, + Memory, + ModelClass, + State, + type Action, +} from "@elizaos/core"; +import twilio from 'twilio'; + +export const sendSmsAction: Action = { + name: "SendSms", + similes: [ + "SendSms" + ], + validate: async (_runtime: IAgentRuntime, _message: Memory) => { + return true; + }, + description: + "Send SMS to the mobile number provided by the user", + handler: async ( + _runtime: IAgentRuntime, + _message: Memory, + _state: State, + _options:{[key:string]: unknown}, + _callback: HandlerCallback, + ): Promise<boolean> => { + // Check if environment variables are set + const accountSid = process.env.TWILIO_ACCOUNT_SID; + const authToken = process.env.TWILIO_AUTH_TOKEN; + + console.log("CHECK _message: ",_message.content.text); + + if (!accountSid || !authToken) { + console.error('TWILIO_ACCOUNT_SID or TWILIO_AUTH_TOKEN is not set'); + return false; + } + + // Extract the mobile number from the message + const mobileNumberRegex = /(?:\+|00)(\d{1,3})\s?(\d{3,5})\s?(\d{4,10})/; // This regex matches numbers like +1 123 4567890 or 001 123 4567890 + const text = (_message.content as { text?: string })?.text || ''; + const matches = text.match(mobileNumberRegex); + + const messageRegex = /(['"])(.*?)\1/; + const messageMatch = text.match(messageRegex); + + let mobileNumberProvidedByUser = null; + let messageToSendFromUser = null; + + if(messageMatch){ + messageToSendFromUser = messageMatch[2]; + } + if (matches) { + // Combine the parts of the number into a single string, removing spaces and plus signs + mobileNumberProvidedByUser = `+${matches[1]}${matches[2]}${matches[3]}`; + }else{ + const alternativeMobileNumberRegex = /\b(\d{3})[-.]?(\d{3})[-.]?(\d{4})\b/; // For formats like 123-456-7890 or 123.456.7890 + if (!mobileNumberProvidedByUser) { + const alternativeMatches = text.match(alternativeMobileNumberRegex); + if (alternativeMatches) { + mobileNumberProvidedByUser = `${alternativeMatches[1]}${alternativeMatches[2]}${alternativeMatches[3]}`; + } + } + } + + const twilioNumber = process.env.TWILIO_PHONE_NUMBER; // Your Twilio phone number + + console.log('check target mobile number: ', mobileNumberProvidedByUser); + console.log('check messageToSendFromUser: ', messageToSendFromUser); + console.log('check twilioNumber: ', twilioNumber); + + if (!twilioNumber) { + console.error('Twilio phone number is missing'); + + _callback({ + text: `Sorry, there was an issue sending the SMS, please try again later`, + }); + return false; + } + + const recentMessages = `Extract the phone number from the user's recent messages ${_state.recentMessages}`; + + if (!mobileNumberProvidedByUser) { + console.error('Mobile number is missing; will try to get the phone number or mobile number from recent messages'); + + mobileNumberProvidedByUser = await generateText( + { + runtime: _runtime, + context: recentMessages, + 
modelClass: ModelClass.SMALL, + stop: ["\n"], + customSystemPrompt: "only extract the phone number that the user intends to send the message to and only get the last one" + } + ); + } + + if (!mobileNumberProvidedByUser) { + console.error('Mobile number is missing'); + + _callback({ + text: `Sorry, there was an issue sending the SMS, please try again later`, + }); + return false; + } + + const recentUserMessages = `Extract the message intended for SMS or text: ${_state.recentMessages}`; + + if (!messageToSendFromUser) { + console.error('messageToSendFromUser is missing; will try to get the user message from recent messages'); + + messageToSendFromUser = await generateText( + { + runtime: _runtime, + context: recentUserMessages, + modelClass: ModelClass.SMALL, + stop: ["\n"] + } + ); + } + + if(messageToSendFromUser==null){ + console.error('messageToSendFromUser is empty or null'); + + _callback({ + text: `Sorry, there was an issue sending the SMS, please try again later`, + }); + return false; + } + + try { + // Initialize Twilio client + const client = twilio(accountSid, authToken); + + // Send the SMS + const message= await client.messages.create({ + body: messageToSendFromUser, // The message body + to: mobileNumberProvidedByUser, // The recipient's phone number + from: twilioNumber, // Your Twilio phone number + }); + + console.log("message body: ", message); + + const messageFromAgent = `SMS sent successfully to ${mobileNumberProvidedByUser}`; + + // Call the callback to notify the user + _callback({ + text: messageFromAgent, + }); + + return true; + } catch (error) { + console.error('Failed to send SMS:', error); + _callback({ + text: `Failed to send SMS to ${mobileNumberProvidedByUser}`, + }); + return false; + } + }, + examples: [ + [ + { + user: "{{user1}}", + content: { text: "please send my message via sms to target mobile number" }, + }, + { + user: "{{user2}}", + content: { text: "", action: "SEND_SMS" }, + }, + ], + ] as ActionExample[][], +} as Action; \ No newline at end of file diff --git a/packages/plugin-twilio/src/actions/sendWhatsAppMessage.ts b/packages/plugin-twilio/src/actions/sendWhatsAppMessage.ts new file mode 100644 index 00000000000..f9ec7744411 --- /dev/null +++ b/packages/plugin-twilio/src/actions/sendWhatsAppMessage.ts @@ -0,0 +1,140 @@ +import { + ActionExample, + HandlerCallback, + IAgentRuntime, + Memory, + State, + type Action, +} from "@elizaos/core"; +import twilio from 'twilio'; + +export const sendWhatsAppMessageAction: Action = { + name: "SendWhatsAppMessage", + similes: [ + "SendWhatsAppMessage" + ], + validate: async (_runtime: IAgentRuntime, _message: Memory) => { + return true; + }, + description: + "Send a WhatsApp message to the mobile number provided by the user", + handler: async ( + _runtime: IAgentRuntime, + _message: Memory, + _state: State, + _options:{[key:string]: unknown}, + _callback: HandlerCallback, + ): Promise<boolean> => { + // Check if environment variables are set + const accountSid = process.env.TWILIO_ACCOUNT_SID; + const authToken = process.env.TWILIO_AUTH_TOKEN; + + console.log("CHECK _message: ",_message.content.text); + + if (!accountSid || !authToken) { + console.error('TWILIO_ACCOUNT_SID or TWILIO_AUTH_TOKEN is not set'); + return false; + } + + // Extract the mobile number from the message + const mobileNumberRegex = /(?:\+|00)(\d{1,3})\s?(\d{3,5})\s?(\d{4,10})/; // This regex matches numbers like +1 123 4567890 or 001 123 4567890 + const text = (_message.content as { text?: string })?.text || ''; + const matches = 
text.match(mobileNumberRegex); + + const messageRegex = /(['"])(.*?)\1/; + const messageMatch = text.match(messageRegex); + + let mobileNumberProvidedByUser = null; + let messageToSendFromUser = null; + + if(messageMatch){ + messageToSendFromUser = messageMatch[2]; + } + if (matches) { + // Combine the parts of the number into a single string, removing spaces and plus signs + mobileNumberProvidedByUser = `+${matches[1]}${matches[2]}${matches[3]}`; + } else { + const alternativeMobileNumberRegex = /\b(\d{3})[-.]?(\d{3})[-.]?(\d{4})\b/; // For formats like 123-456-7890 or 123.456.7890 + if (!mobileNumberProvidedByUser) { + const alternativeMatches = text.match(alternativeMobileNumberRegex); + if (alternativeMatches) { + mobileNumberProvidedByUser = `+${alternativeMatches[1]}${alternativeMatches[2]}${alternativeMatches[3]}`; + } + } + } + + const twilioNumber = process.env.TWILIO_WHATSAPP_PHONE_NUMBER; // Your Twilio WhatsApp number + + console.log('check target mobile number: ', mobileNumberProvidedByUser); + console.log('check messageToSendFromUser: ', messageToSendFromUser); + console.log('check twilioNumber: ', twilioNumber); + + if (!mobileNumberProvidedByUser) { + console.error('Mobile number is missing'); + + _callback({ + text: `Sorry there was an issue sending the WhatsApp message, please try again later`, + }); + return false; + } + + if (!twilioNumber) { + console.error('Twilio WhatsApp number is missing'); + + _callback({ + text: `Sorry there was an issue sending the WhatsApp message, please try again later`, + }); + return false; + } + + if(messageToSendFromUser==null){ + console.error('messageToSendFromUser is empty or null'); + + _callback({ + text: `Sorry there was an issue sending the WhatsApp message, please try again later`, + }); + return false; + } + + try { + // Initialize Twilio client + const client = twilio(accountSid, authToken); + + // Send the WhatsApp message + const message = await client.messages.create({ + body: messageToSendFromUser, // The message body + to: `whatsapp:${mobileNumberProvidedByUser}`, // The recipient's WhatsApp number + from: `whatsapp:${twilioNumber}`, // Your Twilio WhatsApp number + }); + + console.log("message body: ", message); + + const messageFromAgent = `WhatsApp message sent successfully to ${mobileNumberProvidedByUser}`; + + // Call the callback to notify the user + _callback({ + text: messageFromAgent, + }); + + return true; + } catch (error) { + console.error('Failed to send WhatsApp message:', error); + _callback({ + text: `Failed to send WhatsApp message to ${mobileNumberProvidedByUser}`, + }); + return false; + } + }, + examples: [ + [ + { + user: "{{user1}}", + content: { text: "please send my message via WhatsApp to target mobile number" }, + }, + { + user: "{{user2}}", + content: { text: "", action: "SEND_WHATSAPP_MESSAGE" }, + }, + ], + ] as ActionExample[][], +} as Action; \ No newline at end of file diff --git a/packages/plugin-twilio/src/index.ts b/packages/plugin-twilio/src/index.ts new file mode 100644 index 00000000000..a10a047d517 --- /dev/null +++ b/packages/plugin-twilio/src/index.ts @@ -0,0 +1,12 @@ +import { Plugin } from "@elizaos/core"; +import { sendWhatsAppMessageAction,sendSmsAction } from "./actions"; +export * as actions from "./actions"; + +export const twilioPlugin: Plugin = { + name: "twilio", + description: "twilio basic send sms action implementation", + actions: [ + sendSmsAction, + sendWhatsAppMessageAction, + ] +}; diff --git a/packages/plugin-twilio/tsconfig.json 
b/packages/plugin-twilio/tsconfig.json new file mode 100644 index 00000000000..834c4dce269 --- /dev/null +++ b/packages/plugin-twilio/tsconfig.json @@ -0,0 +1,13 @@ +{ + "extends": "../core/tsconfig.json", + "compilerOptions": { + "outDir": "dist", + "rootDir": "src", + "types": [ + "node" + ] + }, + "include": [ + "src/**/*.ts" + ] +} \ No newline at end of file diff --git a/packages/plugin-twilio/tsup.config.ts b/packages/plugin-twilio/tsup.config.ts new file mode 100644 index 00000000000..e42bf4efeae --- /dev/null +++ b/packages/plugin-twilio/tsup.config.ts @@ -0,0 +1,20 @@ +import { defineConfig } from "tsup"; + +export default defineConfig({ + entry: ["src/index.ts"], + outDir: "dist", + sourcemap: true, + clean: true, + format: ["esm"], // Ensure you're targeting CommonJS + external: [ + "dotenv", // Externalize dotenv to prevent bundling + "fs", // Externalize fs to use Node.js built-in module + "path", // Externalize other built-ins if necessary + "@reflink/reflink", + "@node-llama-cpp", + "https", + "http", + "agentkeepalive", + // Add other modules you want to externalize + ], +}); From f068a62af22e11ce75dc8631b1242735f0cc03c9 Mon Sep 17 00:00:00 2001 From: Juan Carlos Claridad Date: Sun, 5 Jan 2025 03:54:44 +0800 Subject: [PATCH 02/19] remove hello action in index.ts --- packages/plugin-twilio/src/actions/index.ts | 1 - pnpm-lock.yaml | 96 ++++++++++++++++----- 2 files changed, 75 insertions(+), 22 deletions(-) diff --git a/packages/plugin-twilio/src/actions/index.ts b/packages/plugin-twilio/src/actions/index.ts index 25a79e9a3a6..0326100e5bc 100644 --- a/packages/plugin-twilio/src/actions/index.ts +++ b/packages/plugin-twilio/src/actions/index.ts @@ -1,3 +1,2 @@ -export * from "./helloWorld.ts"; export * from "./sendSms.ts"; export * from "./sendWhatsAppMessage.ts"; diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 90f147c715c..7881e624dee 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1864,6 +1864,18 @@ importers: specifier: 3.2.0 version: 3.2.0 + packages/plugin-twilio: + dependencies: + '@elizaos/core': + specifier: workspace:* + version: link:../core + tsup: + specifier: 8.3.5 + version: 8.3.5(@swc/core@1.10.4(@swc/helpers@0.5.15))(jiti@2.4.2)(postcss@8.4.49)(tsx@4.19.2)(typescript@5.6.3)(yaml@2.7.0) + twilio: + specifier: ^5.4.0 + version: 5.4.0 + packages/plugin-twitter: dependencies: '@elizaos/core': @@ -17136,6 +17148,9 @@ packages: resolution: {integrity: sha512-Gf9qqc58SpCA/xdziiHz35F4GNIWYWZrEshUc/G/r5BnLph6xpKuLeoJoQuj5WfBIx/eQLf+hmVPYHaxJu7V2g==} engines: {node: '>= 10.13.0'} + scmp@2.1.0: + resolution: {integrity: sha512-o/mRQGk9Rcer/jEEw/yw4mwo3EU/NvYvp577/Btqrym9Qy5/MdWGBqipbALgd2lrdWTJ5/gqDusxfnQBxOxT2Q==} + scrypt-js@3.0.1: resolution: {integrity: sha512-cdwTTnqPu0Hyvf5in5asVdZocVDTNRmR7XEcJuIzMjJeSHybHl7vpB66AzwTaIg6CLSbtjcxc8fqcySfnTkccA==} @@ -18319,6 +18334,10 @@ packages: tweetnacl@1.0.3: resolution: {integrity: sha512-6rt+RN7aOi1nGMyC4Xa5DdYiukl2UWCbcJft7YhxReBGQD7OAM8Pbxw6YMo4r2diNEA8FEmu32YOn9rhaiE5yw==} + twilio@5.4.0: + resolution: {integrity: sha512-kEmxzdOLTzXzUEXIkBVwT1Itxlbp+rtGrQogNfPtSE3EjoEsxrxB/9tdMIEbrsioL8CzTk/+fiKNJekAyHxjuQ==} + engines: {node: '>=14.0'} + twitter-api-v2@1.19.0: resolution: {integrity: sha512-jfG4aapNPM9+4VxNxn0TXvD8Qj8NmVx6cY0hp5K626uZ41qXPaJz33Djd3y6gfHF/+W29+iZz0Y5qB869d/akA==} @@ -19476,6 +19495,10 @@ packages: resolution: {integrity: sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==} engines: {node: '>=18'} + xmlbuilder@13.0.2: + resolution: {integrity: 
sha512-Eux0i2QdDYKbdbA6AM6xE4m6ZTZr4G4xF9kahI2ukSEMCzwce2eX9WlTI5J3s+NU7hpasFsr8hWIONae7LluAQ==} + engines: {node: '>=6.0'} + xmlchars@2.2.0: resolution: {integrity: sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==} @@ -19623,7 +19646,7 @@ snapshots: '@acuminous/bitsyntax@0.1.2': dependencies: buffer-more-ints: 1.0.0 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 safe-buffer: 5.1.2 transitivePeerDependencies: - supports-color @@ -21556,7 +21579,7 @@ snapshots: dependencies: '@scure/bip32': 1.6.1 abitype: 1.0.8(typescript@5.6.3)(zod@3.23.8) - axios: 1.7.9(debug@4.4.0) + axios: 1.7.9 axios-mock-adapter: 1.22.0(axios@1.7.9) axios-retry: 4.5.0(axios@1.7.9) bip32: 4.0.0 @@ -23391,7 +23414,7 @@ snapshots: '@eslint/config-array@0.19.1': dependencies: '@eslint/object-schema': 2.1.5 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 minimatch: 3.1.2 transitivePeerDependencies: - supports-color @@ -23417,7 +23440,7 @@ snapshots: '@eslint/eslintrc@3.2.0': dependencies: ajv: 6.12.6 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 espree: 10.3.0 globals: 14.0.0 ignore: 5.3.2 @@ -28831,7 +28854,7 @@ snapshots: '@typescript-eslint/types': 8.16.0 '@typescript-eslint/typescript-estree': 8.16.0(typescript@5.6.3) '@typescript-eslint/visitor-keys': 8.16.0 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 eslint: 9.16.0(jiti@2.4.2) optionalDependencies: typescript: 5.6.3 @@ -28864,7 +28887,7 @@ snapshots: dependencies: '@typescript-eslint/typescript-estree': 8.16.0(typescript@5.6.3) '@typescript-eslint/utils': 8.16.0(eslint@9.16.0(jiti@2.4.2))(typescript@5.6.3) - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 eslint: 9.16.0(jiti@2.4.2) ts-api-utils: 1.4.3(typescript@5.6.3) optionalDependencies: @@ -28895,7 +28918,7 @@ snapshots: dependencies: '@typescript-eslint/types': 8.16.0 '@typescript-eslint/visitor-keys': 8.16.0 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 fast-glob: 3.3.2 is-glob: 4.0.3 minimatch: 9.0.5 @@ -29685,7 +29708,7 @@ snapshots: agent-base@6.0.2: dependencies: - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 transitivePeerDependencies: - supports-color @@ -30077,13 +30100,13 @@ snapshots: axios-mock-adapter@1.22.0(axios@1.7.9): dependencies: - axios: 1.7.9(debug@4.4.0) + axios: 1.7.9 fast-deep-equal: 3.1.3 is-buffer: 2.0.5 axios-retry@4.5.0(axios@1.7.9): dependencies: - axios: 1.7.9(debug@4.4.0) + axios: 1.7.9 is-retry-allowed: 2.2.0 axios@0.21.4: @@ -30094,7 +30117,7 @@ snapshots: axios@0.27.2: dependencies: - follow-redirects: 1.15.9(debug@4.4.0) + follow-redirects: 1.15.9 form-data: 4.0.1 transitivePeerDependencies: - debug @@ -30123,6 +30146,14 @@ snapshots: transitivePeerDependencies: - debug + axios@1.7.9: + dependencies: + follow-redirects: 1.15.9 + form-data: 4.0.1 + proxy-from-env: 1.1.0 + transitivePeerDependencies: + - debug + axios@1.7.9(debug@4.4.0): dependencies: follow-redirects: 1.15.9(debug@4.4.0) @@ -32158,6 +32189,10 @@ snapshots: dependencies: ms: 2.1.3 + debug@4.4.0: + dependencies: + ms: 2.1.3 + debug@4.4.0(supports-color@5.5.0): dependencies: ms: 2.1.3 @@ -33059,7 +33094,7 @@ snapshots: ajv: 6.12.6 chalk: 4.1.2 cross-spawn: 7.0.6 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 escape-string-regexp: 4.0.0 eslint-scope: 8.2.0 eslint-visitor-keys: 4.2.0 @@ -33645,6 +33680,8 @@ snapshots: async: 0.2.10 which: 1.3.1 + follow-redirects@1.15.9: {} + follow-redirects@1.15.9(debug@4.3.7): optionalDependencies: debug: 4.3.7 @@ -34728,7 +34765,7 @@ snapshots: http-proxy-agent@7.0.2: dependencies: agent-base: 
7.1.3 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 transitivePeerDependencies: - supports-color @@ -34784,14 +34821,14 @@ snapshots: https-proxy-agent@5.0.1: dependencies: agent-base: 6.0.2 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 transitivePeerDependencies: - supports-color https-proxy-agent@7.0.6: dependencies: agent-base: 7.1.3 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 transitivePeerDependencies: - supports-color @@ -38110,7 +38147,7 @@ snapshots: '@yarnpkg/lockfile': 1.1.0 '@yarnpkg/parsers': 3.0.0-rc.46 '@zkochan/js-yaml': 0.0.7 - axios: 1.7.9(debug@4.4.0) + axios: 1.7.9 chalk: 4.1.0 cli-cursor: 3.1.0 cli-spinners: 2.6.1 @@ -40787,6 +40824,8 @@ snapshots: ajv-formats: 2.1.1(ajv@8.17.1) ajv-keywords: 5.1.0(ajv@8.17.1) + scmp@2.1.0: {} + scrypt-js@3.0.1: {} scryptsy@2.1.0: {} @@ -41173,7 +41212,7 @@ snapshots: socks-proxy-agent@8.0.5: dependencies: agent-base: 7.1.3 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 socks: 2.8.3 transitivePeerDependencies: - supports-color @@ -42148,7 +42187,7 @@ snapshots: cac: 6.7.14 chokidar: 4.0.3 consola: 3.3.3 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 esbuild: 0.24.2 joycon: 3.1.1 picocolors: 1.1.1 @@ -42182,7 +42221,7 @@ snapshots: tuf-js@2.2.1: dependencies: '@tufjs/models': 2.0.1 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 make-fetch-happen: 13.0.1 transitivePeerDependencies: - supports-color @@ -42228,6 +42267,19 @@ snapshots: tweetnacl@1.0.3: {} + twilio@5.4.0: + dependencies: + axios: 1.7.9 + dayjs: 1.11.13 + https-proxy-agent: 5.0.1 + jsonwebtoken: 9.0.2 + qs: 6.13.1 + scmp: 2.1.0 + xmlbuilder: 13.0.2 + transitivePeerDependencies: + - debug + - supports-color + twitter-api-v2@1.19.0: {} tx2@1.0.5: @@ -42863,7 +42915,7 @@ snapshots: vite-node@2.1.5(@types/node@22.10.4)(terser@5.37.0): dependencies: cac: 6.7.14 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 es-module-lexer: 1.6.0 pathe: 1.1.2 vite: 5.4.11(@types/node@22.10.4)(terser@5.37.0) @@ -42976,7 +43028,7 @@ snapshots: '@vitest/spy': 2.1.5 '@vitest/utils': 2.1.5 chai: 5.1.2 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 expect-type: 1.1.0 magic-string: 0.30.17 pathe: 1.1.2 @@ -43735,6 +43787,8 @@ snapshots: xml-name-validator@5.0.0: {} + xmlbuilder@13.0.2: {} + xmlchars@2.2.0: {} xtend@4.0.2: {} From e34e5ae9542d09ea986b5a1c8c62bb85bc9a86e9 Mon Sep 17 00:00:00 2001 From: Juan Carlos Claridad Date: Sun, 5 Jan 2025 04:14:57 +0800 Subject: [PATCH 03/19] added plugin-twilio to agent --- agent/package.json | 1 + agent/src/index.ts | 2 + pnpm-lock.yaml | 148 ++++++++++++--------------------------------- 3 files changed, 42 insertions(+), 109 deletions(-) diff --git a/agent/package.json b/agent/package.json index f4fa0f33e03..972a31a45be 100644 --- a/agent/package.json +++ b/agent/package.json @@ -61,6 +61,7 @@ "@elizaos/plugin-fuel": "workspace:*", "@elizaos/plugin-avalanche": "workspace:*", "@elizaos/plugin-web-search": "workspace:*", + "@elizaos/plugin-twilio": "workspace:*", "readline": "1.3.0", "ws": "8.18.0", "yargs": "17.7.2" diff --git a/agent/src/index.ts b/agent/src/index.ts index 53058cf4ece..e93231050e5 100644 --- a/agent/src/index.ts +++ b/agent/src/index.ts @@ -64,6 +64,7 @@ import { abstractPlugin } from "@elizaos/plugin-abstract"; import { avalanchePlugin } from "@elizaos/plugin-avalanche"; import { webSearchPlugin } from "@elizaos/plugin-web-search"; import { echoChamberPlugin } from "@elizaos/plugin-echochambers"; +import { twilioPlugin } from "@elizaos/plugin-twilio"; import Database from "better-sqlite3"; import fs 
from "fs"; import path from "path"; @@ -525,6 +526,7 @@ export async function createAgent( // character.plugins are handled when clients are added plugins: [ bootstrapPlugin, + twilioPlugin, getSecret(character, "CONFLUX_CORE_PRIVATE_KEY") ? confluxPlugin : null, diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 7881e624dee..3daece62773 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -222,6 +222,9 @@ importers: '@elizaos/plugin-ton': specifier: workspace:* version: link:../packages/plugin-ton + '@elizaos/plugin-twilio': + specifier: workspace:* + version: link:../packages/plugin-twilio '@elizaos/plugin-twitter': specifier: workspace:* version: link:../packages/plugin-twitter @@ -368,6 +371,9 @@ importers: '@docusaurus/preset-classic': specifier: 3.6.3 version: 3.6.3(@algolia/client-search@5.18.0)(@mdx-js/react@3.0.1(@types/react@18.3.12)(react@18.3.1))(@swc/core@1.10.4(@swc/helpers@0.5.15))(@types/react@18.3.12)(acorn@8.14.0)(bufferutil@4.0.9)(eslint@9.16.0(jiti@2.4.2))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(search-insights@2.17.3)(typescript@5.6.3)(utf-8-validate@5.0.10) + '@docusaurus/theme-common': + specifier: 3.6.3 + version: 3.6.3(@docusaurus/plugin-content-docs@3.6.3(@mdx-js/react@3.0.1(@types/react@18.3.12)(react@18.3.1))(@swc/core@1.10.4(@swc/helpers@0.5.15))(acorn@8.14.0)(bufferutil@4.0.9)(eslint@9.16.0(jiti@2.4.2))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.6.3)(utf-8-validate@5.0.10))(@swc/core@1.10.4(@swc/helpers@0.5.15))(acorn@8.14.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.6.3) '@docusaurus/theme-mermaid': specifier: 3.6.3 version: 3.6.3(@docusaurus/plugin-content-docs@3.6.3(@mdx-js/react@3.0.1(@types/react@18.3.12)(react@18.3.1))(@swc/core@1.10.4(@swc/helpers@0.5.15))(acorn@8.14.0)(bufferutil@4.0.9)(eslint@9.16.0(jiti@2.4.2))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.6.3)(utf-8-validate@5.0.10))(@mdx-js/react@3.0.1(@types/react@18.3.12)(react@18.3.1))(@swc/core@1.10.4(@swc/helpers@0.5.15))(acorn@8.14.0)(bufferutil@4.0.9)(eslint@9.16.0(jiti@2.4.2))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.6.3)(utf-8-validate@5.0.10) @@ -383,6 +389,9 @@ importers: dotenv: specifier: ^16.4.7 version: 16.4.7 + lunr: + specifier: 2.3.9 + version: 2.3.9 prism-react-renderer: specifier: 2.3.1 version: 2.3.1(react@18.3.1) @@ -1156,25 +1165,6 @@ importers: specifier: 7.1.0 version: 7.1.0 - packages/plugin-ferePro: - dependencies: - '@elizaos/core': - specifier: ^0.1.7-alpha.1 - version: 0.1.7-alpha.2(@google-cloud/vertexai@1.9.2(encoding@0.1.13))(@langchain/core@0.3.27(openai@4.73.0(encoding@0.1.13)(zod@3.23.8)))(axios@1.7.9)(encoding@0.1.13)(react@18.3.1)(sswr@2.1.0(svelte@5.16.1))(svelte@5.16.1) - tsup: - specifier: ^8.3.5 - version: 8.3.5(@swc/core@1.10.4(@swc/helpers@0.5.15))(jiti@2.4.2)(postcss@8.4.49)(tsx@4.19.2)(typescript@5.6.3)(yaml@2.7.0) - ws: - specifier: ^8.18.0 - version: 8.18.0(bufferutil@4.0.9)(utf-8-validate@5.0.10) - devDependencies: - '@types/ws': - specifier: ^8.5.13 - version: 8.5.13 - tsx: - specifier: ^4.19.2 - version: 4.19.2 - packages/plugin-flow: dependencies: '@elizaos/core': @@ -1875,6 +1865,9 @@ importers: twilio: specifier: ^5.4.0 version: 5.4.0 + whatwg-url: + specifier: 7.1.0 + version: 7.1.0 packages/plugin-twitter: dependencies: @@ -3883,9 +3876,6 @@ packages: peerDependencies: onnxruntime-node: 1.20.1 - '@elizaos/core@0.1.7-alpha.2': - resolution: {integrity: sha512-gNvFw/Xnv4dlcfmmKxRa+baKq6en4TitAjUGvo8LgAUkSk156A0fffJ0lAsc1rX8zMB5NsIqdvMCbwKxDd54OQ==} - '@emnapi/core@1.3.1': 
resolution: {integrity: sha512-pVGjBIt1Y6gg3EJN8jTcfpP/+uuRksIo055oE/OBkDNcjZqVbfkWCksG1Jp4yZnj3iKWyWX8fdG/j6UDYPbFog==} @@ -19646,7 +19636,7 @@ snapshots: '@acuminous/bitsyntax@0.1.2': dependencies: buffer-more-ints: 1.0.0 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) safe-buffer: 5.1.2 transitivePeerDependencies: - supports-color @@ -21579,7 +21569,7 @@ snapshots: dependencies: '@scure/bip32': 1.6.1 abitype: 1.0.8(typescript@5.6.3)(zod@3.23.8) - axios: 1.7.9 + axios: 1.7.9(debug@4.4.0) axios-mock-adapter: 1.22.0(axios@1.7.9) axios-retry: 4.5.0(axios@1.7.9) bip32: 4.0.0 @@ -23045,56 +23035,6 @@ snapshots: '@huggingface/jinja': 0.2.2 onnxruntime-node: 1.20.1 - '@elizaos/core@0.1.7-alpha.2(@google-cloud/vertexai@1.9.2(encoding@0.1.13))(@langchain/core@0.3.27(openai@4.73.0(encoding@0.1.13)(zod@3.23.8)))(axios@1.7.9)(encoding@0.1.13)(react@18.3.1)(sswr@2.1.0(svelte@5.16.1))(svelte@5.16.1)': - dependencies: - '@ai-sdk/anthropic': 0.0.56(zod@3.23.8) - '@ai-sdk/google': 0.0.55(zod@3.23.8) - '@ai-sdk/google-vertex': 0.0.43(@google-cloud/vertexai@1.9.2(encoding@0.1.13))(zod@3.23.8) - '@ai-sdk/groq': 0.0.3(zod@3.23.8) - '@ai-sdk/openai': 1.0.5(zod@3.23.8) - '@anthropic-ai/sdk': 0.30.1(encoding@0.1.13) - '@fal-ai/client': 1.2.0 - '@types/uuid': 10.0.0 - ai: 3.4.33(openai@4.73.0(encoding@0.1.13)(zod@3.23.8))(react@18.3.1)(sswr@2.1.0(svelte@5.16.1))(svelte@5.16.1)(vue@3.5.13(typescript@5.6.3))(zod@3.23.8) - anthropic-vertex-ai: 1.0.2(encoding@0.1.13)(zod@3.23.8) - fastembed: 1.14.1 - fastestsmallesttextencoderdecoder: 1.0.22 - gaxios: 6.7.1(encoding@0.1.13) - glob: 11.0.0 - handlebars: 4.7.8 - js-sha1: 0.7.0 - js-tiktoken: 1.0.15 - langchain: 0.3.6(@langchain/core@0.3.27(openai@4.73.0(encoding@0.1.13)(zod@3.23.8)))(axios@1.7.9)(encoding@0.1.13)(handlebars@4.7.8)(openai@4.73.0(encoding@0.1.13)(zod@3.23.8)) - ollama-ai-provider: 0.16.1(zod@3.23.8) - openai: 4.73.0(encoding@0.1.13)(zod@3.23.8) - tinyld: 1.3.4 - together-ai: 0.7.0(encoding@0.1.13) - unique-names-generator: 4.7.1 - uuid: 11.0.3 - zod: 3.23.8 - transitivePeerDependencies: - - '@google-cloud/vertexai' - - '@langchain/anthropic' - - '@langchain/aws' - - '@langchain/cohere' - - '@langchain/core' - - '@langchain/google-genai' - - '@langchain/google-vertexai' - - '@langchain/groq' - - '@langchain/mistralai' - - '@langchain/ollama' - - axios - - cheerio - - encoding - - peggy - - react - - solid-js - - sswr - - supports-color - - svelte - - typeorm - - vue - '@emnapi/core@1.3.1': dependencies: '@emnapi/wasi-threads': 1.0.1 @@ -23414,7 +23354,7 @@ snapshots: '@eslint/config-array@0.19.1': dependencies: '@eslint/object-schema': 2.1.5 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) minimatch: 3.1.2 transitivePeerDependencies: - supports-color @@ -23440,7 +23380,7 @@ snapshots: '@eslint/eslintrc@3.2.0': dependencies: ajv: 6.12.6 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) espree: 10.3.0 globals: 14.0.0 ignore: 5.3.2 @@ -28854,7 +28794,7 @@ snapshots: '@typescript-eslint/types': 8.16.0 '@typescript-eslint/typescript-estree': 8.16.0(typescript@5.6.3) '@typescript-eslint/visitor-keys': 8.16.0 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) eslint: 9.16.0(jiti@2.4.2) optionalDependencies: typescript: 5.6.3 @@ -28887,7 +28827,7 @@ snapshots: dependencies: '@typescript-eslint/typescript-estree': 8.16.0(typescript@5.6.3) '@typescript-eslint/utils': 8.16.0(eslint@9.16.0(jiti@2.4.2))(typescript@5.6.3) - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) eslint: 9.16.0(jiti@2.4.2) ts-api-utils: 1.4.3(typescript@5.6.3) optionalDependencies: 
@@ -28918,7 +28858,7 @@ snapshots: dependencies: '@typescript-eslint/types': 8.16.0 '@typescript-eslint/visitor-keys': 8.16.0 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) fast-glob: 3.3.2 is-glob: 4.0.3 minimatch: 9.0.5 @@ -29708,7 +29648,7 @@ snapshots: agent-base@6.0.2: dependencies: - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) transitivePeerDependencies: - supports-color @@ -30100,13 +30040,13 @@ snapshots: axios-mock-adapter@1.22.0(axios@1.7.9): dependencies: - axios: 1.7.9 + axios: 1.7.9(debug@4.4.0) fast-deep-equal: 3.1.3 is-buffer: 2.0.5 axios-retry@4.5.0(axios@1.7.9): dependencies: - axios: 1.7.9 + axios: 1.7.9(debug@4.4.0) is-retry-allowed: 2.2.0 axios@0.21.4: @@ -30117,7 +30057,7 @@ snapshots: axios@0.27.2: dependencies: - follow-redirects: 1.15.9 + follow-redirects: 1.15.9(debug@4.4.0) form-data: 4.0.1 transitivePeerDependencies: - debug @@ -30146,14 +30086,6 @@ snapshots: transitivePeerDependencies: - debug - axios@1.7.9: - dependencies: - follow-redirects: 1.15.9 - form-data: 4.0.1 - proxy-from-env: 1.1.0 - transitivePeerDependencies: - - debug - axios@1.7.9(debug@4.4.0): dependencies: follow-redirects: 1.15.9(debug@4.4.0) @@ -32189,10 +32121,6 @@ snapshots: dependencies: ms: 2.1.3 - debug@4.4.0: - dependencies: - ms: 2.1.3 - debug@4.4.0(supports-color@5.5.0): dependencies: ms: 2.1.3 @@ -32935,6 +32863,7 @@ snapshots: '@esbuild/win32-arm64': 0.23.1 '@esbuild/win32-ia32': 0.23.1 '@esbuild/win32-x64': 0.23.1 + optional: true esbuild@0.24.2: optionalDependencies: @@ -33094,7 +33023,7 @@ snapshots: ajv: 6.12.6 chalk: 4.1.2 cross-spawn: 7.0.6 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) escape-string-regexp: 4.0.0 eslint-scope: 8.2.0 eslint-visitor-keys: 4.2.0 @@ -33680,8 +33609,6 @@ snapshots: async: 0.2.10 which: 1.3.1 - follow-redirects@1.15.9: {} - follow-redirects@1.15.9(debug@4.3.7): optionalDependencies: debug: 4.3.7 @@ -34016,6 +33943,7 @@ snapshots: get-tsconfig@4.8.1: dependencies: resolve-pkg-maps: 1.0.0 + optional: true get-uri@6.0.4: dependencies: @@ -34765,7 +34693,7 @@ snapshots: http-proxy-agent@7.0.2: dependencies: agent-base: 7.1.3 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) transitivePeerDependencies: - supports-color @@ -34821,14 +34749,14 @@ snapshots: https-proxy-agent@5.0.1: dependencies: agent-base: 6.0.2 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) transitivePeerDependencies: - supports-color https-proxy-agent@7.0.6: dependencies: agent-base: 7.1.3 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) transitivePeerDependencies: - supports-color @@ -38147,7 +38075,7 @@ snapshots: '@yarnpkg/lockfile': 1.1.0 '@yarnpkg/parsers': 3.0.0-rc.46 '@zkochan/js-yaml': 0.0.7 - axios: 1.7.9 + axios: 1.7.9(debug@4.4.0) chalk: 4.1.0 cli-cursor: 3.1.0 cli-spinners: 2.6.1 @@ -40590,7 +40518,8 @@ snapshots: resolve-pathname@3.0.0: {} - resolve-pkg-maps@1.0.0: {} + resolve-pkg-maps@1.0.0: + optional: true resolve.exports@2.0.3: {} @@ -41212,7 +41141,7 @@ snapshots: socks-proxy-agent@8.0.5: dependencies: agent-base: 7.1.3 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) socks: 2.8.3 transitivePeerDependencies: - supports-color @@ -42187,7 +42116,7 @@ snapshots: cac: 6.7.14 chokidar: 4.0.3 consola: 3.3.3 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) esbuild: 0.24.2 joycon: 3.1.1 picocolors: 1.1.1 @@ -42215,13 +42144,14 @@ snapshots: get-tsconfig: 4.8.1 optionalDependencies: fsevents: 2.3.3 + optional: true tty-browserify@0.0.1: {} tuf-js@2.2.1: dependencies: '@tufjs/models': 2.0.1 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) 
make-fetch-happen: 13.0.1 transitivePeerDependencies: - supports-color @@ -42269,7 +42199,7 @@ snapshots: twilio@5.4.0: dependencies: - axios: 1.7.9 + axios: 1.7.9(debug@4.4.0) dayjs: 1.11.13 https-proxy-agent: 5.0.1 jsonwebtoken: 9.0.2 @@ -42915,7 +42845,7 @@ snapshots: vite-node@2.1.5(@types/node@22.10.4)(terser@5.37.0): dependencies: cac: 6.7.14 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) es-module-lexer: 1.6.0 pathe: 1.1.2 vite: 5.4.11(@types/node@22.10.4)(terser@5.37.0) @@ -43028,7 +42958,7 @@ snapshots: '@vitest/spy': 2.1.5 '@vitest/utils': 2.1.5 chai: 5.1.2 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) expect-type: 1.1.0 magic-string: 0.30.17 pathe: 1.1.2 From 026c82189a96d8506bc7d16c95137256ea6afc21 Mon Sep 17 00:00:00 2001 From: Juan Carlos Claridad Date: Sun, 5 Jan 2025 10:38:59 +0800 Subject: [PATCH 04/19] added check gated the plugin-twilio in agent runtime only add it when required env vars are found --- agent/src/index.ts | 7 ++++++- packages/plugin-twilio/src/actions/sendSms.ts | 9 ++------- .../src/actions/sendWhatsAppMessage.ts | 14 ++++++++------ 3 files changed, 16 insertions(+), 14 deletions(-) diff --git a/agent/src/index.ts b/agent/src/index.ts index e93231050e5..87e01cf3063 100644 --- a/agent/src/index.ts +++ b/agent/src/index.ts @@ -526,7 +526,12 @@ export async function createAgent( // character.plugins are handled when clients are added plugins: [ bootstrapPlugin, - twilioPlugin, + getSecret(character, "TWILIO_ACCOUNT_SID") && + getSecret(character, "TWILIO_AUTH_TOKEN") && + getSecret(character, "TWILIO_PHONE_NUMBER") && + getSecret(character, "TWILIO_WHATSAPP_PHONE_NUMBER") + ? twilioPlugin + : null, getSecret(character, "CONFLUX_CORE_PRIVATE_KEY") ? confluxPlugin : null, diff --git a/packages/plugin-twilio/src/actions/sendSms.ts b/packages/plugin-twilio/src/actions/sendSms.ts index c3b8c6311ff..cae33f12558 100644 --- a/packages/plugin-twilio/src/actions/sendSms.ts +++ b/packages/plugin-twilio/src/actions/sendSms.ts @@ -31,8 +31,6 @@ export const sendSmsAction: Action = { const accountSid = process.env.TWILIO_ACCOUNT_SID; const authToken = process.env.TWILIO_AUTH_TOKEN; - console.log("CHECK _message: ",_message.content.text); - if (!accountSid || !authToken) { console.error('TWILIO_ACCOUNT_SID or TWILIO_AUTH_TOKEN is not set'); return false; @@ -67,10 +65,6 @@ export const sendSmsAction: Action = { const twilioNumber = process.env.TWILIO_PHONE_NUMBER; // Your Twilio phone number - console.log('check target mobile number: ', mobileNumberProvidedByUser); - console.log('check messageToSendFromUser: ', messageToSendFromUser); - console.log('check twilioNumber: ', twilioNumber); - if (!twilioNumber) { console.error('Twilio phone number is missing'); @@ -140,7 +134,8 @@ export const sendSmsAction: Action = { from: twilioNumber, // Your Twilio phone number }); - console.log("message body: ", message); + // for debug purposes uncomment this + console.log("check twilio message body: ", message); const messageFromAgent = `SMS sent successfully to ${mobileNumberProvidedByUser}`; diff --git a/packages/plugin-twilio/src/actions/sendWhatsAppMessage.ts b/packages/plugin-twilio/src/actions/sendWhatsAppMessage.ts index f9ec7744411..0c76fd48ca1 100644 --- a/packages/plugin-twilio/src/actions/sendWhatsAppMessage.ts +++ b/packages/plugin-twilio/src/actions/sendWhatsAppMessage.ts @@ -63,15 +63,13 @@ export const sendWhatsAppMessageAction: Action = { } } - const twilioNumber = process.env.TWILIO_WHATSAPP_PHONE_NUMBER; // Your Twilio WhatsApp number - - console.log('check 
target mobile number: ', mobileNumberProvidedByUser); - console.log('check messageToSendFromUser: ', messageToSendFromUser); - console.log('check twilioNumber: ', twilioNumber); + // Your Twilio WhatsApp enabled phone number this is a different from twilio regular phone number + const twilioNumber = process.env.TWILIO_WHATSAPP_PHONE_NUMBER; if (!mobileNumberProvidedByUser) { console.error('Mobile number is missing'); + //TODO: this can be improve by letting the AI Agent generate his/her own reply for the specific issue _callback({ text: `Sorry there was an issue sending the WhatsApp message, please try again later`, }); @@ -81,6 +79,7 @@ export const sendWhatsAppMessageAction: Action = { if (!twilioNumber) { console.error('Twilio WhatsApp number is missing'); + //TODO: this can be improve by letting the AI Agent generate his/her own reply for the specific issue _callback({ text: `Sorry there was an issue sending the WhatsApp message, please try again later`, }); @@ -90,6 +89,7 @@ export const sendWhatsAppMessageAction: Action = { if(messageToSendFromUser==null){ console.error('messageToSendFromUser is empty or null'); + //TODO: this can be improve by letting the AI Agent generate his/her own reply for the specific issue _callback({ text: `Sorry there was an issue sending the WhatsApp message, please try again later`, }); @@ -107,8 +107,10 @@ export const sendWhatsAppMessageAction: Action = { from: `whatsapp:${twilioNumber}`, // Your Twilio WhatsApp number }); - console.log("message body: ", message); + // for debug purposes uncomment this + console.log("check twilio message body: ", message); + //TODO: this can be improve by letting the AI Agent generate his/her own reply to user const messageFromAgent = `WhatsApp message sent successfully to ${mobileNumberProvidedByUser}`; // Call the callback to notify the user From 7370922a892bd0c8a51154f501fdf224d61d4ffd Mon Sep 17 00:00:00 2001 From: Juan Carlos Claridad Date: Sun, 5 Jan 2025 19:47:11 +0800 Subject: [PATCH 05/19] added core from main --- packages/core/generation.ts | 1970 ----------------- packages/core/models.ts | 542 ----- packages/core/src/database/CircuitBreaker.ts | 5 +- packages/core/src/environment.ts | 5 +- packages/core/src/models.ts | 36 +- packages/core/src/runtime.ts | 37 +- .../core/src/test_resources/createRuntime.ts | 2 +- packages/core/src/tests/actions.test.ts | 63 +- packages/core/src/tests/context.test.ts | 2 +- packages/core/src/tests/environment.test.ts | 208 +- packages/core/src/tests/knowledge.test.ts | 155 +- packages/core/src/tests/messages.test.ts | 213 +- packages/core/src/tests/models.test.ts | 120 +- packages/core/src/tests/parsing.test.ts | 126 +- packages/core/src/tests/runtime.test.ts | 8 +- packages/core/src/types.ts | 1 - packages/core/tsconfig.build.json | 14 +- packages/core/tsconfig.json | 20 +- packages/core/types.ts | 1332 ----------- 19 files changed, 420 insertions(+), 4439 deletions(-) delete mode 100644 packages/core/generation.ts delete mode 100644 packages/core/models.ts delete mode 100644 packages/core/types.ts diff --git a/packages/core/generation.ts b/packages/core/generation.ts deleted file mode 100644 index 583e6787936..00000000000 --- a/packages/core/generation.ts +++ /dev/null @@ -1,1970 +0,0 @@ -import { createAnthropic } from "@ai-sdk/anthropic"; -import { createGoogleGenerativeAI } from "@ai-sdk/google"; -import { createGroq } from "@ai-sdk/groq"; -import { createOpenAI } from "@ai-sdk/openai"; -import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"; -import { - 
generateObject as aiGenerateObject, - generateText as aiGenerateText, - CoreTool, - GenerateObjectResult, - StepResult as AIStepResult, -} from "ai"; -import { Buffer } from "buffer"; -import { createOllama } from "ollama-ai-provider"; -import OpenAI from "openai"; -import { encodingForModel, TiktokenModel } from "js-tiktoken"; -import { AutoTokenizer } from "@huggingface/transformers"; -import Together from "together-ai"; -import { ZodSchema } from "zod"; -import { elizaLogger } from "./index.ts"; -import { getModel, models } from "./models.ts"; -import { - parseBooleanFromText, - parseJsonArrayFromText, - parseJSONObjectFromText, - parseShouldRespondFromText, - parseActionResponseFromText, -} from "./parsing.ts"; -import settings from "./settings.ts"; -import { - Content, - IAgentRuntime, - IImageDescriptionService, - ITextGenerationService, - ModelClass, - ModelProviderName, - ServiceType, - SearchResponse, - ActionResponse, - TelemetrySettings, - TokenizerType, -} from "./types.ts"; -import { fal } from "@fal-ai/client"; -import { tavily } from "@tavily/core"; - -type Tool = CoreTool; -type StepResult = AIStepResult; - -/** - * Trims the provided text context to a specified token limit using a tokenizer model and type. - * - * The function dynamically determines the truncation method based on the tokenizer settings - * provided by the runtime. If no tokenizer settings are defined, it defaults to using the - * TikToken truncation method with the "gpt-4o" model. - * - * @async - * @function trimTokens - * @param {string} context - The text to be tokenized and trimmed. - * @param {number} maxTokens - The maximum number of tokens allowed after truncation. - * @param {IAgentRuntime} runtime - The runtime interface providing tokenizer settings. - * - * @returns {Promise} A promise that resolves to the trimmed text. - * - * @throws {Error} Throws an error if the runtime settings are invalid or missing required fields. - * - * @example - * const trimmedText = await trimTokens("This is an example text", 50, runtime); - * console.log(trimmedText); // Output will be a truncated version of the input text. 
- */ -export async function trimTokens( - context: string, - maxTokens: number, - runtime: IAgentRuntime -) { - if (!context) return ""; - if (maxTokens <= 0) throw new Error("maxTokens must be positive"); - - const tokenizerModel = runtime.getSetting("TOKENIZER_MODEL"); - const tokenizerType = runtime.getSetting("TOKENIZER_TYPE"); - - if (!tokenizerModel || !tokenizerType) { - // Default to TikToken truncation using the "gpt-4o" model if tokenizer settings are not defined - return truncateTiktoken("gpt-4o", context, maxTokens); - } - - // Choose the truncation method based on tokenizer type - if (tokenizerType === TokenizerType.Auto) { - return truncateAuto(tokenizerModel, context, maxTokens); - } - - if (tokenizerType === TokenizerType.TikToken) { - return truncateTiktoken( - tokenizerModel as TiktokenModel, - context, - maxTokens - ); - } - - elizaLogger.warn(`Unsupported tokenizer type: ${tokenizerType}`); - return truncateTiktoken("gpt-4o", context, maxTokens); -} - -async function truncateAuto( - modelPath: string, - context: string, - maxTokens: number -) { - try { - const tokenizer = await AutoTokenizer.from_pretrained(modelPath); - const tokens = tokenizer.encode(context); - - // If already within limits, return unchanged - if (tokens.length <= maxTokens) { - return context; - } - - // Keep the most recent tokens by slicing from the end - const truncatedTokens = tokens.slice(-maxTokens); - - // Decode back to text - js-tiktoken decode() returns a string directly - return tokenizer.decode(truncatedTokens); - } catch (error) { - elizaLogger.error("Error in trimTokens:", error); - // Return truncated string if tokenization fails - return context.slice(-maxTokens * 4); // Rough estimate of 4 chars per token - } -} - -async function truncateTiktoken( - model: TiktokenModel, - context: string, - maxTokens: number -) { - try { - const encoding = encodingForModel(model); - - // Encode the text into tokens - const tokens = encoding.encode(context); - - // If already within limits, return unchanged - if (tokens.length <= maxTokens) { - return context; - } - - // Keep the most recent tokens by slicing from the end - const truncatedTokens = tokens.slice(-maxTokens); - - // Decode back to text - js-tiktoken decode() returns a string directly - return encoding.decode(truncatedTokens); - } catch (error) { - elizaLogger.error("Error in trimTokens:", error); - // Return truncated string if tokenization fails - return context.slice(-maxTokens * 4); // Rough estimate of 4 chars per token - } -} - -/** - * Send a message to the model for a text generateText - receive a string back and parse how you'd like - * @param opts - The options for the generateText request. - * @param opts.context The context of the message to be completed. - * @param opts.stop A list of strings to stop the generateText at. - * @param opts.model The model to use for generateText. - * @param opts.frequency_penalty The frequency penalty to apply to the generateText. - * @param opts.presence_penalty The presence penalty to apply to the generateText. - * @param opts.temperature The temperature to apply to the generateText. - * @param opts.max_context_length The maximum length of the context to apply to the generateText. - * @returns The completed message. 
- */ - -export async function generateText({ - runtime, - context, - modelClass, - tools = {}, - onStepFinish, - maxSteps = 1, - stop, - customSystemPrompt, -}: { - runtime: IAgentRuntime; - context: string; - modelClass: string; - tools?: Record; - onStepFinish?: (event: StepResult) => Promise | void; - maxSteps?: number; - stop?: string[]; - customSystemPrompt?: string; -}): Promise { - if (!context) { - console.error("generateText context is empty"); - return ""; - } - - elizaLogger.log("Generating text..."); - - elizaLogger.info("Generating text with options:", { - modelProvider: runtime.modelProvider, - model: modelClass, - }); - - const provider = runtime.modelProvider; - const endpoint = - runtime.character.modelEndpointOverride || models[provider].endpoint; - let model = models[provider].model[modelClass]; - - // allow character.json settings => secrets to override models - // FIXME: add MODEL_MEDIUM support - switch (provider) { - // if runtime.getSetting("LLAMACLOUD_MODEL_LARGE") is true and modelProvider is LLAMACLOUD, then use the large model - case ModelProviderName.LLAMACLOUD: - { - switch (modelClass) { - case ModelClass.LARGE: - { - model = - runtime.getSetting("LLAMACLOUD_MODEL_LARGE") || - model; - } - break; - case ModelClass.SMALL: - { - model = - runtime.getSetting("LLAMACLOUD_MODEL_SMALL") || - model; - } - break; - } - } - break; - case ModelProviderName.TOGETHER: - { - switch (modelClass) { - case ModelClass.LARGE: - { - model = - runtime.getSetting("TOGETHER_MODEL_LARGE") || - model; - } - break; - case ModelClass.SMALL: - { - model = - runtime.getSetting("TOGETHER_MODEL_SMALL") || - model; - } - break; - } - } - break; - case ModelProviderName.OPENROUTER: - { - switch (modelClass) { - case ModelClass.LARGE: - { - model = - runtime.getSetting("LARGE_OPENROUTER_MODEL") || - model; - } - break; - case ModelClass.SMALL: - { - model = - runtime.getSetting("SMALL_OPENROUTER_MODEL") || - model; - } - break; - } - } - break; - } - - elizaLogger.info("Selected model:", model); - - const modelConfiguration = runtime.character?.settings?.modelConfig; - const temperature = - modelConfiguration?.temperature || - models[provider].settings.temperature; - const frequency_penalty = - modelConfiguration?.frequency_penalty || - models[provider].settings.frequency_penalty; - const presence_penalty = - modelConfiguration?.presence_penalty || - models[provider].settings.presence_penalty; - const max_context_length = - modelConfiguration?.maxInputTokens || - models[provider].settings.maxInputTokens; - const max_response_length = - modelConfiguration?.max_response_length || - models[provider].settings.maxOutputTokens; - const experimental_telemetry = - modelConfiguration?.experimental_telemetry || - models[provider].settings.experimental_telemetry; - - const apiKey = runtime.token; - - try { - elizaLogger.debug( - `Trimming context to max length of ${max_context_length} tokens.` - ); - - context = await trimTokens(context, max_context_length, runtime); - - let response: string; - - const _stop = stop || models[provider].settings.stop; - elizaLogger.debug( - `Using provider: ${provider}, model: ${model}, temperature: ${temperature}, max response length: ${max_response_length}` - ); - - switch (provider) { - // OPENAI & LLAMACLOUD shared same structure. 
- case ModelProviderName.OPENAI: - case ModelProviderName.ALI_BAILIAN: - case ModelProviderName.VOLENGINE: - case ModelProviderName.LLAMACLOUD: - case ModelProviderName.NANOGPT: - case ModelProviderName.HYPERBOLIC: - case ModelProviderName.TOGETHER: - case ModelProviderName.AKASH_CHAT_API: { - elizaLogger.debug("Initializing OpenAI model."); - const openai = createOpenAI({ - apiKey, - baseURL: endpoint, - fetch: runtime.fetch, - }); - - const { text: openaiResponse } = await aiGenerateText({ - model: openai.languageModel(model), - prompt: context, - system: - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? - undefined, - tools: tools, - onStepFinish: onStepFinish, - maxSteps: maxSteps, - temperature: temperature, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - experimental_telemetry: experimental_telemetry, - }); - - response = openaiResponse; - elizaLogger.debug("Received response from OpenAI model."); - break; - } - - case ModelProviderName.ETERNALAI: { - elizaLogger.debug("Initializing EternalAI model."); - const openai = createOpenAI({ - apiKey, - baseURL: endpoint, - fetch: async (url: string, options: any) => { - const fetching = await runtime.fetch(url, options); - if ( - parseBooleanFromText( - runtime.getSetting("ETERNAL_AI_LOG_REQUEST") - ) - ) { - elizaLogger.info( - "Request data: ", - JSON.stringify(options, null, 2) - ); - const clonedResponse = fetching.clone(); - clonedResponse.json().then((data) => { - elizaLogger.info( - "Response data: ", - JSON.stringify(data, null, 2) - ); - }); - } - return fetching; - }, - }); - - const { text: openaiResponse } = await aiGenerateText({ - model: openai.languageModel(model), - prompt: context, - system: - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? - undefined, - temperature: temperature, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - }); - - response = openaiResponse; - elizaLogger.debug("Received response from EternalAI model."); - break; - } - - case ModelProviderName.GOOGLE: { - const google = createGoogleGenerativeAI({ - apiKey, - fetch: runtime.fetch, - }); - - const { text: googleResponse } = await aiGenerateText({ - model: google(model), - prompt: context, - system: - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? - undefined, - tools: tools, - onStepFinish: onStepFinish, - maxSteps: maxSteps, - temperature: temperature, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - experimental_telemetry: experimental_telemetry, - }); - - response = googleResponse; - elizaLogger.debug("Received response from Google model."); - break; - } - - case ModelProviderName.ANTHROPIC: { - elizaLogger.debug("Initializing Anthropic model."); - - const anthropic = createAnthropic({ - apiKey, - fetch: runtime.fetch, - }); - - const { text: anthropicResponse } = await aiGenerateText({ - model: anthropic.languageModel(model), - prompt: context, - system: - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? 
- undefined, - tools: tools, - onStepFinish: onStepFinish, - maxSteps: maxSteps, - temperature: temperature, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - experimental_telemetry: experimental_telemetry, - }); - - response = anthropicResponse; - elizaLogger.debug("Received response from Anthropic model."); - break; - } - - case ModelProviderName.CLAUDE_VERTEX: { - elizaLogger.debug("Initializing Claude Vertex model."); - - const anthropic = createAnthropic({ - apiKey, - fetch: runtime.fetch, - }); - - const { text: anthropicResponse } = await aiGenerateText({ - model: anthropic.languageModel(model), - prompt: context, - system: - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? - undefined, - tools: tools, - onStepFinish: onStepFinish, - maxSteps: maxSteps, - temperature: temperature, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - experimental_telemetry: experimental_telemetry, - }); - - response = anthropicResponse; - elizaLogger.debug( - "Received response from Claude Vertex model." - ); - break; - } - - case ModelProviderName.GROK: { - elizaLogger.debug("Initializing Grok model."); - const grok = createOpenAI({ - apiKey, - baseURL: endpoint, - fetch: runtime.fetch, - }); - - const { text: grokResponse } = await aiGenerateText({ - model: grok.languageModel(model, { - parallelToolCalls: false, - }), - prompt: context, - system: - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? - undefined, - tools: tools, - onStepFinish: onStepFinish, - maxSteps: maxSteps, - temperature: temperature, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - experimental_telemetry: experimental_telemetry, - }); - - response = grokResponse; - elizaLogger.debug("Received response from Grok model."); - break; - } - - case ModelProviderName.GROQ: { - const groq = createGroq({ apiKey, fetch: runtime.fetch }); - - const { text: groqResponse } = await aiGenerateText({ - model: groq.languageModel(model), - prompt: context, - temperature: temperature, - system: - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? - undefined, - tools: tools, - onStepFinish: onStepFinish, - maxSteps: maxSteps, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - experimental_telemetry: experimental_telemetry, - }); - - response = groqResponse; - break; - } - - case ModelProviderName.LLAMALOCAL: { - elizaLogger.debug( - "Using local Llama model for text completion." - ); - const textGenerationService = - runtime.getService( - ServiceType.TEXT_GENERATION - ); - - if (!textGenerationService) { - throw new Error("Text generation service not found"); - } - - response = await textGenerationService.queueTextCompletion( - context, - temperature, - _stop, - frequency_penalty, - presence_penalty, - max_response_length - ); - elizaLogger.debug("Received response from local Llama model."); - break; - } - - case ModelProviderName.REDPILL: { - elizaLogger.debug("Initializing RedPill model."); - const serverUrl = models[provider].endpoint; - const openai = createOpenAI({ - apiKey, - baseURL: serverUrl, - fetch: runtime.fetch, - }); - - const { text: redpillResponse } = await aiGenerateText({ - model: openai.languageModel(model), - prompt: context, - temperature: temperature, - system: - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? 
- undefined, - tools: tools, - onStepFinish: onStepFinish, - maxSteps: maxSteps, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - experimental_telemetry: experimental_telemetry, - }); - - response = redpillResponse; - elizaLogger.debug("Received response from redpill model."); - break; - } - - case ModelProviderName.OPENROUTER: { - elizaLogger.debug("Initializing OpenRouter model."); - const serverUrl = models[provider].endpoint; - const openrouter = createOpenAI({ - apiKey, - baseURL: serverUrl, - fetch: runtime.fetch, - }); - - const { text: openrouterResponse } = await aiGenerateText({ - model: openrouter.languageModel(model), - prompt: context, - temperature: temperature, - system: - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? - undefined, - tools: tools, - onStepFinish: onStepFinish, - maxSteps: maxSteps, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - experimental_telemetry: experimental_telemetry, - }); - - response = openrouterResponse; - elizaLogger.debug("Received response from OpenRouter model."); - break; - } - - case ModelProviderName.OLLAMA: - { - elizaLogger.debug("Initializing Ollama model."); - - const ollamaProvider = createOllama({ - baseURL: models[provider].endpoint + "/api", - fetch: runtime.fetch, - }); - const ollama = ollamaProvider(model); - - elizaLogger.debug("****** MODEL\n", model); - - const { text: ollamaResponse } = await aiGenerateText({ - model: ollama, - prompt: context, - tools: tools, - onStepFinish: onStepFinish, - temperature: temperature, - maxSteps: maxSteps, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - experimental_telemetry: experimental_telemetry, - }); - - response = ollamaResponse; - } - elizaLogger.debug("Received response from Ollama model."); - break; - - case ModelProviderName.HEURIST: { - elizaLogger.debug("Initializing Heurist model."); - const heurist = createOpenAI({ - apiKey: apiKey, - baseURL: endpoint, - fetch: runtime.fetch, - }); - - const { text: heuristResponse } = await aiGenerateText({ - model: heurist.languageModel(model), - prompt: context, - system: - customSystemPrompt ?? - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? 
- undefined, - tools: tools, - onStepFinish: onStepFinish, - temperature: temperature, - maxTokens: max_response_length, - maxSteps: maxSteps, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - experimental_telemetry: experimental_telemetry, - }); - - response = heuristResponse; - elizaLogger.debug("Received response from Heurist model."); - break; - } - case ModelProviderName.GAIANET: { - elizaLogger.debug("Initializing GAIANET model."); - - var baseURL = models[provider].endpoint; - if (!baseURL) { - switch (modelClass) { - case ModelClass.SMALL: - baseURL = - settings.SMALL_GAIANET_SERVER_URL || - "https://llama3b.gaia.domains/v1"; - break; - case ModelClass.MEDIUM: - baseURL = - settings.MEDIUM_GAIANET_SERVER_URL || - "https://llama8b.gaia.domains/v1"; - break; - case ModelClass.LARGE: - baseURL = - settings.LARGE_GAIANET_SERVER_URL || - "https://qwen72b.gaia.domains/v1"; - break; - } - } - - elizaLogger.debug("Using GAIANET model with baseURL:", baseURL); - - const openai = createOpenAI({ - apiKey, - baseURL: endpoint, - fetch: runtime.fetch, - }); - - const { text: openaiResponse } = await aiGenerateText({ - model: openai.languageModel(model), - prompt: context, - system: - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? - undefined, - tools: tools, - onStepFinish: onStepFinish, - maxSteps: maxSteps, - temperature: temperature, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - experimental_telemetry: experimental_telemetry, - }); - - response = openaiResponse; - elizaLogger.debug("Received response from GAIANET model."); - break; - } - - case ModelProviderName.GALADRIEL: { - elizaLogger.debug("Initializing Galadriel model."); - const galadriel = createOpenAI({ - apiKey: apiKey, - baseURL: endpoint, - fetch: runtime.fetch, - }); - - const { text: galadrielResponse } = await aiGenerateText({ - model: galadriel.languageModel(model), - prompt: context, - system: - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? - undefined, - tools: tools, - onStepFinish: onStepFinish, - maxSteps: maxSteps, - temperature: temperature, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - experimental_telemetry: experimental_telemetry, - }); - - response = galadrielResponse; - elizaLogger.debug("Received response from Galadriel model."); - break; - } - - case ModelProviderName.VENICE: { - elizaLogger.debug("Initializing Venice model."); - const venice = createOpenAI({ - apiKey: apiKey, - baseURL: endpoint, - }); - - const { text: veniceResponse } = await aiGenerateText({ - model: venice.languageModel(model), - prompt: context, - system: - runtime.character.system ?? - settings.SYSTEM_PROMPT ?? - undefined, - tools: tools, - onStepFinish: onStepFinish, - temperature: temperature, - maxSteps: maxSteps, - maxTokens: max_response_length, - }); - - response = veniceResponse; - elizaLogger.debug("Received response from Venice model."); - break; - } - - case ModelProviderName.INFERA: { - elizaLogger.debug("Initializing Infera model."); - const apiKey = settings.INFERA_API_KEY || runtime.token; - - const infera = createOpenAI({ - apiKey, - baseURL: endpoint, - headers: { - 'api_key': apiKey, - 'Content-Type': 'application/json' - } - }); - - const { text: inferaResponse } = await aiGenerateText({ - model: infera.languageModel(model), - prompt: context, - system: runtime.character.system ?? settings.SYSTEM_PROMPT ?? 
undefined, - temperature: temperature, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - }); - - response = inferaResponse; - elizaLogger.debug("Received response from Infera model."); - break; - } - - default: { - const errorMessage = `Unsupported provider: ${provider}`; - elizaLogger.error(errorMessage); - throw new Error(errorMessage); - } - } - - return response; - } catch (error) { - elizaLogger.error("Error in generateText:", error); - throw error; - } -} - -/** - * Sends a message to the model to determine if it should respond to the given context. - * @param opts - The options for the generateText request - * @param opts.context The context to evaluate for response - * @param opts.stop A list of strings to stop the generateText at - * @param opts.model The model to use for generateText - * @param opts.frequency_penalty The frequency penalty to apply (0.0 to 2.0) - * @param opts.presence_penalty The presence penalty to apply (0.0 to 2.0) - * @param opts.temperature The temperature to control randomness (0.0 to 2.0) - * @param opts.serverUrl The URL of the API server - * @param opts.max_context_length Maximum allowed context length in tokens - * @param opts.max_response_length Maximum allowed response length in tokens - * @returns Promise resolving to "RESPOND", "IGNORE", "STOP" or null - */ -export async function generateShouldRespond({ - runtime, - context, - modelClass, -}: { - runtime: IAgentRuntime; - context: string; - modelClass: string; -}): Promise<"RESPOND" | "IGNORE" | "STOP" | null> { - let retryDelay = 1000; - while (true) { - try { - elizaLogger.debug( - "Attempting to generate text with context:", - context - ); - const response = await generateText({ - runtime, - context, - modelClass, - }); - - elizaLogger.debug("Received response from generateText:", response); - const parsedResponse = parseShouldRespondFromText(response.trim()); - if (parsedResponse) { - elizaLogger.debug("Parsed response:", parsedResponse); - return parsedResponse; - } else { - elizaLogger.debug("generateShouldRespond no response"); - } - } catch (error) { - elizaLogger.error("Error in generateShouldRespond:", error); - if ( - error instanceof TypeError && - error.message.includes("queueTextCompletion") - ) { - elizaLogger.error( - "TypeError: Cannot read properties of null (reading 'queueTextCompletion')" - ); - } - } - - elizaLogger.log(`Retrying in ${retryDelay}ms...`); - await new Promise((resolve) => setTimeout(resolve, retryDelay)); - retryDelay *= 2; - } -} - -/** - * Splits content into chunks of specified size with optional overlapping bleed sections - * @param content - The text content to split into chunks - * @param chunkSize - The maximum size of each chunk in tokens - * @param bleed - Number of characters to overlap between chunks (default: 100) - * @returns Promise resolving to array of text chunks with bleed sections - */ -export async function splitChunks( - content: string, - chunkSize: number = 512, - bleed: number = 20 -): Promise { - const textSplitter = new RecursiveCharacterTextSplitter({ - chunkSize: Number(chunkSize), - chunkOverlap: Number(bleed), - }); - - return textSplitter.splitText(content); -} - -/** - * Sends a message to the model and parses the response as a boolean value - * @param opts - The options for the generateText request - * @param opts.context The context to evaluate for the boolean response - * @param opts.stop A list of strings to stop the generateText at - * @param opts.model The model to 
use for generateText - * @param opts.frequency_penalty The frequency penalty to apply (0.0 to 2.0) - * @param opts.presence_penalty The presence penalty to apply (0.0 to 2.0) - * @param opts.temperature The temperature to control randomness (0.0 to 2.0) - * @param opts.serverUrl The URL of the API server - * @param opts.token The API token for authentication - * @param opts.max_context_length Maximum allowed context length in tokens - * @param opts.max_response_length Maximum allowed response length in tokens - * @returns Promise resolving to a boolean value parsed from the model's response - */ -export async function generateTrueOrFalse({ - runtime, - context = "", - modelClass, -}: { - runtime: IAgentRuntime; - context: string; - modelClass: string; -}): Promise { - let retryDelay = 1000; - - const stop = Array.from( - new Set([ - ...(models[runtime.modelProvider].settings.stop || []), - ["\n"], - ]) - ) as string[]; - - while (true) { - try { - const response = await generateText({ - stop, - runtime, - context, - modelClass, - }); - - const parsedResponse = parseBooleanFromText(response.trim()); - if (parsedResponse !== null) { - return parsedResponse; - } - } catch (error) { - elizaLogger.error("Error in generateTrueOrFalse:", error); - } - - await new Promise((resolve) => setTimeout(resolve, retryDelay)); - retryDelay *= 2; - } -} - -/** - * Send a message to the model and parse the response as a string array - * @param opts - The options for the generateText request - * @param opts.context The context/prompt to send to the model - * @param opts.stop Array of strings that will stop the model's generation if encountered - * @param opts.model The language model to use - * @param opts.frequency_penalty The frequency penalty to apply (0.0 to 2.0) - * @param opts.presence_penalty The presence penalty to apply (0.0 to 2.0) - * @param opts.temperature The temperature to control randomness (0.0 to 2.0) - * @param opts.serverUrl The URL of the API server - * @param opts.token The API token for authentication - * @param opts.max_context_length Maximum allowed context length in tokens - * @param opts.max_response_length Maximum allowed response length in tokens - * @returns Promise resolving to an array of strings parsed from the model's response - */ -export async function generateTextArray({ - runtime, - context, - modelClass, -}: { - runtime: IAgentRuntime; - context: string; - modelClass: string; -}): Promise { - if (!context) { - elizaLogger.error("generateTextArray context is empty"); - return []; - } - let retryDelay = 1000; - - while (true) { - try { - const response = await generateText({ - runtime, - context, - modelClass, - }); - - const parsedResponse = parseJsonArrayFromText(response); - if (parsedResponse) { - return parsedResponse; - } - } catch (error) { - elizaLogger.error("Error in generateTextArray:", error); - } - - await new Promise((resolve) => setTimeout(resolve, retryDelay)); - retryDelay *= 2; - } -} - -export async function generateObjectDeprecated({ - runtime, - context, - modelClass, -}: { - runtime: IAgentRuntime; - context: string; - modelClass: string; -}): Promise { - if (!context) { - elizaLogger.error("generateObjectDeprecated context is empty"); - return null; - } - let retryDelay = 1000; - - while (true) { - try { - // this is slightly different than generateObjectArray, in that we parse object, not object array - const response = await generateText({ - runtime, - context, - modelClass, - }); - const parsedResponse = parseJSONObjectFromText(response); - if 
(parsedResponse) { - return parsedResponse; - } - } catch (error) { - elizaLogger.error("Error in generateObject:", error); - } - - await new Promise((resolve) => setTimeout(resolve, retryDelay)); - retryDelay *= 2; - } -} - -export async function generateObjectArray({ - runtime, - context, - modelClass, -}: { - runtime: IAgentRuntime; - context: string; - modelClass: string; -}): Promise { - if (!context) { - elizaLogger.error("generateObjectArray context is empty"); - return []; - } - let retryDelay = 1000; - - while (true) { - try { - const response = await generateText({ - runtime, - context, - modelClass, - }); - - const parsedResponse = parseJsonArrayFromText(response); - if (parsedResponse) { - return parsedResponse; - } - } catch (error) { - elizaLogger.error("Error in generateTextArray:", error); - } - - await new Promise((resolve) => setTimeout(resolve, retryDelay)); - retryDelay *= 2; - } -} - -/** - * Send a message to the model for generateText. - * @param opts - The options for the generateText request. - * @param opts.context The context of the message to be completed. - * @param opts.stop A list of strings to stop the generateText at. - * @param opts.model The model to use for generateText. - * @param opts.frequency_penalty The frequency penalty to apply to the generateText. - * @param opts.presence_penalty The presence penalty to apply to the generateText. - * @param opts.temperature The temperature to apply to the generateText. - * @param opts.max_context_length The maximum length of the context to apply to the generateText. - * @returns The completed message. - */ -export async function generateMessageResponse({ - runtime, - context, - modelClass, -}: { - runtime: IAgentRuntime; - context: string; - modelClass: string; -}): Promise { - const provider = runtime.modelProvider; - const max_context_length = models[provider].settings.maxInputTokens; - - context = await trimTokens(context, max_context_length, runtime); - let retryLength = 1000; // exponential backoff - while (true) { - try { - elizaLogger.log("Generating message response.."); - - const response = await generateText({ - runtime, - context, - modelClass, - }); - - // try parsing the response as JSON, if null then try again - const parsedContent = parseJSONObjectFromText(response) as Content; - if (!parsedContent) { - elizaLogger.debug("parsedContent is null, retrying"); - continue; - } - - return parsedContent; - } catch (error) { - elizaLogger.error("ERROR:", error); - // wait for 2 seconds - retryLength *= 2; - await new Promise((resolve) => setTimeout(resolve, retryLength)); - elizaLogger.debug("Retrying..."); - } - } -} - -export const generateImage = async ( - data: { - prompt: string; - width: number; - height: number; - count?: number; - negativePrompt?: string; - numIterations?: number; - guidanceScale?: number; - seed?: number; - modelId?: string; - jobId?: string; - stylePreset?: string; - hideWatermark?: boolean; - }, - runtime: IAgentRuntime -): Promise<{ - success: boolean; - data?: string[]; - error?: any; -}> => { - const model = getModel(runtime.imageModelProvider, ModelClass.IMAGE); - const modelSettings = models[runtime.imageModelProvider].imageSettings; - - elizaLogger.info("Generating image with options:", { - imageModelProvider: model, - }); - - const apiKey = - runtime.imageModelProvider === runtime.modelProvider - ? 
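// Illustrative usage sketch of the retry-and-parse helpers defined above
// (generateShouldRespond, generateTrueOrFalse, generateTextArray,
// generateMessageResponse). This is not part of the patch; it assumes an existing
// IAgentRuntime (`runtime`) and a composed prompt string (`context`), and that the
// elided Promise return types are boolean / string[] / Content, as the
// parseBooleanFromText / parseJsonArrayFromText / parseJSONObjectFromText calls
// suggest. ModelClass.SMALL is only a stand-in model class.
async function exampleTextHelpers(runtime: IAgentRuntime, context: string) {
    const decision = await generateShouldRespond({ runtime, context, modelClass: ModelClass.SMALL });
    if (decision !== "RESPOND") return null;

    const onTopic = await generateTrueOrFalse({
        runtime,
        context: "Answer TRUE or FALSE: is the conversation about scheduling?",
        modelClass: ModelClass.SMALL,
    });

    const topics: string[] = await generateTextArray({
        runtime,
        context: "Return a JSON array of the three main topics discussed.",
        modelClass: ModelClass.SMALL,
    });

    const reply = await generateMessageResponse({ runtime, context, modelClass: ModelClass.SMALL });
    return { onTopic, topics, reply };
}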
runtime.token - : (() => { - // First try to match the specific provider - switch (runtime.imageModelProvider) { - case ModelProviderName.HEURIST: - return runtime.getSetting("HEURIST_API_KEY"); - case ModelProviderName.TOGETHER: - return runtime.getSetting("TOGETHER_API_KEY"); - case ModelProviderName.FAL: - return runtime.getSetting("FAL_API_KEY"); - case ModelProviderName.OPENAI: - return runtime.getSetting("OPENAI_API_KEY"); - case ModelProviderName.VENICE: - return runtime.getSetting("VENICE_API_KEY"); - case ModelProviderName.LIVEPEER: - return runtime.getSetting("LIVEPEER_GATEWAY_URL"); - default: - // If no specific match, try the fallback chain - return ( - runtime.getSetting("HEURIST_API_KEY") ?? - runtime.getSetting("TOGETHER_API_KEY") ?? - runtime.getSetting("FAL_API_KEY") ?? - runtime.getSetting("OPENAI_API_KEY") ?? - runtime.getSetting("VENICE_API_KEY") ?? - runtime.getSetting("LIVEPEER_GATEWAY_URL") - ); - } - })(); - try { - if (runtime.imageModelProvider === ModelProviderName.HEURIST) { - const response = await fetch( - "http://sequencer.heurist.xyz/submit_job", - { - method: "POST", - headers: { - Authorization: `Bearer ${apiKey}`, - "Content-Type": "application/json", - }, - body: JSON.stringify({ - job_id: data.jobId || crypto.randomUUID(), - model_input: { - SD: { - prompt: data.prompt, - neg_prompt: data.negativePrompt, - num_iterations: data.numIterations || 20, - width: data.width || 512, - height: data.height || 512, - guidance_scale: data.guidanceScale || 3, - seed: data.seed || -1, - }, - }, - model_id: data.modelId || "FLUX.1-dev", - deadline: 60, - priority: 1, - }), - } - ); - - if (!response.ok) { - throw new Error( - `Heurist image generation failed: ${response.statusText}` - ); - } - - const imageURL = await response.json(); - return { success: true, data: [imageURL] }; - } else if ( - runtime.imageModelProvider === ModelProviderName.TOGETHER || - // for backwards compat - runtime.imageModelProvider === ModelProviderName.LLAMACLOUD - ) { - const together = new Together({ apiKey: apiKey as string }); - const response = await together.images.create({ - model: "black-forest-labs/FLUX.1-schnell", - prompt: data.prompt, - width: data.width, - height: data.height, - steps: modelSettings?.steps ?? 4, - n: data.count, - }); - - // Add type assertion to handle the response properly - const togetherResponse = - response as unknown as TogetherAIImageResponse; - - if ( - !togetherResponse.data || - !Array.isArray(togetherResponse.data) - ) { - throw new Error("Invalid response format from Together AI"); - } - - // Rest of the code remains the same... 
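// Sketch only (not from the original file): driving the generateImage helper whose
// provider branches appear above and below. The option names mirror the `data`
// parameter declared earlier; the prompt and dimensions are invented. On success
// most branches resolve to base64 data URIs (the Heurist branch returns the hosted
// image URL instead).
async function exampleImage(runtime: IAgentRuntime) {
    const result = await generateImage(
        {
            prompt: "a lighthouse at dusk, watercolor",
            width: 1024,
            height: 1024,
            count: 1,
            negativePrompt: "text, watermark",
        },
        runtime
    );
    if (!result.success || !result.data?.length) {
        throw result.error ?? new Error("image generation failed");
    }
    return result.data[0]; // e.g. "data:image/png;base64,..."
}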
- const base64s = await Promise.all( - togetherResponse.data.map(async (image) => { - if (!image.url) { - elizaLogger.error("Missing URL in image data:", image); - throw new Error("Missing URL in Together AI response"); - } - - // Fetch the image from the URL - const imageResponse = await fetch(image.url); - if (!imageResponse.ok) { - throw new Error( - `Failed to fetch image: ${imageResponse.statusText}` - ); - } - - // Convert to blob and then to base64 - const blob = await imageResponse.blob(); - const arrayBuffer = await blob.arrayBuffer(); - const base64 = Buffer.from(arrayBuffer).toString("base64"); - - // Return with proper MIME type - return `data:image/jpeg;base64,${base64}`; - }) - ); - - if (base64s.length === 0) { - throw new Error("No images generated by Together AI"); - } - - elizaLogger.debug(`Generated ${base64s.length} images`); - return { success: true, data: base64s }; - } else if (runtime.imageModelProvider === ModelProviderName.FAL) { - fal.config({ - credentials: apiKey as string, - }); - - // Prepare the input parameters according to their schema - const input = { - prompt: data.prompt, - image_size: "square" as const, - num_inference_steps: modelSettings?.steps ?? 50, - guidance_scale: data.guidanceScale || 3.5, - num_images: data.count, - enable_safety_checker: - runtime.getSetting("FAL_AI_ENABLE_SAFETY_CHECKER") === - "true", - safety_tolerance: Number( - runtime.getSetting("FAL_AI_SAFETY_TOLERANCE") || "2" - ), - output_format: "png" as const, - seed: data.seed ?? 6252023, - ...(runtime.getSetting("FAL_AI_LORA_PATH") - ? { - loras: [ - { - path: runtime.getSetting("FAL_AI_LORA_PATH"), - scale: 1, - }, - ], - } - : {}), - }; - - // Subscribe to the model - const result = await fal.subscribe(model, { - input, - logs: true, - onQueueUpdate: (update) => { - if (update.status === "IN_PROGRESS") { - elizaLogger.info(update.logs.map((log) => log.message)); - } - }, - }); - - // Convert the returned image URLs to base64 to match existing functionality - const base64Promises = result.data.images.map(async (image) => { - const response = await fetch(image.url); - const blob = await response.blob(); - const buffer = await blob.arrayBuffer(); - const base64 = Buffer.from(buffer).toString("base64"); - return `data:${image.content_type};base64,${base64}`; - }); - - const base64s = await Promise.all(base64Promises); - return { success: true, data: base64s }; - } else if (runtime.imageModelProvider === ModelProviderName.VENICE) { - const response = await fetch( - "https://api.venice.ai/api/v1/image/generate", - { - method: "POST", - headers: { - Authorization: `Bearer ${apiKey}`, - "Content-Type": "application/json", - }, - body: JSON.stringify({ - model: data.modelId || "fluently-xl", - prompt: data.prompt, - negative_prompt: data.negativePrompt, - width: data.width, - height: data.height, - steps: data.numIterations, - seed: data.seed, - style_preset: data.stylePreset, - hide_watermark: data.hideWatermark, - }), - } - ); - - const result = await response.json(); - - if (!result.images || !Array.isArray(result.images)) { - throw new Error("Invalid response format from Venice AI"); - } - - const base64s = result.images.map((base64String) => { - if (!base64String) { - throw new Error( - "Empty base64 string in Venice AI response" - ); - } - return `data:image/png;base64,${base64String}`; - }); - - return { success: true, data: base64s }; - } else if (runtime.imageModelProvider === ModelProviderName.LIVEPEER) { - if (!apiKey) { - throw new Error("Livepeer Gateway is not defined"); 
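// The Together and FAL branches above (and the Livepeer branch that follows) each
// inline the same URL -> base64 data-URI conversion. A minimal sketch of that shared
// pattern; `toDataUri` is a hypothetical helper, not present in the original file,
// and assumes Node's global fetch and Buffer.
async function toDataUri(url: string, mime = "image/jpeg"): Promise<string> {
    const res = await fetch(url);
    if (!res.ok) {
        throw new Error(`Failed to fetch image: ${res.statusText}`);
    }
    const bytes = await (await res.blob()).arrayBuffer();
    return `data:${mime};base64,${Buffer.from(bytes).toString("base64")}`;
}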
- } - try { - const baseUrl = new URL(apiKey); - if (!baseUrl.protocol.startsWith("http")) { - throw new Error("Invalid Livepeer Gateway URL protocol"); - } - const response = await fetch( - `${baseUrl.toString()}text-to-image`, - { - method: "POST", - headers: { - "Content-Type": "application/json", - }, - body: JSON.stringify({ - model_id: - data.modelId || "ByteDance/SDXL-Lightning", - prompt: data.prompt, - width: data.width || 1024, - height: data.height || 1024, - }), - } - ); - const result = await response.json(); - if (!result.images?.length) { - throw new Error("No images generated"); - } - const base64Images = await Promise.all( - result.images.map(async (image) => { - console.log("imageUrl console log", image.url); - let imageUrl; - if (image.url.includes("http")) { - imageUrl = image.url; - } else { - imageUrl = `${apiKey}${image.url}`; - } - const imageResponse = await fetch(imageUrl); - if (!imageResponse.ok) { - throw new Error( - `Failed to fetch image: ${imageResponse.statusText}` - ); - } - const blob = await imageResponse.blob(); - const arrayBuffer = await blob.arrayBuffer(); - const base64 = - Buffer.from(arrayBuffer).toString("base64"); - return `data:image/jpeg;base64,${base64}`; - }) - ); - return { - success: true, - data: base64Images, - }; - } catch (error) { - console.error(error); - return { success: false, error: error }; - } - } else { - let targetSize = `${data.width}x${data.height}`; - if ( - targetSize !== "1024x1024" && - targetSize !== "1792x1024" && - targetSize !== "1024x1792" - ) { - targetSize = "1024x1024"; - } - const openaiApiKey = runtime.getSetting("OPENAI_API_KEY") as string; - if (!openaiApiKey) { - throw new Error("OPENAI_API_KEY is not set"); - } - const openai = new OpenAI({ - apiKey: openaiApiKey as string, - }); - const response = await openai.images.generate({ - model, - prompt: data.prompt, - size: targetSize as "1024x1024" | "1792x1024" | "1024x1792", - n: data.count, - response_format: "b64_json", - }); - const base64s = response.data.map( - (image) => `data:image/png;base64,${image.b64_json}` - ); - return { success: true, data: base64s }; - } - } catch (error) { - console.error(error); - return { success: false, error: error }; - } -}; - -export const generateCaption = async ( - data: { imageUrl: string }, - runtime: IAgentRuntime -): Promise<{ - title: string; - description: string; -}> => { - const { imageUrl } = data; - const imageDescriptionService = - runtime.getService( - ServiceType.IMAGE_DESCRIPTION - ); - - if (!imageDescriptionService) { - throw new Error("Image description service not found"); - } - - const resp = await imageDescriptionService.describeImage(imageUrl); - return { - title: resp.title.trim(), - description: resp.description.trim(), - }; -}; - -export const generateWebSearch = async ( - query: string, - runtime: IAgentRuntime -): Promise => { - try { - const apiKey = runtime.getSetting("TAVILY_API_KEY") as string; - if (!apiKey) { - throw new Error("TAVILY_API_KEY is not set"); - } - const tvly = tavily({ apiKey }); - const response = await tvly.search(query, { - includeAnswer: true, - maxResults: 3, // 5 (default) - topic: "general", // "general"(default) "news" - searchDepth: "basic", // "basic"(default) "advanced" - includeImages: false, // false (default) true - }); - return response; - } catch (error) { - elizaLogger.error("Error:", error); - } -}; -/** - * Configuration options for generating objects with a model. 
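// Hypothetical usage of the two helpers just above; not part of the patch.
// generateCaption delegates to the runtime's IMAGE_DESCRIPTION service, and
// generateWebSearch returns undefined when the Tavily call throws (its result type
// is elided in this file), so both are handled defensively here.
async function exampleCaptionAndSearch(runtime: IAgentRuntime, imageUrl: string) {
    const caption = await generateCaption({ imageUrl }, runtime);
    // caption.title / caption.description come from the image description service.

    const search = await generateWebSearch(`images similar to: ${caption.title}`, runtime);
    return { caption, results: search ?? null };
}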
- */ -export interface GenerationOptions { - runtime: IAgentRuntime; - context: string; - modelClass: ModelClass; - schema?: ZodSchema; - schemaName?: string; - schemaDescription?: string; - stop?: string[]; - mode?: "auto" | "json" | "tool"; - experimental_providerMetadata?: Record; -} - -/** - * Base settings for model generation. - */ -interface ModelSettings { - prompt: string; - temperature: number; - maxTokens: number; - frequencyPenalty: number; - presencePenalty: number; - stop?: string[]; - experimental_telemetry?: TelemetrySettings; -} - -/** - * Generates structured objects from a prompt using specified AI models and configuration options. - * - * @param {GenerationOptions} options - Configuration options for generating objects. - * @returns {Promise} - A promise that resolves to an array of generated objects. - * @throws {Error} - Throws an error if the provider is unsupported or if generation fails. - */ -export const generateObject = async ({ - runtime, - context, - modelClass, - schema, - schemaName, - schemaDescription, - stop, - mode = "json", -}: GenerationOptions): Promise> => { - if (!context) { - const errorMessage = "generateObject context is empty"; - console.error(errorMessage); - throw new Error(errorMessage); - } - - const provider = runtime.modelProvider; - const model = models[provider].model[modelClass]; - const temperature = models[provider].settings.temperature; - const frequency_penalty = models[provider].settings.frequency_penalty; - const presence_penalty = models[provider].settings.presence_penalty; - const max_context_length = models[provider].settings.maxInputTokens; - const max_response_length = models[provider].settings.maxOutputTokens; - const experimental_telemetry = - models[provider].settings.experimental_telemetry; - const apiKey = runtime.token; - - try { - context = await trimTokens(context, max_context_length, runtime); - - const modelOptions: ModelSettings = { - prompt: context, - temperature, - maxTokens: max_response_length, - frequencyPenalty: frequency_penalty, - presencePenalty: presence_penalty, - stop: stop || models[provider].settings.stop, - experimental_telemetry: experimental_telemetry, - }; - - const response = await handleProvider({ - provider, - model, - apiKey, - schema, - schemaName, - schemaDescription, - mode, - modelOptions, - runtime, - context, - modelClass, - }); - - return response; - } catch (error) { - console.error("Error in generateObject:", error); - throw error; - } -}; - -/** - * Interface for provider-specific generation options. - */ -interface ProviderOptions { - runtime: IAgentRuntime; - provider: ModelProviderName; - model: any; - apiKey: string; - schema?: ZodSchema; - schemaName?: string; - schemaDescription?: string; - mode?: "auto" | "json" | "tool"; - experimental_providerMetadata?: Record; - modelOptions: ModelSettings; - modelClass: string; - context: string; -} - -/** - * Handles AI generation based on the specified provider. - * - * @param {ProviderOptions} options - Configuration options specific to the provider. - * @returns {Promise} - A promise that resolves to an array of generated objects. 
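// Sketch of calling the structured-generation entry point defined above. The field
// names follow the GenerationOptions interface; the schema itself is a hypothetical
// ZodSchema supplied by the caller (zod is already implied by the ZodSchema type in
// GenerationOptions). Not from the original file.
async function exampleGenerateObject(
    runtime: IAgentRuntime,
    context: string,
    schema: ZodSchema // e.g. z.object({ name: z.string(), score: z.number() })
) {
    const result = await generateObject({
        runtime,
        context,
        modelClass: ModelClass.SMALL,
        schema,
        schemaName: "ScoredEntity",
        schemaDescription: "A named entity with a numeric score",
        mode: "json",
    });
    // handleProvider routes this call to the provider-specific handler below.
    return result;
}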
- */ -export async function handleProvider( - options: ProviderOptions -): Promise> { - const { provider, runtime, context, modelClass } = options; - switch (provider) { - case ModelProviderName.OPENAI: - case ModelProviderName.ETERNALAI: - case ModelProviderName.ALI_BAILIAN: - case ModelProviderName.VOLENGINE: - case ModelProviderName.LLAMACLOUD: - case ModelProviderName.TOGETHER: - case ModelProviderName.NANOGPT: - case ModelProviderName.INFERA: - case ModelProviderName.AKASH_CHAT_API: - return await handleOpenAI(options); - case ModelProviderName.ANTHROPIC: - case ModelProviderName.CLAUDE_VERTEX: - return await handleAnthropic(options); - case ModelProviderName.GROK: - return await handleGrok(options); - case ModelProviderName.GROQ: - return await handleGroq(options); - case ModelProviderName.LLAMALOCAL: - return await generateObjectDeprecated({ - runtime, - context, - modelClass, - }); - case ModelProviderName.GOOGLE: - return await handleGoogle(options); - case ModelProviderName.REDPILL: - return await handleRedPill(options); - case ModelProviderName.OPENROUTER: - return await handleOpenRouter(options); - case ModelProviderName.OLLAMA: - return await handleOllama(options); - default: { - const errorMessage = `Unsupported provider: ${provider}`; - elizaLogger.error(errorMessage); - throw new Error(errorMessage); - } - } -} -/** - * Handles object generation for OpenAI. - * - * @param {ProviderOptions} options - Options specific to OpenAI. - * @returns {Promise>} - A promise that resolves to generated objects. - */ -async function handleOpenAI({ - model, - apiKey, - schema, - schemaName, - schemaDescription, - mode, - modelOptions, -}: ProviderOptions): Promise> { - const baseURL = models.openai.endpoint || undefined; - const openai = createOpenAI({ apiKey, baseURL }); - return await aiGenerateObject({ - model: openai.languageModel(model), - schema, - schemaName, - schemaDescription, - mode, - ...modelOptions, - }); -} - -/** - * Handles object generation for Anthropic models. - * - * @param {ProviderOptions} options - Options specific to Anthropic. - * @returns {Promise>} - A promise that resolves to generated objects. - */ -async function handleAnthropic({ - model, - apiKey, - schema, - schemaName, - schemaDescription, - mode, - modelOptions, -}: ProviderOptions): Promise> { - const anthropic = createAnthropic({ apiKey }); - return await aiGenerateObject({ - model: anthropic.languageModel(model), - schema, - schemaName, - schemaDescription, - mode, - ...modelOptions, - }); -} - -/** - * Handles object generation for Grok models. - * - * @param {ProviderOptions} options - Options specific to Grok. - * @returns {Promise>} - A promise that resolves to generated objects. - */ -async function handleGrok({ - model, - apiKey, - schema, - schemaName, - schemaDescription, - mode, - modelOptions, -}: ProviderOptions): Promise> { - const grok = createOpenAI({ apiKey, baseURL: models.grok.endpoint }); - return await aiGenerateObject({ - model: grok.languageModel(model, { parallelToolCalls: false }), - schema, - schemaName, - schemaDescription, - mode, - ...modelOptions, - }); -} - -/** - * Handles object generation for Groq models. - * - * @param {ProviderOptions} options - Options specific to Groq. - * @returns {Promise>} - A promise that resolves to generated objects. 
- */ -async function handleGroq({ - model, - apiKey, - schema, - schemaName, - schemaDescription, - mode, - modelOptions, -}: ProviderOptions): Promise> { - const groq = createGroq({ apiKey }); - return await aiGenerateObject({ - model: groq.languageModel(model), - schema, - schemaName, - schemaDescription, - mode, - ...modelOptions, - }); -} - -/** - * Handles object generation for Google models. - * - * @param {ProviderOptions} options - Options specific to Google. - * @returns {Promise>} - A promise that resolves to generated objects. - */ -async function handleGoogle({ - model, - apiKey: _apiKey, - schema, - schemaName, - schemaDescription, - mode, - modelOptions, -}: ProviderOptions): Promise> { - const google = createGoogleGenerativeAI(); - return await aiGenerateObject({ - model: google(model), - schema, - schemaName, - schemaDescription, - mode, - ...modelOptions, - }); -} - -/** - * Handles object generation for Redpill models. - * - * @param {ProviderOptions} options - Options specific to Redpill. - * @returns {Promise>} - A promise that resolves to generated objects. - */ -async function handleRedPill({ - model, - apiKey, - schema, - schemaName, - schemaDescription, - mode, - modelOptions, -}: ProviderOptions): Promise> { - const redPill = createOpenAI({ apiKey, baseURL: models.redpill.endpoint }); - return await aiGenerateObject({ - model: redPill.languageModel(model), - schema, - schemaName, - schemaDescription, - mode, - ...modelOptions, - }); -} - -/** - * Handles object generation for OpenRouter models. - * - * @param {ProviderOptions} options - Options specific to OpenRouter. - * @returns {Promise>} - A promise that resolves to generated objects. - */ -async function handleOpenRouter({ - model, - apiKey, - schema, - schemaName, - schemaDescription, - mode, - modelOptions, -}: ProviderOptions): Promise> { - const openRouter = createOpenAI({ - apiKey, - baseURL: models.openrouter.endpoint, - }); - return await aiGenerateObject({ - model: openRouter.languageModel(model), - schema, - schemaName, - schemaDescription, - mode, - ...modelOptions, - }); -} - -/** - * Handles object generation for Ollama models. - * - * @param {ProviderOptions} options - Options specific to Ollama. - * @returns {Promise>} - A promise that resolves to generated objects. 
- */ -async function handleOllama({ - model, - schema, - schemaName, - schemaDescription, - mode, - modelOptions, - provider, -}: ProviderOptions): Promise> { - const ollamaProvider = createOllama({ - baseURL: models[provider].endpoint + "/api", - }); - const ollama = ollamaProvider(model); - return await aiGenerateObject({ - model: ollama, - schema, - schemaName, - schemaDescription, - mode, - ...modelOptions, - }); -} - -// Add type definition for Together AI response -interface TogetherAIImageResponse { - data: Array<{ - url: string; - content_type?: string; - image_type?: string; - }>; -} - -export async function generateTweetActions({ - runtime, - context, - modelClass, -}: { - runtime: IAgentRuntime; - context: string; - modelClass: string; -}): Promise { - let retryDelay = 1000; - while (true) { - try { - const response = await generateText({ - runtime, - context, - modelClass, - }); - console.debug( - "Received response from generateText for tweet actions:", - response - ); - const { actions } = parseActionResponseFromText(response.trim()); - if (actions) { - console.debug("Parsed tweet actions:", actions); - return actions; - } else { - elizaLogger.debug("generateTweetActions no valid response"); - } - } catch (error) { - elizaLogger.error("Error in generateTweetActions:", error); - if ( - error instanceof TypeError && - error.message.includes("queueTextCompletion") - ) { - elizaLogger.error( - "TypeError: Cannot read properties of null (reading 'queueTextCompletion')" - ); - } - } - elizaLogger.log(`Retrying in ${retryDelay}ms...`); - await new Promise((resolve) => setTimeout(resolve, retryDelay)); - retryDelay *= 2; - } -} diff --git a/packages/core/models.ts b/packages/core/models.ts deleted file mode 100644 index 67269b49d37..00000000000 --- a/packages/core/models.ts +++ /dev/null @@ -1,542 +0,0 @@ -import settings from "./settings.ts"; -import { Models, ModelProviderName, ModelClass } from "./types.ts"; - -export const models: Models = { - [ModelProviderName.OPENAI]: { - endpoint: settings.OPENAI_API_URL || "https://api.openai.com/v1", - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.0, - presence_penalty: 0.0, - temperature: 0.6, - }, - model: { - [ModelClass.SMALL]: settings.SMALL_OPENAI_MODEL || "gpt-4o-mini", - [ModelClass.MEDIUM]: settings.MEDIUM_OPENAI_MODEL || "gpt-4o", - [ModelClass.LARGE]: settings.LARGE_OPENAI_MODEL || "gpt-4o", - [ModelClass.EMBEDDING]: settings.EMBEDDING_OPENAI_MODEL || "text-embedding-3-small", - [ModelClass.IMAGE]: settings.IMAGE_OPENAI_MODEL || "dall-e-3", - }, - }, - [ModelProviderName.ETERNALAI]: { - endpoint: settings.ETERNALAI_URL, - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.0, - presence_penalty: 0.0, - temperature: 0.6, - }, - model: { - [ModelClass.SMALL]: - settings.ETERNALAI_MODEL || - "neuralmagic/Meta-Llama-3.1-405B-Instruct-quantized.w4a16", - [ModelClass.MEDIUM]: - settings.ETERNALAI_MODEL || - "neuralmagic/Meta-Llama-3.1-405B-Instruct-quantized.w4a16", - [ModelClass.LARGE]: - settings.ETERNALAI_MODEL || - "neuralmagic/Meta-Llama-3.1-405B-Instruct-quantized.w4a16", - [ModelClass.EMBEDDING]: "", - [ModelClass.IMAGE]: "", - }, - }, - [ModelProviderName.ANTHROPIC]: { - settings: { - stop: [], - maxInputTokens: 200000, - maxOutputTokens: 4096, - frequency_penalty: 0.4, - presence_penalty: 0.4, - temperature: 0.7, - }, - endpoint: "https://api.anthropic.com/v1", - model: { - [ModelClass.SMALL]: settings.SMALL_ANTHROPIC_MODEL || 
"claude-3-haiku-20240307", - [ModelClass.MEDIUM]: settings.MEDIUM_ANTHROPIC_MODEL || "claude-3-5-sonnet-20241022", - [ModelClass.LARGE]: settings.LARGE_ANTHROPIC_MODEL || "claude-3-5-sonnet-20241022", - }, - }, - [ModelProviderName.CLAUDE_VERTEX]: { - settings: { - stop: [], - maxInputTokens: 200000, - maxOutputTokens: 8192, - frequency_penalty: 0.4, - presence_penalty: 0.4, - temperature: 0.7, - }, - endpoint: "https://api.anthropic.com/v1", // TODO: check - model: { - [ModelClass.SMALL]: "claude-3-5-sonnet-20241022", - [ModelClass.MEDIUM]: "claude-3-5-sonnet-20241022", - [ModelClass.LARGE]: "claude-3-opus-20240229", - }, - }, - [ModelProviderName.GROK]: { - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.4, - presence_penalty: 0.4, - temperature: 0.7, - }, - endpoint: "https://api.x.ai/v1", - model: { - [ModelClass.SMALL]: settings.SMALL_GROK_MODEL || "grok-2-1212", - [ModelClass.MEDIUM]: settings.MEDIUM_GROK_MODEL || "grok-2-1212", - [ModelClass.LARGE]: settings.LARGE_GROK_MODEL || "grok-2-1212", - [ModelClass.EMBEDDING]: settings.EMBEDDING_GROK_MODEL || "grok-2-1212", // not sure about this one - }, - }, - [ModelProviderName.GROQ]: { - endpoint: "https://api.groq.com/openai/v1", - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8000, - frequency_penalty: 0.4, - presence_penalty: 0.4, - temperature: 0.7, - }, - model: { - [ModelClass.SMALL]: - settings.SMALL_GROQ_MODEL || "llama-3.1-8b-instant", - [ModelClass.MEDIUM]: - settings.MEDIUM_GROQ_MODEL || "llama-3.3-70b-versatile", - [ModelClass.LARGE]: - settings.LARGE_GROQ_MODEL || "llama-3.2-90b-vision-preview", - [ModelClass.EMBEDDING]: - settings.EMBEDDING_GROQ_MODEL || "llama-3.1-8b-instant", - }, - }, - [ModelProviderName.LLAMACLOUD]: { - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - repetition_penalty: 0.4, - temperature: 0.7, - }, - imageSettings: { - steps: 4, - }, - endpoint: "https://api.llamacloud.com/v1", - model: { - [ModelClass.SMALL]: "meta-llama/Llama-3.2-3B-Instruct-Turbo", - [ModelClass.MEDIUM]: "meta-llama-3.1-8b-instruct", - [ModelClass.LARGE]: "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo", - [ModelClass.EMBEDDING]: - "togethercomputer/m2-bert-80M-32k-retrieval", - [ModelClass.IMAGE]: "black-forest-labs/FLUX.1-schnell", - }, - }, - [ModelProviderName.TOGETHER]: { - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - repetition_penalty: 0.4, - temperature: 0.7, - }, - imageSettings: { - steps: 4, - }, - endpoint: "https://api.together.ai/v1", - model: { - [ModelClass.SMALL]: "meta-llama/Llama-3.2-3B-Instruct-Turbo", - [ModelClass.MEDIUM]: "meta-llama-3.1-8b-instruct", - [ModelClass.LARGE]: "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo", - [ModelClass.EMBEDDING]: - "togethercomputer/m2-bert-80M-32k-retrieval", - [ModelClass.IMAGE]: "black-forest-labs/FLUX.1-schnell", - }, - }, - [ModelProviderName.LLAMALOCAL]: { - settings: { - stop: ["<|eot_id|>", "<|eom_id|>"], - maxInputTokens: 32768, - maxOutputTokens: 8192, - repetition_penalty: 0.4, - temperature: 0.7, - }, - model: { - [ModelClass.SMALL]: - "NousResearch/Hermes-3-Llama-3.1-8B-GGUF/resolve/main/Hermes-3-Llama-3.1-8B.Q8_0.gguf?download=true", - [ModelClass.MEDIUM]: - "NousResearch/Hermes-3-Llama-3.1-8B-GGUF/resolve/main/Hermes-3-Llama-3.1-8B.Q8_0.gguf?download=true", // TODO: ?download=true - [ModelClass.LARGE]: - "NousResearch/Hermes-3-Llama-3.1-8B-GGUF/resolve/main/Hermes-3-Llama-3.1-8B.Q8_0.gguf?download=true", - // 
"RichardErkhov/NousResearch_-_Meta-Llama-3.1-70B-gguf", // TODO: - [ModelClass.EMBEDDING]: - "togethercomputer/m2-bert-80M-32k-retrieval", - }, - }, - [ModelProviderName.GOOGLE]: { - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.4, - presence_penalty: 0.4, - temperature: 0.7, - }, - model: { - [ModelClass.SMALL]: - settings.SMALL_GOOGLE_MODEL || - settings.GOOGLE_MODEL || - "gemini-2.0-flash-exp", - [ModelClass.MEDIUM]: - settings.MEDIUM_GOOGLE_MODEL || - settings.GOOGLE_MODEL || - "gemini-2.0-flash-exp", - [ModelClass.LARGE]: - settings.LARGE_GOOGLE_MODEL || - settings.GOOGLE_MODEL || - "gemini-2.0-flash-exp", - [ModelClass.EMBEDDING]: - settings.EMBEDDING_GOOGLE_MODEL || - settings.GOOGLE_MODEL || - "text-embedding-004", - }, - }, - [ModelProviderName.REDPILL]: { - endpoint: "https://api.red-pill.ai/v1", - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.0, - presence_penalty: 0.0, - temperature: 0.6, - }, - // Available models: https://docs.red-pill.ai/get-started/supported-models - // To test other models, change the models below - model: { - [ModelClass.SMALL]: - settings.SMALL_REDPILL_MODEL || - settings.REDPILL_MODEL || - "gpt-4o-mini", - [ModelClass.MEDIUM]: - settings.MEDIUM_REDPILL_MODEL || - settings.REDPILL_MODEL || - "gpt-4o", - [ModelClass.LARGE]: - settings.LARGE_REDPILL_MODEL || - settings.REDPILL_MODEL || - "gpt-4o", - [ModelClass.EMBEDDING]: "text-embedding-3-small", - }, - }, - [ModelProviderName.OPENROUTER]: { - endpoint: "https://openrouter.ai/api/v1", - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.4, - presence_penalty: 0.4, - temperature: 0.7, - }, - // Available models: https://openrouter.ai/models - // To test other models, change the models below - model: { - [ModelClass.SMALL]: - settings.SMALL_OPENROUTER_MODEL || - settings.OPENROUTER_MODEL || - "nousresearch/hermes-3-llama-3.1-405b", - [ModelClass.MEDIUM]: - settings.MEDIUM_OPENROUTER_MODEL || - settings.OPENROUTER_MODEL || - "nousresearch/hermes-3-llama-3.1-405b", - [ModelClass.LARGE]: - settings.LARGE_OPENROUTER_MODEL || - settings.OPENROUTER_MODEL || - "nousresearch/hermes-3-llama-3.1-405b", - [ModelClass.EMBEDDING]: "text-embedding-3-small", - }, - }, - [ModelProviderName.OLLAMA]: { - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.4, - presence_penalty: 0.4, - temperature: 0.7, - }, - endpoint: settings.OLLAMA_SERVER_URL || "http://localhost:11434", - model: { - [ModelClass.SMALL]: - settings.SMALL_OLLAMA_MODEL || - settings.OLLAMA_MODEL || - "llama3.2", - [ModelClass.MEDIUM]: - settings.MEDIUM_OLLAMA_MODEL || - settings.OLLAMA_MODEL || - "hermes3", - [ModelClass.LARGE]: - settings.LARGE_OLLAMA_MODEL || - settings.OLLAMA_MODEL || - "hermes3:70b", - [ModelClass.EMBEDDING]: - settings.OLLAMA_EMBEDDING_MODEL || "mxbai-embed-large", - }, - }, - [ModelProviderName.HEURIST]: { - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - repetition_penalty: 0.4, - temperature: 0.7, - }, - imageSettings: { - steps: 20, - }, - endpoint: "https://llm-gateway.heurist.xyz", - model: { - [ModelClass.SMALL]: - settings.SMALL_HEURIST_MODEL || - "meta-llama/llama-3-70b-instruct", - [ModelClass.MEDIUM]: - settings.MEDIUM_HEURIST_MODEL || - "meta-llama/llama-3-70b-instruct", - [ModelClass.LARGE]: - settings.LARGE_HEURIST_MODEL || - "meta-llama/llama-3.1-405b-instruct", - [ModelClass.EMBEDDING]: "", //Add 
later, - [ModelClass.IMAGE]: settings.HEURIST_IMAGE_MODEL || "PepeXL", - }, - }, - [ModelProviderName.GALADRIEL]: { - endpoint: "https://api.galadriel.com/v1", - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.5, - presence_penalty: 0.5, - temperature: 0.8, - }, - model: { - [ModelClass.SMALL]: "llama3.1:70b", - [ModelClass.MEDIUM]: "llama3.1:70b", - [ModelClass.LARGE]: "llama3.1:405b", - [ModelClass.EMBEDDING]: "gte-large-en-v1.5", - [ModelClass.IMAGE]: "stabilityai/stable-diffusion-xl-base-1.0", - }, - }, - [ModelProviderName.FAL]: { - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - repetition_penalty: 0.4, - temperature: 0.7, - }, - imageSettings: { - steps: 28, - }, - endpoint: "https://api.fal.ai/v1", - model: { - [ModelClass.SMALL]: "", // FAL doesn't provide text models - [ModelClass.MEDIUM]: "", - [ModelClass.LARGE]: "", - [ModelClass.EMBEDDING]: "", - [ModelClass.IMAGE]: "fal-ai/flux-lora", - }, - }, - [ModelProviderName.GAIANET]: { - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - repetition_penalty: 0.4, - temperature: 0.7, - }, - endpoint: settings.GAIANET_SERVER_URL, - model: { - [ModelClass.SMALL]: - settings.GAIANET_MODEL || - settings.SMALL_GAIANET_MODEL || - "llama3b", - [ModelClass.MEDIUM]: - settings.GAIANET_MODEL || - settings.MEDIUM_GAIANET_MODEL || - "llama", - [ModelClass.LARGE]: - settings.GAIANET_MODEL || - settings.LARGE_GAIANET_MODEL || - "qwen72b", - [ModelClass.EMBEDDING]: - settings.GAIANET_EMBEDDING_MODEL || "nomic-embed", - }, - }, - [ModelProviderName.ALI_BAILIAN]: { - endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1", - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.4, - presence_penalty: 0.4, - temperature: 0.6, - }, - model: { - [ModelClass.SMALL]: "qwen-turbo", - [ModelClass.MEDIUM]: "qwen-plus", - [ModelClass.LARGE]: "qwen-max", - [ModelClass.IMAGE]: "wanx-v1", - }, - }, - [ModelProviderName.VOLENGINE]: { - endpoint: settings.VOLENGINE_API_URL || "https://open.volcengineapi.com/api/v3/", - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.4, - presence_penalty: 0.4, - temperature: 0.6, - }, - model: { - [ModelClass.SMALL]: - settings.SMALL_VOLENGINE_MODEL || - settings.VOLENGINE_MODEL || - "doubao-lite-128k", - [ModelClass.MEDIUM]: - settings.MEDIUM_VOLENGINE_MODEL || - settings.VOLENGINE_MODEL || - "doubao-pro-128k", - [ModelClass.LARGE]: - settings.LARGE_VOLENGINE_MODEL || - settings.VOLENGINE_MODEL || - "doubao-pro-256k", - [ModelClass.EMBEDDING]: - settings.VOLENGINE_EMBEDDING_MODEL || - "doubao-embedding", - }, - }, - [ModelProviderName.NANOGPT]: { - endpoint: "https://nano-gpt.com/api/v1", - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.0, - presence_penalty: 0.0, - temperature: 0.6, - }, - model: { - [ModelClass.SMALL]: settings.SMALL_NANOGPT_MODEL || "gpt-4o-mini", - [ModelClass.MEDIUM]: settings.MEDIUM_NANOGPT_MODEL || "gpt-4o", - [ModelClass.LARGE]: settings.LARGE_NANOGPT_MODEL || "gpt-4o", - } - }, - [ModelProviderName.HYPERBOLIC]: { - endpoint: "https://api.hyperbolic.xyz/v1", - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - temperature: 0.6, - }, - model: { - [ModelClass.SMALL]: - settings.SMALL_HYPERBOLIC_MODEL || - settings.HYPERBOLIC_MODEL || - "meta-llama/Llama-3.2-3B-Instruct", - [ModelClass.MEDIUM]: - settings.MEDIUM_HYPERBOLIC_MODEL || - 
settings.HYPERBOLIC_MODEL || - "meta-llama/Meta-Llama-3.1-70B-Instruct", - [ModelClass.LARGE]: - settings.LARGE_HYPERBOLIC_MODEL || - settings.HYPERBOLIC_MODEL || - "meta-llama/Meta-Llama-3.1-405-Instruct", - [ModelClass.IMAGE]: settings.IMAGE_HYPERBOLIC_MODEL || "FLUX.1-dev", - }, - }, - [ModelProviderName.VENICE]: { - endpoint: "https://api.venice.ai/api/v1", - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - temperature: 0.6, - }, - model: { - [ModelClass.SMALL]: settings.SMALL_VENICE_MODEL || "llama-3.3-70b", - [ModelClass.MEDIUM]: settings.MEDIUM_VENICE_MODEL || "llama-3.3-70b", - [ModelClass.LARGE]: settings.LARGE_VENICE_MODEL || "llama-3.1-405b", - [ModelClass.IMAGE]: settings.IMAGE_VENICE_MODEL || "fluently-xl", - }, - }, - [ModelProviderName.AKASH_CHAT_API]: { - endpoint: "https://chatapi.akash.network/api/v1", - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - temperature: 0.6, - }, - model: { - [ModelClass.SMALL]: - settings.SMALL_AKASH_CHAT_API_MODEL || - "Meta-Llama-3-2-3B-Instruct", - [ModelClass.MEDIUM]: - settings.MEDIUM_AKASH_CHAT_API_MODEL || - "Meta-Llama-3-3-70B-Instruct", - [ModelClass.LARGE]: - settings.LARGE_AKASH_CHAT_API_MODEL || - "Meta-Llama-3-1-405B-Instruct-FP8", - }, - }, - [ModelProviderName.LIVEPEER]: { - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - repetition_penalty: 0.4, - temperature: 0.7, - }, - // livepeer endpoint is handled from the sdk - model: { - [ModelClass.SMALL]: "", - [ModelClass.MEDIUM]: "", - [ModelClass.LARGE]: "", - [ModelClass.EMBEDDING]: "", - [ModelClass.IMAGE]: settings.LIVEPEER_IMAGE_MODEL || "ByteDance/SDXL-Lightning", - }, - }, - [ModelProviderName.INFERA]: { - endpoint: "https://api.infera.org", - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - temperature: 0.6, - }, - model: { - [ModelClass.SMALL]: - settings.SMALL_INFERA_MODEL || "llama3.2:3b", - [ModelClass.MEDIUM]: - settings.MEDIUM_INFERA_MODEL || "mistral-nemo:latest", - [ModelClass.LARGE]: - settings.LARGE_INFERA_MODEL || "mistral-small:latest", - }, - }, -}; - -export function getModel(provider: ModelProviderName, type: ModelClass) { - return models[provider].model[type]; -} - -export function getEndpoint(provider: ModelProviderName) { - return models[provider].endpoint; -} diff --git a/packages/core/src/database/CircuitBreaker.ts b/packages/core/src/database/CircuitBreaker.ts index 728cdf99b4c..b79b08daff0 100644 --- a/packages/core/src/database/CircuitBreaker.ts +++ b/packages/core/src/database/CircuitBreaker.ts @@ -53,10 +53,7 @@ export class CircuitBreaker { this.failureCount++; this.lastFailureTime = Date.now(); - if ( - this.state !== "OPEN" && - this.failureCount >= this.failureThreshold - ) { + if (this.state !== "OPEN" && this.failureCount >= this.failureThreshold) { this.state = "OPEN"; } } diff --git a/packages/core/src/environment.ts b/packages/core/src/environment.ts index 485a1e9d93c..0758d0d31d9 100644 --- a/packages/core/src/environment.ts +++ b/packages/core/src/environment.ts @@ -78,7 +78,10 @@ export const CharacterSchema = z.object({ adjectives: z.array(z.string()), knowledge: z.array(z.string()).optional(), clients: z.array(z.nativeEnum(Clients)), - plugins: z.union([z.array(z.string()), z.array(PluginSchema)]), + plugins: z.union([ + z.array(z.string()), + z.array(PluginSchema), + ]), settings: z .object({ secrets: z.record(z.string()).optional(), diff --git a/packages/core/src/models.ts b/packages/core/src/models.ts index 
235edfc1060..99e8507821a 100644 --- a/packages/core/src/models.ts +++ b/packages/core/src/models.ts @@ -16,8 +16,7 @@ export const models: Models = { [ModelClass.SMALL]: settings.SMALL_OPENAI_MODEL || "gpt-4o-mini", [ModelClass.MEDIUM]: settings.MEDIUM_OPENAI_MODEL || "gpt-4o", [ModelClass.LARGE]: settings.LARGE_OPENAI_MODEL || "gpt-4o", - [ModelClass.EMBEDDING]: - settings.EMBEDDING_OPENAI_MODEL || "text-embedding-3-small", + [ModelClass.EMBEDDING]: settings.EMBEDDING_OPENAI_MODEL || "text-embedding-3-small", [ModelClass.IMAGE]: settings.IMAGE_OPENAI_MODEL || "dall-e-3", }, }, @@ -56,12 +55,9 @@ export const models: Models = { }, endpoint: "https://api.anthropic.com/v1", model: { - [ModelClass.SMALL]: - settings.SMALL_ANTHROPIC_MODEL || "claude-3-haiku-20240307", - [ModelClass.MEDIUM]: - settings.MEDIUM_ANTHROPIC_MODEL || "claude-3-5-sonnet-20241022", - [ModelClass.LARGE]: - settings.LARGE_ANTHROPIC_MODEL || "claude-3-5-sonnet-20241022", + [ModelClass.SMALL]: settings.SMALL_ANTHROPIC_MODEL || "claude-3-haiku-20240307", + [ModelClass.MEDIUM]: settings.MEDIUM_ANTHROPIC_MODEL || "claude-3-5-sonnet-20241022", + [ModelClass.LARGE]: settings.LARGE_ANTHROPIC_MODEL || "claude-3-5-sonnet-20241022", }, }, [ModelProviderName.CLAUDE_VERTEX]: { @@ -94,8 +90,7 @@ export const models: Models = { [ModelClass.SMALL]: settings.SMALL_GROK_MODEL || "grok-2-1212", [ModelClass.MEDIUM]: settings.MEDIUM_GROK_MODEL || "grok-2-1212", [ModelClass.LARGE]: settings.LARGE_GROK_MODEL || "grok-2-1212", - [ModelClass.EMBEDDING]: - settings.EMBEDDING_GROK_MODEL || "grok-2-1212", // not sure about this one + [ModelClass.EMBEDDING]: settings.EMBEDDING_GROK_MODEL || "grok-2-1212", // not sure about this one }, }, [ModelProviderName.GROQ]: { @@ -194,15 +189,15 @@ export const models: Models = { [ModelClass.SMALL]: settings.SMALL_GOOGLE_MODEL || settings.GOOGLE_MODEL || - "gemini-2.0-flash-exp", + "gemini-1.5-flash-latest", [ModelClass.MEDIUM]: settings.MEDIUM_GOOGLE_MODEL || settings.GOOGLE_MODEL || - "gemini-2.0-flash-exp", + "gemini-1.5-flash-latest", [ModelClass.LARGE]: settings.LARGE_GOOGLE_MODEL || settings.GOOGLE_MODEL || - "gemini-2.0-flash-exp", + "gemini-1.5-pro-latest", [ModelClass.EMBEDDING]: settings.EMBEDDING_GOOGLE_MODEL || settings.GOOGLE_MODEL || @@ -400,9 +395,7 @@ export const models: Models = { }, }, [ModelProviderName.VOLENGINE]: { - endpoint: - settings.VOLENGINE_API_URL || - "https://open.volcengineapi.com/api/v3/", + endpoint: settings.VOLENGINE_API_URL || "https://open.volcengineapi.com/api/v3/", settings: { stop: [], maxInputTokens: 128000, @@ -425,7 +418,8 @@ export const models: Models = { settings.VOLENGINE_MODEL || "doubao-pro-256k", [ModelClass.EMBEDDING]: - settings.VOLENGINE_EMBEDDING_MODEL || "doubao-embedding", + settings.VOLENGINE_EMBEDDING_MODEL || + "doubao-embedding", }, }, [ModelProviderName.NANOGPT]: { @@ -442,7 +436,7 @@ export const models: Models = { [ModelClass.SMALL]: settings.SMALL_NANOGPT_MODEL || "gpt-4o-mini", [ModelClass.MEDIUM]: settings.MEDIUM_NANOGPT_MODEL || "gpt-4o", [ModelClass.LARGE]: settings.LARGE_NANOGPT_MODEL || "gpt-4o", - }, + } }, [ModelProviderName.HYPERBOLIC]: { endpoint: "https://api.hyperbolic.xyz/v1", @@ -478,8 +472,7 @@ export const models: Models = { }, model: { [ModelClass.SMALL]: settings.SMALL_VENICE_MODEL || "llama-3.3-70b", - [ModelClass.MEDIUM]: - settings.MEDIUM_VENICE_MODEL || "llama-3.3-70b", + [ModelClass.MEDIUM]: settings.MEDIUM_VENICE_MODEL || "llama-3.3-70b", [ModelClass.LARGE]: settings.LARGE_VENICE_MODEL || "llama-3.1-405b", 
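// Not part of the diff: the resolution pattern used throughout this registry, shown
// as plain TypeScript. Each model slot prefers a class-specific env setting, then a
// provider-wide setting, then a hard-coded default; the values below mirror the
// lines above, and any env override itself is hypothetical.
const smallOpenAiModel = settings.SMALL_OPENAI_MODEL || "gpt-4o-mini";
const largeGoogleModel =
    settings.LARGE_GOOGLE_MODEL || settings.GOOGLE_MODEL || "gemini-1.5-pro-latest";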
[ModelClass.IMAGE]: settings.IMAGE_VENICE_MODEL || "fluently-xl", }, @@ -518,8 +511,7 @@ export const models: Models = { [ModelClass.MEDIUM]: "", [ModelClass.LARGE]: "", [ModelClass.EMBEDDING]: "", - [ModelClass.IMAGE]: - settings.LIVEPEER_IMAGE_MODEL || "ByteDance/SDXL-Lightning", + [ModelClass.IMAGE]: settings.LIVEPEER_IMAGE_MODEL || "ByteDance/SDXL-Lightning", }, }, }; diff --git a/packages/core/src/runtime.ts b/packages/core/src/runtime.ts index 0dad3997008..37bab24adf0 100644 --- a/packages/core/src/runtime.ts +++ b/packages/core/src/runtime.ts @@ -103,7 +103,8 @@ export class AgentRuntime implements IAgentRuntime { */ imageModelProvider: ModelProviderName; - /** + + /** * The model to use for describing images. */ imageVisionModelProvider: ModelProviderName; @@ -330,13 +331,14 @@ export class AgentRuntime implements IAgentRuntime { ); this.imageVisionModelProvider = - this.character.imageVisionModelProvider ?? this.modelProvider; + this.character.imageVisionModelProvider ?? this.modelProvider; elizaLogger.info("Selected model provider:", this.modelProvider); - elizaLogger.info( + elizaLogger.info( "Selected image model provider:", this.imageVisionModelProvider - ); + ); + // Validate model provider if (!Object.values(ModelProviderName).includes(this.modelProvider)) { @@ -424,27 +426,22 @@ export class AgentRuntime implements IAgentRuntime { } async stop() { - elizaLogger.debug("runtime::stop - character", this.character); - // stop services, they don't have a stop function + elizaLogger.debug('runtime::stop - character', this.character) + // stop services, they don't have a stop function // just initialize - // plugins + // plugins // have actions, providers, evaluators (no start/stop) // services (just initialized), clients - // client have a start - for (const cStr in this.clients) { - const c = this.clients[cStr]; - elizaLogger.log( - "runtime::stop - requesting", - cStr, - "client stop for", - this.character.name - ); - c.stop(); - } - // we don't need to unregister with directClient - // don't need to worry about knowledge + // client have a start + for(const cStr in this.clients) { + const c = this.clients[cStr] + elizaLogger.log('runtime::stop - requesting', cStr, 'client stop for', this.character.name) + c.stop() + } + // we don't need to unregister with directClient + // don't need to worry about knowledge } /** diff --git a/packages/core/src/test_resources/createRuntime.ts b/packages/core/src/test_resources/createRuntime.ts index 668fa47b5b2..209b800cbe2 100644 --- a/packages/core/src/test_resources/createRuntime.ts +++ b/packages/core/src/test_resources/createRuntime.ts @@ -19,7 +19,7 @@ import { User } from "./types.ts"; /** * Creates a runtime environment for the agent. - * + * * @param {Object} param - The parameters for creating the runtime. * @param {Record | NodeJS.ProcessEnv} [param.env] - The environment variables. * @param {number} [param.conversationLength] - The length of the conversation. diff --git a/packages/core/src/tests/actions.test.ts b/packages/core/src/tests/actions.test.ts index 1091fe195f5..ab0fcdfb915 100644 --- a/packages/core/src/tests/actions.test.ts +++ b/packages/core/src/tests/actions.test.ts @@ -20,23 +20,13 @@ describe("Actions", () => { }, ], [ - { - user: "user1", - content: { text: "Hey {{user2}}, how are you?" }, - }, - { - user: "user2", - content: { text: "I'm good {{user1}}, thanks!" }, - }, + { user: "user1", content: { text: "Hey {{user2}}, how are you?" } }, + { user: "user2", content: { text: "I'm good {{user1}}, thanks!" 
} }, ], ], similes: ["say hi", "welcome"], - handler: async () => { - throw new Error("Not implemented"); - }, - validate: async () => { - throw new Error("Not implemented"); - }, + handler: async () => { throw new Error("Not implemented"); }, + validate: async () => { throw new Error("Not implemented"); }, }, { name: "farewell", @@ -48,38 +38,24 @@ describe("Actions", () => { ], ], similes: ["say bye", "leave"], - handler: async () => { - throw new Error("Not implemented"); - }, - validate: async () => { - throw new Error("Not implemented"); - }, + handler: async () => { throw new Error("Not implemented"); }, + validate: async () => { throw new Error("Not implemented"); }, }, { name: "help", description: "Get assistance", examples: [ [ - { - user: "user1", - content: { text: "Can you help me {{user2}}?" }, - }, + { user: "user1", content: { text: "Can you help me {{user2}}?" } }, { user: "user2", - content: { - text: "Of course {{user1}}, what do you need?", - action: "assist", - }, + content: { text: "Of course {{user1}}, what do you need?", action: "assist" } }, ], ], similes: ["assist", "support"], - handler: async () => { - throw new Error("Not implemented"); - }, - validate: async () => { - throw new Error("Not implemented"); - }, + handler: async () => { throw new Error("Not implemented"); }, + validate: async () => { throw new Error("Not implemented"); }, }, ]; @@ -110,13 +86,8 @@ describe("Actions", () => { describe("formatActionNames", () => { it("should format action names correctly", () => { - const formatted = formatActionNames([ - mockActions[0], - mockActions[1], - ]); - expect(formatted).toMatch( - /^(greet|farewell)(, (greet|farewell))?$/ - ); + const formatted = formatActionNames([mockActions[0], mockActions[1]]); + expect(formatted).toMatch(/^(greet|farewell)(, (greet|farewell))?$/); }); it("should handle single action", () => { @@ -152,7 +123,7 @@ describe("Actions", () => { describe("Action Structure", () => { it("should validate action structure", () => { - mockActions.forEach((action) => { + mockActions.forEach(action => { expect(action).toHaveProperty("name"); expect(action).toHaveProperty("description"); expect(action).toHaveProperty("examples"); @@ -165,9 +136,9 @@ describe("Actions", () => { }); it("should validate example structure", () => { - mockActions.forEach((action) => { - action.examples.forEach((example) => { - example.forEach((message) => { + mockActions.forEach(action => { + action.examples.forEach(example => { + example.forEach(message => { expect(message).toHaveProperty("user"); expect(message).toHaveProperty("content"); expect(message.content).toHaveProperty("text"); @@ -177,7 +148,7 @@ describe("Actions", () => { }); it("should have unique action names", () => { - const names = mockActions.map((action) => action.name); + const names = mockActions.map(action => action.name); const uniqueNames = new Set(names); expect(names.length).toBe(uniqueNames.size); }); diff --git a/packages/core/src/tests/context.test.ts b/packages/core/src/tests/context.test.ts index afbaa1c4643..6bf391282b2 100644 --- a/packages/core/src/tests/context.test.ts +++ b/packages/core/src/tests/context.test.ts @@ -160,7 +160,7 @@ describe("composeContext", () => { }); it("should handle missing values in handlebars template", () => { - const state = { ...baseState }; + const state = {...baseState} const template = "Hello, {{userName}}!"; const result = composeContext({ diff --git a/packages/core/src/tests/environment.test.ts b/packages/core/src/tests/environment.test.ts index 
fe690f4e0e1..f38b683919f 100644 --- a/packages/core/src/tests/environment.test.ts +++ b/packages/core/src/tests/environment.test.ts @@ -1,20 +1,20 @@ -import { describe, it, expect, beforeEach, afterEach } from "vitest"; -import { validateEnv, validateCharacterConfig } from "../environment"; -import { Clients, ModelProviderName } from "../types"; +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; +import { validateEnv, validateCharacterConfig } from '../environment'; +import { Clients, ModelProviderName } from '../types'; -describe("Environment Configuration", () => { +describe('Environment Configuration', () => { const originalEnv = process.env; beforeEach(() => { process.env = { ...originalEnv, - OPENAI_API_KEY: "sk-test123", - REDPILL_API_KEY: "test-key", - GROK_API_KEY: "test-key", - GROQ_API_KEY: "gsk_test123", - OPENROUTER_API_KEY: "test-key", - GOOGLE_GENERATIVE_AI_API_KEY: "test-key", - ELEVENLABS_XI_API_KEY: "test-key", + OPENAI_API_KEY: 'sk-test123', + REDPILL_API_KEY: 'test-key', + GROK_API_KEY: 'test-key', + GROQ_API_KEY: 'gsk_test123', + OPENROUTER_API_KEY: 'test-key', + GOOGLE_GENERATIVE_AI_API_KEY: 'test-key', + ELEVENLABS_XI_API_KEY: 'test-key', }; }); @@ -22,183 +22,161 @@ describe("Environment Configuration", () => { process.env = originalEnv; }); - it("should validate correct environment variables", () => { + it('should validate correct environment variables', () => { expect(() => validateEnv()).not.toThrow(); }); - it("should throw error for invalid OpenAI API key format", () => { - process.env.OPENAI_API_KEY = "invalid-key"; - expect(() => validateEnv()).toThrow( - "OpenAI API key must start with 'sk-'" - ); + it('should throw error for invalid OpenAI API key format', () => { + process.env.OPENAI_API_KEY = 'invalid-key'; + expect(() => validateEnv()).toThrow("OpenAI API key must start with 'sk-'"); }); - it("should throw error for invalid GROQ API key format", () => { - process.env.GROQ_API_KEY = "invalid-key"; - expect(() => validateEnv()).toThrow( - "GROQ API key must start with 'gsk_'" - ); + it('should throw error for invalid GROQ API key format', () => { + process.env.GROQ_API_KEY = 'invalid-key'; + expect(() => validateEnv()).toThrow("GROQ API key must start with 'gsk_'"); }); - it("should throw error for missing required keys", () => { + it('should throw error for missing required keys', () => { delete process.env.REDPILL_API_KEY; - expect(() => validateEnv()).toThrow("REDPILL_API_KEY: Required"); + expect(() => validateEnv()).toThrow('REDPILL_API_KEY: Required'); }); - it("should throw error for multiple missing required keys", () => { + it('should throw error for multiple missing required keys', () => { delete process.env.REDPILL_API_KEY; delete process.env.GROK_API_KEY; delete process.env.OPENROUTER_API_KEY; expect(() => validateEnv()).toThrow( - "Environment validation failed:\n" + - "REDPILL_API_KEY: Required\n" + - "GROK_API_KEY: Required\n" + - "OPENROUTER_API_KEY: Required" + 'Environment validation failed:\n' + + 'REDPILL_API_KEY: Required\n' + + 'GROK_API_KEY: Required\n' + + 'OPENROUTER_API_KEY: Required' ); }); }); -describe("Character Configuration", () => { +describe('Character Configuration', () => { const validCharacterConfig = { - name: "Test Character", + name: 'Test Character', modelProvider: ModelProviderName.OPENAI, - bio: "Test bio", - lore: ["Test lore"], - messageExamples: [ - [ - { - user: "user1", - content: { - text: "Hello", - }, - }, - ], - ], - postExamples: ["Test post"], - topics: ["topic1"], - adjectives: 
["friendly"], + bio: 'Test bio', + lore: ['Test lore'], + messageExamples: [[ + { + user: 'user1', + content: { + text: 'Hello', + } + } + ]], + postExamples: ['Test post'], + topics: ['topic1'], + adjectives: ['friendly'], clients: [Clients.DISCORD], - plugins: ["test-plugin"], + plugins: ['test-plugin'], style: { - all: ["style1"], - chat: ["chat-style"], - post: ["post-style"], - }, + all: ['style1'], + chat: ['chat-style'], + post: ['post-style'] + } }; - it("should validate correct character configuration", () => { - expect(() => - validateCharacterConfig(validCharacterConfig) - ).not.toThrow(); + it('should validate correct character configuration', () => { + expect(() => validateCharacterConfig(validCharacterConfig)).not.toThrow(); }); - it("should validate configuration with optional fields", () => { + it('should validate configuration with optional fields', () => { const configWithOptionals = { ...validCharacterConfig, - id: "123e4567-e89b-12d3-a456-426614174000", - system: "Test system", + id: '123e4567-e89b-12d3-a456-426614174000', + system: 'Test system', templates: { - greeting: "Hello!", + greeting: 'Hello!' }, - knowledge: ["fact1"], + knowledge: ['fact1'], settings: { secrets: { - key: "value", + key: 'value' }, voice: { - model: "test-model", - url: "http://example.com", - }, - }, + model: 'test-model', + url: 'http://example.com' + } + } }; - expect(() => - validateCharacterConfig(configWithOptionals) - ).not.toThrow(); + expect(() => validateCharacterConfig(configWithOptionals)).not.toThrow(); }); - it("should throw error for missing required fields", () => { + it('should throw error for missing required fields', () => { const invalidConfig = { ...validCharacterConfig }; delete (invalidConfig as any).name; expect(() => validateCharacterConfig(invalidConfig)).toThrow(); }); - it("should validate plugin objects in plugins array", () => { + it('should validate plugin objects in plugins array', () => { const configWithPluginObjects = { ...validCharacterConfig, - plugins: [ - { - name: "test-plugin", - description: "Test description", - }, - ], + plugins: [{ + name: 'test-plugin', + description: 'Test description' + }] }; - expect(() => - validateCharacterConfig(configWithPluginObjects) - ).not.toThrow(); + expect(() => validateCharacterConfig(configWithPluginObjects)).not.toThrow(); }); - it("should validate client-specific configurations", () => { + it('should validate client-specific configurations', () => { const configWithClientConfig = { ...validCharacterConfig, clientConfig: { discord: { shouldIgnoreBotMessages: true, - shouldIgnoreDirectMessages: false, + shouldIgnoreDirectMessages: false }, telegram: { shouldIgnoreBotMessages: true, - shouldIgnoreDirectMessages: true, - }, - }, + shouldIgnoreDirectMessages: true + } + } }; - expect(() => - validateCharacterConfig(configWithClientConfig) - ).not.toThrow(); + expect(() => validateCharacterConfig(configWithClientConfig)).not.toThrow(); }); - it("should validate twitter profile configuration", () => { + it('should validate twitter profile configuration', () => { const configWithTwitter = { ...validCharacterConfig, twitterProfile: { - username: "testuser", - screenName: "Test User", - bio: "Test bio", - nicknames: ["test"], - }, + username: 'testuser', + screenName: 'Test User', + bio: 'Test bio', + nicknames: ['test'] + } }; expect(() => validateCharacterConfig(configWithTwitter)).not.toThrow(); }); - it("should validate model endpoint override", () => { + it('should validate model endpoint override', () => { const 
configWithEndpoint = { ...validCharacterConfig, - modelEndpointOverride: "custom-endpoint", + modelEndpointOverride: 'custom-endpoint' }; expect(() => validateCharacterConfig(configWithEndpoint)).not.toThrow(); }); - it("should validate message examples with additional properties", () => { + it('should validate message examples with additional properties', () => { const configWithComplexMessage = { ...validCharacterConfig, - messageExamples: [ - [ - { - user: "user1", - content: { - text: "Hello", - action: "wave", - source: "chat", - url: "http://example.com", - inReplyTo: "123e4567-e89b-12d3-a456-426614174000", - attachments: ["file1"], - customField: "value", - }, - }, - ], - ], + messageExamples: [[{ + user: 'user1', + content: { + text: 'Hello', + action: 'wave', + source: 'chat', + url: 'http://example.com', + inReplyTo: '123e4567-e89b-12d3-a456-426614174000', + attachments: ['file1'], + customField: 'value' + } + }]] }; - expect(() => - validateCharacterConfig(configWithComplexMessage) - ).not.toThrow(); + expect(() => validateCharacterConfig(configWithComplexMessage)).not.toThrow(); }); }); diff --git a/packages/core/src/tests/knowledge.test.ts b/packages/core/src/tests/knowledge.test.ts index c637ff4729f..436954f975b 100644 --- a/packages/core/src/tests/knowledge.test.ts +++ b/packages/core/src/tests/knowledge.test.ts @@ -1,126 +1,113 @@ -import { describe, it, expect, vi, beforeEach } from "vitest"; -import knowledge from "../knowledge"; -import { AgentRuntime } from "../runtime"; -import { KnowledgeItem, Memory } from "../types"; -import { getEmbeddingZeroVector } from "../embedding"; +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import knowledge from '../knowledge'; +import { AgentRuntime } from '../runtime'; +import { KnowledgeItem, Memory } from '../types'; +import { getEmbeddingZeroVector } from '../embedding'; // Mock dependencies -vi.mock("../embedding", () => ({ +vi.mock('../embedding', () => ({ embed: vi.fn().mockResolvedValue(new Float32Array(1536).fill(0)), - getEmbeddingZeroVector: vi - .fn() - .mockReturnValue(new Float32Array(1536).fill(0)), + getEmbeddingZeroVector: vi.fn().mockReturnValue(new Float32Array(1536).fill(0)) })); -vi.mock("../generation", () => ({ - splitChunks: vi.fn().mockImplementation(async (text) => [text]), +vi.mock('../generation', () => ({ + splitChunks: vi.fn().mockImplementation(async (text) => [text]) })); -vi.mock("../uuid", () => ({ - stringToUuid: vi.fn().mockImplementation((str) => str), +vi.mock('../uuid', () => ({ + stringToUuid: vi.fn().mockImplementation((str) => str) })); -describe("Knowledge Module", () => { - describe("preprocess", () => { - it("should handle invalid inputs", () => { - expect(knowledge.preprocess(null)).toBe(""); - expect(knowledge.preprocess(undefined)).toBe(""); - expect(knowledge.preprocess("")).toBe(""); +describe('Knowledge Module', () => { + describe('preprocess', () => { + it('should handle invalid inputs', () => { + expect(knowledge.preprocess(null)).toBe(''); + expect(knowledge.preprocess(undefined)).toBe(''); + expect(knowledge.preprocess('')).toBe(''); }); - it("should remove code blocks and inline code", () => { - const input = - "Here is some code: ```const x = 1;``` and `inline code`"; - expect(knowledge.preprocess(input)).toBe("here is some code: and"); + it('should remove code blocks and inline code', () => { + const input = 'Here is some code: ```const x = 1;``` and `inline code`'; + expect(knowledge.preprocess(input)).toBe('here is some code: and'); }); - it("should handle 
markdown formatting", () => { - const input = - "# Header\n## Subheader\n[Link](http://example.com)\n![Image](image.jpg)"; - expect(knowledge.preprocess(input)).toBe( - "header subheader link image" - ); + it('should handle markdown formatting', () => { + const input = '# Header\n## Subheader\n[Link](http://example.com)\n![Image](image.jpg)'; + expect(knowledge.preprocess(input)).toBe('header subheader link image'); }); - it("should simplify URLs", () => { - const input = "Visit https://www.example.com/path?param=value"; - expect(knowledge.preprocess(input)).toBe( - "visit example.com/path?param=value" - ); + it('should simplify URLs', () => { + const input = 'Visit https://www.example.com/path?param=value'; + expect(knowledge.preprocess(input)).toBe('visit example.com/path?param=value'); }); - it("should remove Discord mentions and HTML tags", () => { - const input = "Hello <@123456789> and
<div>HTML content</div>
"; - expect(knowledge.preprocess(input)).toBe("hello and html content"); + it('should remove Discord mentions and HTML tags', () => { + const input = 'Hello <@123456789> and
<div>HTML content</div>
'; + expect(knowledge.preprocess(input)).toBe('hello and html content'); }); - it("should normalize whitespace and newlines", () => { - const input = "Multiple spaces\n\n\nand\nnewlines"; - expect(knowledge.preprocess(input)).toBe( - "multiple spaces and newlines" - ); + it('should normalize whitespace and newlines', () => { + const input = 'Multiple spaces\n\n\nand\nnewlines'; + expect(knowledge.preprocess(input)).toBe('multiple spaces and newlines'); }); - it("should remove comments", () => { - const input = "/* Block comment */ Normal text // Line comment"; - expect(knowledge.preprocess(input)).toBe("normal text"); + it('should remove comments', () => { + const input = '/* Block comment */ Normal text // Line comment'; + expect(knowledge.preprocess(input)).toBe('normal text'); }); }); - describe("get and set", () => { + describe('get and set', () => { let mockRuntime: AgentRuntime; beforeEach(() => { mockRuntime = { - agentId: "test-agent", + agentId: 'test-agent', knowledgeManager: { searchMemoriesByEmbedding: vi.fn().mockResolvedValue([ { - content: { - text: "test fragment", - source: "source1", - }, - similarity: 0.9, - }, + content: { text: 'test fragment', source: 'source1' }, + similarity: 0.9 + } ]), - createMemory: vi.fn().mockResolvedValue(undefined), + createMemory: vi.fn().mockResolvedValue(undefined) }, documentsManager: { getMemoryById: vi.fn().mockResolvedValue({ - id: "source1", - content: { text: "test document" }, + id: 'source1', + content: { text: 'test document' } }), - createMemory: vi.fn().mockResolvedValue(undefined), - }, + createMemory: vi.fn().mockResolvedValue(undefined) + } } as unknown as AgentRuntime; }); - describe("get", () => { - it("should handle invalid messages", async () => { + describe('get', () => { + it('should handle invalid messages', async () => { const invalidMessage = {} as Memory; const result = await knowledge.get(mockRuntime, invalidMessage); expect(result).toEqual([]); }); - it("should retrieve knowledge items based on message content", async () => { + it('should retrieve knowledge items based on message content', async () => { const message: Memory = { - agentId: "test-agent", - content: { text: "test query" }, + agentId: 'test-agent', + content: { text: 'test query' } } as unknown as Memory; const result = await knowledge.get(mockRuntime, message); expect(result).toHaveLength(1); expect(result[0]).toEqual({ - id: "source1", - content: { text: "test document" }, + id: 'source1', + content: { text: 'test document' } }); }); - it("should handle empty processed text", async () => { + it('should handle empty processed text', async () => { const message: Memory = { - agentId: "test-agent", - content: { text: "```code only```" }, + agentId: 'test-agent', + content: { text: '```code only```' } } as unknown as Memory; const result = await knowledge.get(mockRuntime, message); @@ -128,52 +115,46 @@ describe("Knowledge Module", () => { }); }); - describe("set", () => { - it("should store knowledge item and its fragments", async () => { + describe('set', () => { + it('should store knowledge item and its fragments', async () => { const item: KnowledgeItem = { - id: "test-id-1234-5678-9101-112131415161", - content: { text: "test content" }, + id: 'test-id-1234-5678-9101-112131415161', + content: { text: 'test content' } }; await knowledge.set(mockRuntime, item); // Check if document was created - expect( - mockRuntime.documentsManager.createMemory - ).toHaveBeenCalledWith( + expect(mockRuntime.documentsManager.createMemory).toHaveBeenCalledWith( 
expect.objectContaining({ id: item.id, content: item.content, - embedding: getEmbeddingZeroVector(), + embedding: getEmbeddingZeroVector() }) ); // Check if fragment was created - expect( - mockRuntime.knowledgeManager.createMemory - ).toHaveBeenCalledWith( + expect(mockRuntime.knowledgeManager.createMemory).toHaveBeenCalledWith( expect.objectContaining({ content: { source: item.id, - text: expect.any(String), + text: expect.any(String) }, - embedding: expect.any(Float32Array), + embedding: expect.any(Float32Array) }) ); }); - it("should use default chunk size and bleed", async () => { + it('should use default chunk size and bleed', async () => { const item: KnowledgeItem = { - id: "test-id-1234-5678-9101-112131415161", - content: { text: "test content" }, + id: 'test-id-1234-5678-9101-112131415161', + content: { text: 'test content' } }; await knowledge.set(mockRuntime, item); // Verify default parameters were used - expect( - mockRuntime.knowledgeManager.createMemory - ).toHaveBeenCalledTimes(1); + expect(mockRuntime.knowledgeManager.createMemory).toHaveBeenCalledTimes(1); }); }); }); diff --git a/packages/core/src/tests/messages.test.ts b/packages/core/src/tests/messages.test.ts index 66ff029ba67..bbebe103a6d 100644 --- a/packages/core/src/tests/messages.test.ts +++ b/packages/core/src/tests/messages.test.ts @@ -122,7 +122,9 @@ describe("Messages Library", () => { // Assertions expect(formattedMessages).toContain("Check this attachment"); - expect(formattedMessages).toContain("Attachments: ["); + expect(formattedMessages).toContain( + "Attachments: [" + ); }); test("formatMessages should handle empty attachments gracefully", () => { @@ -146,28 +148,28 @@ describe("Messages Library", () => { }); }); -describe("Messages", () => { +describe('Messages', () => { const mockActors: Actor[] = [ { id: "123e4567-e89b-12d3-a456-426614174006" as UUID, - name: "Alice", - username: "alice", + name: 'Alice', + username: 'alice', details: { - tagline: "Software Engineer", - summary: "Full-stack developer with 5 years experience", - quote: "", - }, + tagline: 'Software Engineer', + summary: 'Full-stack developer with 5 years experience', + quote: "" + } }, { id: "123e4567-e89b-12d3-a456-426614174007" as UUID, - name: "Bob", - username: "bob", + name: 'Bob', + username: 'bob', details: { - tagline: "Product Manager", - summary: "Experienced in agile methodologies", - quote: "", - }, - }, + tagline: 'Product Manager', + summary: 'Experienced in agile methodologies', + quote: "" + } + } ]; const mockMessages: Memory[] = [ @@ -177,10 +179,10 @@ describe("Messages", () => { userId: mockActors[0].id, createdAt: Date.now() - 5000, // 5 seconds ago content: { - text: "Hello everyone!", - action: "wave", + text: 'Hello everyone!', + action: 'wave' } as Content, - agentId: "123e4567-e89b-12d3-a456-426614174001", + agentId: "123e4567-e89b-12d3-a456-426614174001" }, { id: "123e4567-e89b-12d3-a456-426614174010" as UUID, @@ -188,171 +190,144 @@ describe("Messages", () => { userId: mockActors[1].id, createdAt: Date.now() - 60000, // 1 minute ago content: { - text: "Hi Alice!", + text: 'Hi Alice!', attachments: [ { id: "123e4567-e89b-12d3-a456-426614174011" as UUID, - title: "Document", - url: "https://example.com/doc.pdf", - }, - ], + title: 'Document', + url: 'https://example.com/doc.pdf' + } + ] } as Content, - agentId: "123e4567-e89b-12d3-a456-426614174001", - }, + agentId: "123e4567-e89b-12d3-a456-426614174001" + } ]; - describe("getActorDetails", () => { - it("should retrieve actor details from database", async 
() => { + describe('getActorDetails', () => { + it('should retrieve actor details from database', async () => { const mockRuntime = { databaseAdapter: { - getParticipantsForRoom: vi - .fn() - .mockResolvedValue([ - mockActors[0].id, - mockActors[1].id, - ]), + getParticipantsForRoom: vi.fn().mockResolvedValue([mockActors[0].id, mockActors[1].id]), getAccountById: vi.fn().mockImplementation((id) => { - const actor = mockActors.find((a) => a.id === id); + const actor = mockActors.find(a => a.id === id); return Promise.resolve(actor); - }), - }, + }) + } }; const actors = await getActorDetails({ runtime: mockRuntime as any, - roomId: "123e4567-e89b-12d3-a456-426614174009" as UUID, + roomId: "123e4567-e89b-12d3-a456-426614174009" as UUID }); expect(actors).toHaveLength(2); - expect(actors[0].name).toBe("Alice"); - expect(actors[1].name).toBe("Bob"); - expect( - mockRuntime.databaseAdapter.getParticipantsForRoom - ).toHaveBeenCalled(); + expect(actors[0].name).toBe('Alice'); + expect(actors[1].name).toBe('Bob'); + expect(mockRuntime.databaseAdapter.getParticipantsForRoom).toHaveBeenCalled(); }); - it("should filter out null actors", async () => { + it('should filter out null actors', async () => { const invalidId = "123e4567-e89b-12d3-a456-426614174012" as UUID; const mockRuntime = { databaseAdapter: { - getParticipantsForRoom: vi - .fn() - .mockResolvedValue([mockActors[0].id, invalidId]), + getParticipantsForRoom: vi.fn().mockResolvedValue([mockActors[0].id, invalidId]), getAccountById: vi.fn().mockImplementation((id) => { - const actor = mockActors.find((a) => a.id === id); + const actor = mockActors.find(a => a.id === id); return Promise.resolve(actor || null); - }), - }, + }) + } }; const actors = await getActorDetails({ runtime: mockRuntime as any, - roomId: "123e4567-e89b-12d3-a456-426614174009" as UUID, + roomId: "123e4567-e89b-12d3-a456-426614174009" as UUID }); expect(actors).toHaveLength(1); - expect(actors[0].name).toBe("Alice"); + expect(actors[0].name).toBe('Alice'); }); }); - describe("formatActors", () => { - it("should format actors with complete details", () => { + describe('formatActors', () => { + it('should format actors with complete details', () => { const formatted = formatActors({ actors: mockActors }); - expect(formatted).toContain("Alice: Software Engineer"); - expect(formatted).toContain( - "Full-stack developer with 5 years experience" - ); - expect(formatted).toContain("Bob: Product Manager"); - expect(formatted).toContain("Experienced in agile methodologies"); + expect(formatted).toContain('Alice: Software Engineer'); + expect(formatted).toContain('Full-stack developer with 5 years experience'); + expect(formatted).toContain('Bob: Product Manager'); + expect(formatted).toContain('Experienced in agile methodologies'); }); - it("should handle actors without details", () => { + it('should handle actors without details', () => { const actorsWithoutDetails: Actor[] = [ { id: "123e4567-e89b-12d3-a456-426614174013" as UUID, - name: "Charlie", - username: "charlie", + name: 'Charlie', + username: 'charlie', details: { tagline: "Tag", summary: "Summary", - quote: "Quote", - }, - }, + quote: "Quote" + } + } ]; const formatted = formatActors({ actors: actorsWithoutDetails }); - expect(formatted).toBe("Charlie: Tag\nSummary"); + expect(formatted).toBe('Charlie: Tag\nSummary'); }); - it("should handle empty actors array", () => { + it('should handle empty actors array', () => { const formatted = formatActors({ actors: [] }); - expect(formatted).toBe(""); + 
expect(formatted).toBe(''); }); }); - describe("formatMessages", () => { - it("should format messages with all details", () => { - const formatted = formatMessages({ - messages: mockMessages, - actors: mockActors, - }); - const lines = formatted.split("\n"); + describe('formatMessages', () => { + it('should format messages with all details', () => { + const formatted = formatMessages({ messages: mockMessages, actors: mockActors }); + const lines = formatted.split('\n'); expect(lines[1]).toContain("Alice"); expect(lines[1]).toContain("(wave)"); expect(lines[1]).toContain("(just now)"); }); - it("should handle messages from unknown users", () => { - const messagesWithUnknownUser: Memory[] = [ - { - id: "123e4567-e89b-12d3-a456-426614174014" as UUID, - roomId: "123e4567-e89b-12d3-a456-426614174009" as UUID, - userId: "123e4567-e89b-12d3-a456-426614174015" as UUID, - createdAt: Date.now(), - content: { text: "Test message" } as Content, - agentId: "123e4567-e89b-12d3-a456-426614174001", - }, - ]; - - const formatted = formatMessages({ - messages: messagesWithUnknownUser, - actors: mockActors, - }); - expect(formatted).toContain("Unknown User: Test message"); + it('should handle messages from unknown users', () => { + const messagesWithUnknownUser: Memory[] = [{ + id: "123e4567-e89b-12d3-a456-426614174014" as UUID, + roomId: "123e4567-e89b-12d3-a456-426614174009" as UUID, + userId: "123e4567-e89b-12d3-a456-426614174015" as UUID, + createdAt: Date.now(), + content: { text: 'Test message' } as Content, + agentId: "123e4567-e89b-12d3-a456-426614174001" + }]; + + const formatted = formatMessages({ messages: messagesWithUnknownUser, actors: mockActors }); + expect(formatted).toContain('Unknown User: Test message'); }); - it("should handle messages with no action", () => { - const messagesWithoutAction: Memory[] = [ - { - id: "123e4567-e89b-12d3-a456-426614174016" as UUID, - roomId: "123e4567-e89b-12d3-a456-426614174009" as UUID, - userId: mockActors[0].id, - createdAt: Date.now(), - content: { text: "Simple message" } as Content, - agentId: "123e4567-e89b-12d3-a456-426614174001", - }, - ]; - - const formatted = formatMessages({ - messages: messagesWithoutAction, - actors: mockActors, - }); - expect(formatted).not.toContain("()"); - expect(formatted).toContain("Simple message"); + it('should handle messages with no action', () => { + const messagesWithoutAction: Memory[] = [{ + id: "123e4567-e89b-12d3-a456-426614174016" as UUID, + roomId: "123e4567-e89b-12d3-a456-426614174009" as UUID, + userId: mockActors[0].id, + createdAt: Date.now(), + content: { text: 'Simple message' } as Content, + agentId: "123e4567-e89b-12d3-a456-426614174001" + }]; + + const formatted = formatMessages({ messages: messagesWithoutAction, actors: mockActors }); + expect(formatted).not.toContain('()'); + expect(formatted).toContain('Simple message'); }); - it("should handle empty messages array", () => { - const formatted = formatMessages({ - messages: [], - actors: mockActors, - }); - expect(formatted).toBe(""); + it('should handle empty messages array', () => { + const formatted = formatMessages({ messages: [], actors: mockActors }); + expect(formatted).toBe(''); }); }); - describe("formatTimestamp", () => { - it("should handle exact time boundaries", () => { + describe('formatTimestamp', () => { + it('should handle exact time boundaries', () => { const now = Date.now(); - expect(formatTimestamp(now)).toContain("just now"); + expect(formatTimestamp(now)).toContain('just now'); }); }); }); diff --git 
a/packages/core/src/tests/models.test.ts b/packages/core/src/tests/models.test.ts index 90ca87dba76..f336093cfdd 100644 --- a/packages/core/src/tests/models.test.ts +++ b/packages/core/src/tests/models.test.ts @@ -26,9 +26,7 @@ vi.mock("../settings", () => { describe("Model Provider Configuration", () => { describe("OpenAI Provider", () => { test("should have correct endpoint", () => { - expect(models[ModelProviderName.OPENAI].endpoint).toBe( - "https://api.openai.com/v1" - ); + expect(models[ModelProviderName.OPENAI].endpoint).toBe("https://api.openai.com/v1"); }); test("should have correct model mappings", () => { @@ -36,9 +34,7 @@ describe("Model Provider Configuration", () => { expect(openAIModels[ModelClass.SMALL]).toBe("gpt-4o-mini"); expect(openAIModels[ModelClass.MEDIUM]).toBe("gpt-4o"); expect(openAIModels[ModelClass.LARGE]).toBe("gpt-4o"); - expect(openAIModels[ModelClass.EMBEDDING]).toBe( - "text-embedding-3-small" - ); + expect(openAIModels[ModelClass.EMBEDDING]).toBe("text-embedding-3-small"); expect(openAIModels[ModelClass.IMAGE]).toBe("dall-e-3"); }); @@ -54,22 +50,14 @@ describe("Model Provider Configuration", () => { describe("Anthropic Provider", () => { test("should have correct endpoint", () => { - expect(models[ModelProviderName.ANTHROPIC].endpoint).toBe( - "https://api.anthropic.com/v1" - ); + expect(models[ModelProviderName.ANTHROPIC].endpoint).toBe("https://api.anthropic.com/v1"); }); test("should have correct model mappings", () => { const anthropicModels = models[ModelProviderName.ANTHROPIC].model; - expect(anthropicModels[ModelClass.SMALL]).toBe( - "claude-3-haiku-20240307" - ); - expect(anthropicModels[ModelClass.MEDIUM]).toBe( - "claude-3-5-sonnet-20241022" - ); - expect(anthropicModels[ModelClass.LARGE]).toBe( - "claude-3-5-sonnet-20241022" - ); + expect(anthropicModels[ModelClass.SMALL]).toBe("claude-3-haiku-20240307"); + expect(anthropicModels[ModelClass.MEDIUM]).toBe("claude-3-5-sonnet-20241022"); + expect(anthropicModels[ModelClass.LARGE]).toBe("claude-3-5-sonnet-20241022"); }); test("should have correct settings configuration", () => { @@ -84,28 +72,16 @@ describe("Model Provider Configuration", () => { describe("LlamaCloud Provider", () => { test("should have correct endpoint", () => { - expect(models[ModelProviderName.LLAMACLOUD].endpoint).toBe( - "https://api.llamacloud.com/v1" - ); + expect(models[ModelProviderName.LLAMACLOUD].endpoint).toBe("https://api.llamacloud.com/v1"); }); test("should have correct model mappings", () => { const llamaCloudModels = models[ModelProviderName.LLAMACLOUD].model; - expect(llamaCloudModels[ModelClass.SMALL]).toBe( - "meta-llama/Llama-3.2-3B-Instruct-Turbo" - ); - expect(llamaCloudModels[ModelClass.MEDIUM]).toBe( - "meta-llama-3.1-8b-instruct" - ); - expect(llamaCloudModels[ModelClass.LARGE]).toBe( - "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo" - ); - expect(llamaCloudModels[ModelClass.EMBEDDING]).toBe( - "togethercomputer/m2-bert-80M-32k-retrieval" - ); - expect(llamaCloudModels[ModelClass.IMAGE]).toBe( - "black-forest-labs/FLUX.1-schnell" - ); + expect(llamaCloudModels[ModelClass.SMALL]).toBe("meta-llama/Llama-3.2-3B-Instruct-Turbo"); + expect(llamaCloudModels[ModelClass.MEDIUM]).toBe("meta-llama-3.1-8b-instruct"); + expect(llamaCloudModels[ModelClass.LARGE]).toBe("meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo"); + expect(llamaCloudModels[ModelClass.EMBEDDING]).toBe("togethercomputer/m2-bert-80M-32k-retrieval"); + expect(llamaCloudModels[ModelClass.IMAGE]).toBe("black-forest-labs/FLUX.1-schnell"); }); test("should 
have correct settings configuration", () => { @@ -120,11 +96,9 @@ describe("Model Provider Configuration", () => { describe("Google Provider", () => { test("should have correct model mappings", () => { const googleModels = models[ModelProviderName.GOOGLE].model; - expect(googleModels[ModelClass.SMALL]).toBe("gemini-2.0-flash-exp"); - expect(googleModels[ModelClass.MEDIUM]).toBe( - "gemini-2.0-flash-exp" - ); - expect(googleModels[ModelClass.LARGE]).toBe("gemini-2.0-flash-exp"); + expect(googleModels[ModelClass.SMALL]).toBe("gemini-1.5-flash-latest"); + expect(googleModels[ModelClass.MEDIUM]).toBe("gemini-1.5-flash-latest"); + expect(googleModels[ModelClass.LARGE]).toBe("gemini-1.5-pro-latest"); }); }); }); @@ -132,50 +106,28 @@ describe("Model Provider Configuration", () => { describe("Model Retrieval Functions", () => { describe("getModel function", () => { test("should retrieve correct models for different providers and classes", () => { - expect(getModel(ModelProviderName.OPENAI, ModelClass.SMALL)).toBe( - "gpt-4o-mini" - ); - expect( - getModel(ModelProviderName.ANTHROPIC, ModelClass.LARGE) - ).toBe("claude-3-5-sonnet-20241022"); - expect( - getModel(ModelProviderName.LLAMACLOUD, ModelClass.MEDIUM) - ).toBe("meta-llama-3.1-8b-instruct"); + expect(getModel(ModelProviderName.OPENAI, ModelClass.SMALL)).toBe("gpt-4o-mini"); + expect(getModel(ModelProviderName.ANTHROPIC, ModelClass.LARGE)).toBe("claude-3-5-sonnet-20241022"); + expect(getModel(ModelProviderName.LLAMACLOUD, ModelClass.MEDIUM)).toBe("meta-llama-3.1-8b-instruct"); }); test("should handle environment variable overrides", () => { - expect( - getModel(ModelProviderName.OPENROUTER, ModelClass.SMALL) - ).toBe("mock-small-model"); - expect( - getModel(ModelProviderName.OPENROUTER, ModelClass.LARGE) - ).toBe("mock-large-model"); - expect( - getModel(ModelProviderName.ETERNALAI, ModelClass.SMALL) - ).toBe("mock-eternal-model"); + expect(getModel(ModelProviderName.OPENROUTER, ModelClass.SMALL)).toBe("mock-small-model"); + expect(getModel(ModelProviderName.OPENROUTER, ModelClass.LARGE)).toBe("mock-large-model"); + expect(getModel(ModelProviderName.ETERNALAI, ModelClass.SMALL)).toBe("mock-eternal-model"); }); test("should throw error for invalid model provider", () => { - expect(() => - getModel("INVALID_PROVIDER" as any, ModelClass.SMALL) - ).toThrow(); + expect(() => getModel("INVALID_PROVIDER" as any, ModelClass.SMALL)).toThrow(); }); }); describe("getEndpoint function", () => { test("should retrieve correct endpoints for different providers", () => { - expect(getEndpoint(ModelProviderName.OPENAI)).toBe( - "https://api.openai.com/v1" - ); - expect(getEndpoint(ModelProviderName.ANTHROPIC)).toBe( - "https://api.anthropic.com/v1" - ); - expect(getEndpoint(ModelProviderName.LLAMACLOUD)).toBe( - "https://api.llamacloud.com/v1" - ); - expect(getEndpoint(ModelProviderName.ETERNALAI)).toBe( - "https://mock.eternal.ai" - ); + expect(getEndpoint(ModelProviderName.OPENAI)).toBe("https://api.openai.com/v1"); + expect(getEndpoint(ModelProviderName.ANTHROPIC)).toBe("https://api.anthropic.com/v1"); + expect(getEndpoint(ModelProviderName.LLAMACLOUD)).toBe("https://api.llamacloud.com/v1"); + expect(getEndpoint(ModelProviderName.ETERNALAI)).toBe("https://mock.eternal.ai"); }); test("should throw error for invalid provider", () => { @@ -186,7 +138,7 @@ describe("Model Retrieval Functions", () => { describe("Model Settings Validation", () => { test("all providers should have required settings", () => { - Object.values(ModelProviderName).forEach((provider) 
=> { + Object.values(ModelProviderName).forEach(provider => { const providerConfig = models[provider]; expect(providerConfig.settings).toBeDefined(); expect(providerConfig.settings.maxInputTokens).toBeGreaterThan(0); @@ -196,7 +148,7 @@ describe("Model Settings Validation", () => { }); test("all providers should have model mappings for basic model classes", () => { - Object.values(ModelProviderName).forEach((provider) => { + Object.values(ModelProviderName).forEach(provider => { const providerConfig = models[provider]; expect(providerConfig.model).toBeDefined(); expect(providerConfig.model[ModelClass.SMALL]).toBeDefined(); @@ -209,21 +161,13 @@ describe("Model Settings Validation", () => { describe("Environment Variable Integration", () => { test("should use environment variables for LlamaCloud models", () => { const llamaConfig = models[ModelProviderName.LLAMACLOUD]; - expect(llamaConfig.model[ModelClass.SMALL]).toBe( - "meta-llama/Llama-3.2-3B-Instruct-Turbo" - ); - expect(llamaConfig.model[ModelClass.LARGE]).toBe( - "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo" - ); + expect(llamaConfig.model[ModelClass.SMALL]).toBe("meta-llama/Llama-3.2-3B-Instruct-Turbo"); + expect(llamaConfig.model[ModelClass.LARGE]).toBe("meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo"); }); test("should use environment variables for Together models", () => { const togetherConfig = models[ModelProviderName.TOGETHER]; - expect(togetherConfig.model[ModelClass.SMALL]).toBe( - "meta-llama/Llama-3.2-3B-Instruct-Turbo" - ); - expect(togetherConfig.model[ModelClass.LARGE]).toBe( - "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo" - ); + expect(togetherConfig.model[ModelClass.SMALL]).toBe("meta-llama/Llama-3.2-3B-Instruct-Turbo"); + expect(togetherConfig.model[ModelClass.LARGE]).toBe("meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo"); }); }); diff --git a/packages/core/src/tests/parsing.test.ts b/packages/core/src/tests/parsing.test.ts index 1f436adaa2a..636f0b00aff 100644 --- a/packages/core/src/tests/parsing.test.ts +++ b/packages/core/src/tests/parsing.test.ts @@ -1,114 +1,94 @@ -import { describe, it, expect } from "vitest"; +import { describe, it, expect } from 'vitest'; import { parseShouldRespondFromText, parseBooleanFromText, parseJsonArrayFromText, parseJSONObjectFromText, -} from "../parsing"; +} from '../parsing'; -describe("Parsing Module", () => { - describe("parseShouldRespondFromText", () => { - it("should parse exact matches", () => { - expect(parseShouldRespondFromText("[RESPOND]")).toBe("RESPOND"); - expect(parseShouldRespondFromText("[IGNORE]")).toBe("IGNORE"); - expect(parseShouldRespondFromText("[STOP]")).toBe("STOP"); +describe('Parsing Module', () => { + describe('parseShouldRespondFromText', () => { + it('should parse exact matches', () => { + expect(parseShouldRespondFromText('[RESPOND]')).toBe('RESPOND'); + expect(parseShouldRespondFromText('[IGNORE]')).toBe('IGNORE'); + expect(parseShouldRespondFromText('[STOP]')).toBe('STOP'); }); - it("should handle case insensitive input", () => { - expect(parseShouldRespondFromText("[respond]")).toBe("RESPOND"); - expect(parseShouldRespondFromText("[ignore]")).toBe("IGNORE"); - expect(parseShouldRespondFromText("[stop]")).toBe("STOP"); + it('should handle case insensitive input', () => { + expect(parseShouldRespondFromText('[respond]')).toBe('RESPOND'); + expect(parseShouldRespondFromText('[ignore]')).toBe('IGNORE'); + expect(parseShouldRespondFromText('[stop]')).toBe('STOP'); }); - it("should handle text containing keywords", () => { - expect( - 
parseShouldRespondFromText("I think we should RESPOND here") - ).toBe("RESPOND"); - expect( - parseShouldRespondFromText("Better to IGNORE this one") - ).toBe("IGNORE"); - expect(parseShouldRespondFromText("We need to STOP now")).toBe( - "STOP" - ); + it('should handle text containing keywords', () => { + expect(parseShouldRespondFromText('I think we should RESPOND here')).toBe('RESPOND'); + expect(parseShouldRespondFromText('Better to IGNORE this one')).toBe('IGNORE'); + expect(parseShouldRespondFromText('We need to STOP now')).toBe('STOP'); }); - it("should return null for invalid input", () => { - expect(parseShouldRespondFromText("")).toBe(null); - expect(parseShouldRespondFromText("invalid")).toBe(null); - expect(parseShouldRespondFromText("[INVALID]")).toBe(null); + it('should return null for invalid input', () => { + expect(parseShouldRespondFromText('')).toBe(null); + expect(parseShouldRespondFromText('invalid')).toBe(null); + expect(parseShouldRespondFromText('[INVALID]')).toBe(null); }); }); - describe("parseBooleanFromText", () => { - it("should parse exact YES/NO matches", () => { - expect(parseBooleanFromText("YES")).toBe(true); - expect(parseBooleanFromText("NO")).toBe(false); + describe('parseBooleanFromText', () => { + it('should parse exact YES/NO matches', () => { + expect(parseBooleanFromText('YES')).toBe(true); + expect(parseBooleanFromText('NO')).toBe(false); }); - it("should handle case insensitive input", () => { - expect(parseBooleanFromText("yes")).toBe(true); - expect(parseBooleanFromText("no")).toBe(false); + it('should handle case insensitive input', () => { + expect(parseBooleanFromText('yes')).toBe(true); + expect(parseBooleanFromText('no')).toBe(false); }); - it("should return null for invalid input", () => { - expect(parseBooleanFromText("")).toBe(null); - expect(parseBooleanFromText("maybe")).toBe(null); - expect(parseBooleanFromText("YES NO")).toBe(null); + it('should return null for invalid input', () => { + expect(parseBooleanFromText('')).toBe(null); + expect(parseBooleanFromText('maybe')).toBe(null); + expect(parseBooleanFromText('YES NO')).toBe(null); }); }); - describe("parseJsonArrayFromText", () => { - it("should parse JSON array from code block", () => { + describe('parseJsonArrayFromText', () => { + it('should parse JSON array from code block', () => { const input = '```json\n["item1", "item2", "item3"]\n```'; - expect(parseJsonArrayFromText(input)).toEqual([ - "item1", - "item2", - "item3", - ]); + expect(parseJsonArrayFromText(input)).toEqual(['item1', 'item2', 'item3']); }); - it("should handle empty arrays", () => { - expect(parseJsonArrayFromText("```json\n[]\n```")).toEqual([]); - expect(parseJsonArrayFromText("[]")).toEqual(null); + it('should handle empty arrays', () => { + expect(parseJsonArrayFromText('```json\n[]\n```')).toEqual([]); + expect(parseJsonArrayFromText('[]')).toEqual(null); }); - it("should return null for invalid JSON", () => { - expect(parseJsonArrayFromText("invalid")).toBe(null); - expect(parseJsonArrayFromText("[invalid]")).toBe(null); - expect(parseJsonArrayFromText("```json\n[invalid]\n```")).toBe( - null - ); + it('should return null for invalid JSON', () => { + expect(parseJsonArrayFromText('invalid')).toBe(null); + expect(parseJsonArrayFromText('[invalid]')).toBe(null); + expect(parseJsonArrayFromText('```json\n[invalid]\n```')).toBe(null); }); }); - describe("parseJSONObjectFromText", () => { - it("should parse JSON object from code block", () => { + describe('parseJSONObjectFromText', () => { + it('should parse 
JSON object from code block', () => { const input = '```json\n{"key": "value", "number": 42}\n```'; - expect(parseJSONObjectFromText(input)).toEqual({ - key: "value", - number: 42, - }); + expect(parseJSONObjectFromText(input)).toEqual({ key: 'value', number: 42 }); }); - it("should parse JSON object without code block", () => { + it('should parse JSON object without code block', () => { const input = '{"key": "value", "number": 42}'; - expect(parseJSONObjectFromText(input)).toEqual({ - key: "value", - number: 42, - }); + expect(parseJSONObjectFromText(input)).toEqual({ key: 'value', number: 42 }); }); - it("should handle empty objects", () => { - expect(parseJSONObjectFromText("```json\n{}\n```")).toEqual({}); - expect(parseJSONObjectFromText("{}")).toEqual({}); + it('should handle empty objects', () => { + expect(parseJSONObjectFromText('```json\n{}\n```')).toEqual({}); + expect(parseJSONObjectFromText('{}')).toEqual({}); }); - it("should return null for invalid JSON", () => { - expect(parseJSONObjectFromText("invalid")).toBe(null); - expect(parseJSONObjectFromText("{invalid}")).toBe(null); - expect(parseJSONObjectFromText("```json\n{invalid}\n```")).toBe( - null - ); + it('should return null for invalid JSON', () => { + expect(parseJSONObjectFromText('invalid')).toBe(null); + expect(parseJSONObjectFromText('{invalid}')).toBe(null); + expect(parseJSONObjectFromText('```json\n{invalid}\n```')).toBe(null); }); }); }); diff --git a/packages/core/src/tests/runtime.test.ts b/packages/core/src/tests/runtime.test.ts index ef0a6a571e4..292de6670a0 100644 --- a/packages/core/src/tests/runtime.test.ts +++ b/packages/core/src/tests/runtime.test.ts @@ -47,7 +47,7 @@ const mockDatabaseAdapter: IDatabaseAdapter = { setParticipantUserState: vi.fn().mockResolvedValue(undefined), createRelationship: vi.fn().mockResolvedValue(true), getRelationship: vi.fn().mockResolvedValue(null), - getRelationships: vi.fn().mockResolvedValue([]), + getRelationships: vi.fn().mockResolvedValue([]) }; const mockCacheManager = { @@ -113,11 +113,7 @@ describe("AgentRuntime", () => { userId: "123e4567-e89b-12d3-a456-426614174005", agentId: "123e4567-e89b-12d3-a456-426614174005", roomId: "123e4567-e89b-12d3-a456-426614174003", - content: { - type: "text", - text: "test response", - action: "testAction", - }, + content: { type: "text", text: "test response", action: "testAction" }, }; await runtime.processActions(message, [response], { diff --git a/packages/core/src/types.ts b/packages/core/src/types.ts index d1dadddaf8e..3687ded5e01 100644 --- a/packages/core/src/types.ts +++ b/packages/core/src/types.ts @@ -243,7 +243,6 @@ export enum ModelProviderName { VENICE = "venice", AKASH_CHAT_API = "akash_chat_api", LIVEPEER = "livepeer", - INFERA = "infera", } /** diff --git a/packages/core/tsconfig.build.json b/packages/core/tsconfig.build.json index e3eb058310d..e56b5e155c1 100644 --- a/packages/core/tsconfig.build.json +++ b/packages/core/tsconfig.build.json @@ -1,8 +1,8 @@ { - "extends": "./tsconfig.json", - "compilerOptions": { - "sourceMap": true, - "inlineSources": true, - "sourceRoot": "/" - } -} + "extends": "./tsconfig.json", + "compilerOptions": { + "sourceMap": true, + "inlineSources": true, + "sourceRoot": "/" + } + } diff --git a/packages/core/tsconfig.json b/packages/core/tsconfig.json index cb33a265893..c19e06bb645 100644 --- a/packages/core/tsconfig.json +++ b/packages/core/tsconfig.json @@ -2,7 +2,10 @@ "compilerOptions": { "target": "ESNext", "module": "ESNext", - "lib": ["ESNext", "dom"], + "lib": [ + "ESNext", + 
"dom" + ], "moduleResolution": "Bundler", "outDir": "./dist", "rootDir": "./src", @@ -20,8 +23,17 @@ "noEmitOnError": false, "moduleDetection": "force", "allowArbitraryExtensions": true, - "customConditions": ["@elizaos/source"] + "customConditions": [ + "@elizaos/source" + ], }, - "include": ["src/**/*"], - "exclude": ["node_modules", "dist", "src/**/*.d.ts", "types/**/*.test.ts"] + "include": [ + "src/**/*" + ], + "exclude": [ + "node_modules", + "dist", + "src/**/*.d.ts", + "types/**/*.test.ts" + ] } diff --git a/packages/core/types.ts b/packages/core/types.ts deleted file mode 100644 index 8fb9e2814bd..00000000000 --- a/packages/core/types.ts +++ /dev/null @@ -1,1332 +0,0 @@ -import { Readable } from "stream"; - -/** - * Represents a UUID string in the format "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" - */ -export type UUID = `${string}-${string}-${string}-${string}-${string}`; - -/** - * Represents the content of a message or communication - */ -export interface Content { - /** The main text content */ - text: string; - - /** Optional action associated with the message */ - action?: string; - - /** Optional source/origin of the content */ - source?: string; - - /** URL of the original message/post (e.g. tweet URL, Discord message link) */ - url?: string; - - /** UUID of parent message if this is a reply/thread */ - inReplyTo?: UUID; - - /** Array of media attachments */ - attachments?: Media[]; - - /** Additional dynamic properties */ - [key: string]: unknown; -} - -/** - * Example content with associated user for demonstration purposes - */ -export interface ActionExample { - /** User associated with the example */ - user: string; - - /** Content of the example */ - content: Content; -} - -/** - * Example conversation content with user ID - */ -export interface ConversationExample { - /** UUID of user in conversation */ - userId: UUID; - - /** Content of the conversation */ - content: Content; -} - -/** - * Represents an actor/participant in a conversation - */ -export interface Actor { - /** Display name */ - name: string; - - /** Username/handle */ - username: string; - - /** Additional profile details */ - details: { - /** Short profile tagline */ - tagline: string; - - /** Longer profile summary */ - summary: string; - - /** Favorite quote */ - quote: string; - }; - - /** Unique identifier */ - id: UUID; -} - -/** - * Represents a single objective within a goal - */ -export interface Objective { - /** Optional unique identifier */ - id?: string; - - /** Description of what needs to be achieved */ - description: string; - - /** Whether objective is completed */ - completed: boolean; -} - -/** - * Status enum for goals - */ -export enum GoalStatus { - DONE = "DONE", - FAILED = "FAILED", - IN_PROGRESS = "IN_PROGRESS", -} - -/** - * Represents a high-level goal composed of objectives - */ -export interface Goal { - /** Optional unique identifier */ - id?: UUID; - - /** Room ID where goal exists */ - roomId: UUID; - - /** User ID of goal owner */ - userId: UUID; - - /** Name/title of the goal */ - name: string; - - /** Current status */ - status: GoalStatus; - - /** Component objectives */ - objectives: Objective[]; -} - -/** - * Model size/type classification - */ -export enum ModelClass { - SMALL = "small", - MEDIUM = "medium", - LARGE = "large", - EMBEDDING = "embedding", - IMAGE = "image", -} - -/** - * Configuration for an AI model - */ -export type Model = { - /** Optional API endpoint */ - endpoint?: string; - - /** Model settings */ - settings: { - /** Maximum input tokens */ - 
maxInputTokens: number; - - /** Maximum output tokens */ - maxOutputTokens: number; - - /** Optional frequency penalty */ - frequency_penalty?: number; - - /** Optional presence penalty */ - presence_penalty?: number; - - /** Optional repetition penalty */ - repetition_penalty?: number; - - /** Stop sequences */ - stop: string[]; - - /** Temperature setting */ - temperature: number; - - /** Optional telemetry configuration (experimental) */ - experimental_telemetry?: TelemetrySettings; - }; - - /** Optional image generation settings */ - imageSettings?: { - steps?: number; - }; - - /** Model names by size class */ - model: { - [ModelClass.SMALL]: string; - [ModelClass.MEDIUM]: string; - [ModelClass.LARGE]: string; - [ModelClass.EMBEDDING]?: string; - [ModelClass.IMAGE]?: string; - }; -}; - -/** - * Model configurations by provider - */ -export type Models = { - [ModelProviderName.OPENAI]: Model; - [ModelProviderName.ETERNALAI]: Model; - [ModelProviderName.ANTHROPIC]: Model; - [ModelProviderName.GROK]: Model; - [ModelProviderName.GROQ]: Model; - [ModelProviderName.LLAMACLOUD]: Model; - [ModelProviderName.TOGETHER]: Model; - [ModelProviderName.LLAMALOCAL]: Model; - [ModelProviderName.GOOGLE]: Model; - [ModelProviderName.CLAUDE_VERTEX]: Model; - [ModelProviderName.REDPILL]: Model; - [ModelProviderName.OPENROUTER]: Model; - [ModelProviderName.OLLAMA]: Model; - [ModelProviderName.HEURIST]: Model; - [ModelProviderName.GALADRIEL]: Model; - [ModelProviderName.FAL]: Model; - [ModelProviderName.GAIANET]: Model; - [ModelProviderName.ALI_BAILIAN]: Model; - [ModelProviderName.VOLENGINE]: Model; - [ModelProviderName.NANOGPT]: Model; - [ModelProviderName.HYPERBOLIC]: Model; - [ModelProviderName.VENICE]: Model; - [ModelProviderName.AKASH_CHAT_API]: Model; - [ModelProviderName.LIVEPEER]: Model; - [ModelProviderName.INFERA]: Model; -}; - -/** - * Available model providers - */ -export enum ModelProviderName { - OPENAI = "openai", - ETERNALAI = "eternalai", - ANTHROPIC = "anthropic", - GROK = "grok", - GROQ = "groq", - LLAMACLOUD = "llama_cloud", - TOGETHER = "together", - LLAMALOCAL = "llama_local", - GOOGLE = "google", - CLAUDE_VERTEX = "claude_vertex", - REDPILL = "redpill", - OPENROUTER = "openrouter", - OLLAMA = "ollama", - HEURIST = "heurist", - GALADRIEL = "galadriel", - FAL = "falai", - GAIANET = "gaianet", - ALI_BAILIAN = "ali_bailian", - VOLENGINE = "volengine", - NANOGPT = "nanogpt", - HYPERBOLIC = "hyperbolic", - VENICE = "venice", - AKASH_CHAT_API = "akash_chat_api", - LIVEPEER = "livepeer", - INFERA = "infera", -} - -/** - * Represents the current state/context of a conversation - */ -export interface State { - /** ID of user who sent current message */ - userId?: UUID; - - /** ID of agent in conversation */ - agentId?: UUID; - - /** Agent's biography */ - bio: string; - - /** Agent's background lore */ - lore: string; - - /** Message handling directions */ - messageDirections: string; - - /** Post handling directions */ - postDirections: string; - - /** Current room/conversation ID */ - roomId: UUID; - - /** Optional agent name */ - agentName?: string; - - /** Optional message sender name */ - senderName?: string; - - /** String representation of conversation actors */ - actors: string; - - /** Optional array of actor objects */ - actorsData?: Actor[]; - - /** Optional string representation of goals */ - goals?: string; - - /** Optional array of goal objects */ - goalsData?: Goal[]; - - /** Recent message history as string */ - recentMessages: string; - - /** Recent message objects */ - 
recentMessagesData: Memory[]; - - /** Optional valid action names */ - actionNames?: string; - - /** Optional action descriptions */ - actions?: string; - - /** Optional action objects */ - actionsData?: Action[]; - - /** Optional action examples */ - actionExamples?: string; - - /** Optional provider descriptions */ - providers?: string; - - /** Optional response content */ - responseData?: Content; - - /** Optional recent interaction objects */ - recentInteractionsData?: Memory[]; - - /** Optional recent interactions string */ - recentInteractions?: string; - - /** Optional formatted conversation */ - formattedConversation?: string; - - /** Optional formatted knowledge */ - knowledge?: string; - /** Optional knowledge data */ - knowledgeData?: KnowledgeItem[]; - - /** Additional dynamic properties */ - [key: string]: unknown; -} - -/** - * Represents a stored memory/message - */ -export interface Memory { - /** Optional unique identifier */ - id?: UUID; - - /** Associated user ID */ - userId: UUID; - - /** Associated agent ID */ - agentId: UUID; - - /** Optional creation timestamp */ - createdAt?: number; - - /** Memory content */ - content: Content; - - /** Optional embedding vector */ - embedding?: number[]; - - /** Associated room ID */ - roomId: UUID; - - /** Whether memory is unique */ - unique?: boolean; - - /** Embedding similarity score */ - similarity?: number; -} - -/** - * Example message for demonstration - */ -export interface MessageExample { - /** Associated user */ - user: string; - - /** Message content */ - content: Content; -} - -/** - * Handler function type for processing messages - */ -export type Handler = ( - runtime: IAgentRuntime, - message: Memory, - state?: State, - options?: { [key: string]: unknown }, - callback?: HandlerCallback -) => Promise; - -/** - * Callback function type for handlers - */ -export type HandlerCallback = ( - response: Content, - files?: any -) => Promise; - -/** - * Validator function type for actions/evaluators - */ -export type Validator = ( - runtime: IAgentRuntime, - message: Memory, - state?: State -) => Promise; - -/** - * Represents an action the agent can perform - */ -export interface Action { - /** Similar action descriptions */ - similes: string[]; - - /** Detailed description */ - description: string; - - /** Example usages */ - examples: ActionExample[][]; - - /** Handler function */ - handler: Handler; - - /** Action name */ - name: string; - - /** Validation function */ - validate: Validator; - - /** Whether to suppress the initial message when this action is used */ - suppressInitialMessage?: boolean; -} - -/** - * Example for evaluating agent behavior - */ -export interface EvaluationExample { - /** Evaluation context */ - context: string; - - /** Example messages */ - messages: Array; - - /** Expected outcome */ - outcome: string; -} - -/** - * Evaluator for assessing agent responses - */ -export interface Evaluator { - /** Whether to always run */ - alwaysRun?: boolean; - - /** Detailed description */ - description: string; - - /** Similar evaluator descriptions */ - similes: string[]; - - /** Example evaluations */ - examples: EvaluationExample[]; - - /** Handler function */ - handler: Handler; - - /** Evaluator name */ - name: string; - - /** Validation function */ - validate: Validator; -} - -/** - * Provider for external data/services - */ -export interface Provider { - /** Data retrieval function */ - get: ( - runtime: IAgentRuntime, - message: Memory, - state?: State - ) => Promise; -} - -/** - * Represents a 
relationship between users - */ -export interface Relationship { - /** Unique identifier */ - id: UUID; - - /** First user ID */ - userA: UUID; - - /** Second user ID */ - userB: UUID; - - /** Primary user ID */ - userId: UUID; - - /** Associated room ID */ - roomId: UUID; - - /** Relationship status */ - status: string; - - /** Optional creation timestamp */ - createdAt?: string; -} - -/** - * Represents a user account - */ -export interface Account { - /** Unique identifier */ - id: UUID; - - /** Display name */ - name: string; - - /** Username */ - username: string; - - /** Optional additional details */ - details?: { [key: string]: any }; - - /** Optional email */ - email?: string; - - /** Optional avatar URL */ - avatarUrl?: string; -} - -/** - * Room participant with account details - */ -export interface Participant { - /** Unique identifier */ - id: UUID; - - /** Associated account */ - account: Account; -} - -/** - * Represents a conversation room - */ -export interface Room { - /** Unique identifier */ - id: UUID; - - /** Room participants */ - participants: Participant[]; -} - -/** - * Represents a media attachment - */ -export type Media = { - /** Unique identifier */ - id: string; - - /** Media URL */ - url: string; - - /** Media title */ - title: string; - - /** Media source */ - source: string; - - /** Media description */ - description: string; - - /** Text content */ - text: string; - - /** Content type */ - contentType?: string; -}; - -/** - * Client interface for platform connections - */ -export type Client = { - /** Start client connection */ - start: (runtime: IAgentRuntime) => Promise; - - /** Stop client connection */ - stop: (runtime: IAgentRuntime) => Promise; -}; - -/** - * Plugin for extending agent functionality - */ -export type Plugin = { - /** Plugin name */ - name: string; - - /** Plugin description */ - description: string; - - /** Optional actions */ - actions?: Action[]; - - /** Optional providers */ - providers?: Provider[]; - - /** Optional evaluators */ - evaluators?: Evaluator[]; - - /** Optional services */ - services?: Service[]; - - /** Optional clients */ - clients?: Client[]; -}; - -/** - * Available client platforms - */ -export enum Clients { - DISCORD = "discord", - DIRECT = "direct", - TWITTER = "twitter", - TELEGRAM = "telegram", - FARCASTER = "farcaster", - LENS = "lens", - AUTO = "auto", - SLACK = "slack", -} - -export interface IAgentConfig { - [key: string]: string; -} - -export type TelemetrySettings = { - /** - * Enable or disable telemetry. Disabled by default while experimental. - */ - isEnabled?: boolean; - /** - * Enable or disable input recording. Enabled by default. - * - * You might want to disable input recording to avoid recording sensitive - * information, to reduce data transfers, or to increase performance. - */ - recordInputs?: boolean; - /** - * Enable or disable output recording. Enabled by default. - * - * You might want to disable output recording to avoid recording sensitive - * information, to reduce data transfers, or to increase performance. - */ - recordOutputs?: boolean; - /** - * Identifier for this function. Used to group telemetry data by function. 
- */ - functionId?: string; -}; - -export interface ModelConfiguration { - temperature?: number; - max_response_length?: number; - frequency_penalty?: number; - presence_penalty?: number; - maxInputTokens?: number; - experimental_telemetry?: TelemetrySettings; -} - -/** - * Configuration for an agent character - */ -export type Character = { - /** Optional unique identifier */ - id?: UUID; - - /** Character name */ - name: string; - - /** Optional username */ - username?: string; - - /** Optional system prompt */ - system?: string; - - /** Model provider to use */ - modelProvider: ModelProviderName; - - /** Image model provider to use, if different from modelProvider */ - imageModelProvider?: ModelProviderName; - - /** Image Vision model provider to use, if different from modelProvider */ - imageVisionModelProvider?: ModelProviderName; - - /** Optional model endpoint override */ - modelEndpointOverride?: string; - - /** Optional prompt templates */ - templates?: { - goalsTemplate?: string; - factsTemplate?: string; - messageHandlerTemplate?: string; - shouldRespondTemplate?: string; - continueMessageHandlerTemplate?: string; - evaluationTemplate?: string; - twitterSearchTemplate?: string; - twitterActionTemplate?: string; - twitterPostTemplate?: string; - twitterMessageHandlerTemplate?: string; - twitterShouldRespondTemplate?: string; - farcasterPostTemplate?: string; - lensPostTemplate?: string; - farcasterMessageHandlerTemplate?: string; - lensMessageHandlerTemplate?: string; - farcasterShouldRespondTemplate?: string; - lensShouldRespondTemplate?: string; - telegramMessageHandlerTemplate?: string; - telegramShouldRespondTemplate?: string; - discordVoiceHandlerTemplate?: string; - discordShouldRespondTemplate?: string; - discordMessageHandlerTemplate?: string; - slackMessageHandlerTemplate?: string; - slackShouldRespondTemplate?: string; - }; - - /** Character biography */ - bio: string | string[]; - - /** Character background lore */ - lore: string[]; - - /** Example messages */ - messageExamples: MessageExample[][]; - - /** Example posts */ - postExamples: string[]; - - /** Known topics */ - topics: string[]; - - /** Character traits */ - adjectives: string[]; - - /** Optional knowledge base */ - knowledge?: string[]; - - /** Supported client platforms */ - clients: Clients[]; - - /** Available plugins */ - plugins: Plugin[]; - - /** Optional configuration */ - settings?: { - secrets?: { [key: string]: string }; - intiface?: boolean; - imageSettings?: { - steps?: number; - width?: number; - height?: number; - negativePrompt?: string; - numIterations?: number; - guidanceScale?: number; - seed?: number; - modelId?: string; - jobId?: string; - count?: number; - stylePreset?: string; - hideWatermark?: boolean; - }; - voice?: { - model?: string; // For VITS - url?: string; // Legacy VITS support - elevenlabs?: { - // New structured ElevenLabs config - voiceId: string; - model?: string; - stability?: string; - similarityBoost?: string; - style?: string; - useSpeakerBoost?: string; - }; - }; - model?: string; - modelConfig?: ModelConfiguration; - embeddingModel?: string; - chains?: { - evm?: any[]; - solana?: any[]; - [key: string]: any[]; - }; - transcription?: TranscriptionProvider; - }; - - /** Optional client-specific config */ - clientConfig?: { - discord?: { - shouldIgnoreBotMessages?: boolean; - shouldIgnoreDirectMessages?: boolean; - shouldRespondOnlyToMentions?: boolean; - messageSimilarityThreshold?: number; - isPartOfTeam?: boolean; - teamAgentIds?: string[]; - teamLeaderId?: 
string; - teamMemberInterestKeywords?: string[]; - }; - telegram?: { - shouldIgnoreBotMessages?: boolean; - shouldIgnoreDirectMessages?: boolean; - shouldRespondOnlyToMentions?: boolean; - shouldOnlyJoinInAllowedGroups?: boolean; - allowedGroupIds?: string[]; - messageSimilarityThreshold?: number; - isPartOfTeam?: boolean; - teamAgentIds?: string[]; - teamLeaderId?: string; - teamMemberInterestKeywords?: string[]; - }; - slack?: { - shouldIgnoreBotMessages?: boolean; - shouldIgnoreDirectMessages?: boolean; - }; - gitbook?: { - keywords?: { - projectTerms?: string[]; - generalQueries?: string[]; - }; - documentTriggers?: string[]; - }; - }; - - /** Writing style guides */ - style: { - all: string[]; - chat: string[]; - post: string[]; - }; - - /** Optional Twitter profile */ - twitterProfile?: { - id: string; - username: string; - screenName: string; - bio: string; - nicknames?: string[]; - }; - /** Optional NFT prompt */ - nft?: { - prompt: string; - }; -}; - -/** - * Interface for database operations - */ -export interface IDatabaseAdapter { - /** Database instance */ - db: any; - - /** Optional initialization */ - init(): Promise; - - /** Close database connection */ - close(): Promise; - - /** Get account by ID */ - getAccountById(userId: UUID): Promise; - - /** Create new account */ - createAccount(account: Account): Promise; - - /** Get memories matching criteria */ - getMemories(params: { - roomId: UUID; - count?: number; - unique?: boolean; - tableName: string; - agentId: UUID; - start?: number; - end?: number; - }): Promise; - - getMemoryById(id: UUID): Promise; - - getMemoriesByRoomIds(params: { - tableName: string; - agentId: UUID; - roomIds: UUID[]; - }): Promise; - - getCachedEmbeddings(params: { - query_table_name: string; - query_threshold: number; - query_input: string; - query_field_name: string; - query_field_sub_name: string; - query_match_count: number; - }): Promise<{ embedding: number[]; levenshtein_score: number }[]>; - - log(params: { - body: { [key: string]: unknown }; - userId: UUID; - roomId: UUID; - type: string; - }): Promise; - - getActorDetails(params: { roomId: UUID }): Promise; - - searchMemories(params: { - tableName: string; - agentId: UUID; - roomId: UUID; - embedding: number[]; - match_threshold: number; - match_count: number; - unique: boolean; - }): Promise; - - updateGoalStatus(params: { - goalId: UUID; - status: GoalStatus; - }): Promise; - - searchMemoriesByEmbedding( - embedding: number[], - params: { - match_threshold?: number; - count?: number; - roomId?: UUID; - agentId?: UUID; - unique?: boolean; - tableName: string; - } - ): Promise; - - createMemory( - memory: Memory, - tableName: string, - unique?: boolean - ): Promise; - - removeMemory(memoryId: UUID, tableName: string): Promise; - - removeAllMemories(roomId: UUID, tableName: string): Promise; - - countMemories( - roomId: UUID, - unique?: boolean, - tableName?: string - ): Promise; - - getGoals(params: { - agentId: UUID; - roomId: UUID; - userId?: UUID | null; - onlyInProgress?: boolean; - count?: number; - }): Promise; - - updateGoal(goal: Goal): Promise; - - createGoal(goal: Goal): Promise; - - removeGoal(goalId: UUID): Promise; - - removeAllGoals(roomId: UUID): Promise; - - getRoom(roomId: UUID): Promise; - - createRoom(roomId?: UUID): Promise; - - removeRoom(roomId: UUID): Promise; - - getRoomsForParticipant(userId: UUID): Promise; - - getRoomsForParticipants(userIds: UUID[]): Promise; - - addParticipant(userId: UUID, roomId: UUID): Promise; - - removeParticipant(userId: UUID, roomId: 
UUID): Promise; - - getParticipantsForAccount(userId: UUID): Promise; - - getParticipantsForRoom(roomId: UUID): Promise; - - getParticipantUserState( - roomId: UUID, - userId: UUID - ): Promise<"FOLLOWED" | "MUTED" | null>; - - setParticipantUserState( - roomId: UUID, - userId: UUID, - state: "FOLLOWED" | "MUTED" | null - ): Promise; - - createRelationship(params: { userA: UUID; userB: UUID }): Promise; - - getRelationship(params: { - userA: UUID; - userB: UUID; - }): Promise; - - getRelationships(params: { userId: UUID }): Promise; -} - -export interface IDatabaseCacheAdapter { - getCache(params: { - agentId: UUID; - key: string; - }): Promise; - - setCache(params: { - agentId: UUID; - key: string; - value: string; - }): Promise; - - deleteCache(params: { agentId: UUID; key: string }): Promise; -} - -export interface IMemoryManager { - runtime: IAgentRuntime; - tableName: string; - constructor: Function; - - addEmbeddingToMemory(memory: Memory): Promise; - - getMemories(opts: { - roomId: UUID; - count?: number; - unique?: boolean; - start?: number; - end?: number; - }): Promise; - - getCachedEmbeddings( - content: string - ): Promise<{ embedding: number[]; levenshtein_score: number }[]>; - - getMemoryById(id: UUID): Promise; - getMemoriesByRoomIds(params: { roomIds: UUID[] }): Promise; - searchMemoriesByEmbedding( - embedding: number[], - opts: { - match_threshold?: number; - count?: number; - roomId: UUID; - unique?: boolean; - } - ): Promise; - - createMemory(memory: Memory, unique?: boolean): Promise; - - removeMemory(memoryId: UUID): Promise; - - removeAllMemories(roomId: UUID): Promise; - - countMemories(roomId: UUID, unique?: boolean): Promise; -} - -export type CacheOptions = { - expires?: number; -}; - -export enum CacheStore { - REDIS = "redis", - DATABASE = "database", - FILESYSTEM = "filesystem", -} - -export interface ICacheManager { - get(key: string): Promise; - set(key: string, value: T, options?: CacheOptions): Promise; - delete(key: string): Promise; -} - -export abstract class Service { - private static instance: Service | null = null; - - static get serviceType(): ServiceType { - throw new Error("Service must implement static serviceType getter"); - } - - public static getInstance(): T { - if (!Service.instance) { - Service.instance = new (this as any)(); - } - return Service.instance as T; - } - - get serviceType(): ServiceType { - return (this.constructor as typeof Service).serviceType; - } - - // Add abstract initialize method that must be implemented by derived classes - abstract initialize(runtime: IAgentRuntime): Promise; -} - -export interface IAgentRuntime { - // Properties - agentId: UUID; - serverUrl: string; - databaseAdapter: IDatabaseAdapter; - token: string | null; - modelProvider: ModelProviderName; - imageModelProvider: ModelProviderName; - imageVisionModelProvider: ModelProviderName; - character: Character; - providers: Provider[]; - actions: Action[]; - evaluators: Evaluator[]; - plugins: Plugin[]; - - fetch?: typeof fetch | null; - - messageManager: IMemoryManager; - descriptionManager: IMemoryManager; - documentsManager: IMemoryManager; - knowledgeManager: IMemoryManager; - loreManager: IMemoryManager; - - cacheManager: ICacheManager; - - services: Map; - // any could be EventEmitter - // but I think the real solution is forthcoming as a base client interface - clients: Record; - - initialize(): Promise; - - registerMemoryManager(manager: IMemoryManager): void; - - getMemoryManager(name: string): IMemoryManager | null; - - getService(service: 
ServiceType): T | null; - - registerService(service: Service): void; - - getSetting(key: string): string | null; - - // Methods - getConversationLength(): number; - - processActions( - message: Memory, - responses: Memory[], - state?: State, - callback?: HandlerCallback - ): Promise; - - evaluate( - message: Memory, - state?: State, - didRespond?: boolean, - callback?: HandlerCallback - ): Promise; - - ensureParticipantExists(userId: UUID, roomId: UUID): Promise; - - ensureUserExists( - userId: UUID, - userName: string | null, - name: string | null, - source: string | null - ): Promise; - - registerAction(action: Action): void; - - ensureConnection( - userId: UUID, - roomId: UUID, - userName?: string, - userScreenName?: string, - source?: string - ): Promise; - - ensureParticipantInRoom(userId: UUID, roomId: UUID): Promise; - - ensureRoomExists(roomId: UUID): Promise; - - composeState( - message: Memory, - additionalKeys?: { [key: string]: unknown } - ): Promise; - - updateRecentMessageState(state: State): Promise; -} - -export interface IImageDescriptionService extends Service { - describeImage( - imageUrl: string - ): Promise<{ title: string; description: string }>; -} - -export interface ITranscriptionService extends Service { - transcribeAttachment(audioBuffer: ArrayBuffer): Promise; - transcribeAttachmentLocally( - audioBuffer: ArrayBuffer - ): Promise; - transcribe(audioBuffer: ArrayBuffer): Promise; - transcribeLocally(audioBuffer: ArrayBuffer): Promise; -} - -export interface IVideoService extends Service { - isVideoUrl(url: string): boolean; - fetchVideoInfo(url: string): Promise; - downloadVideo(videoInfo: Media): Promise; - processVideo(url: string, runtime: IAgentRuntime): Promise; -} - -export interface ITextGenerationService extends Service { - initializeModel(): Promise; - queueMessageCompletion( - context: string, - temperature: number, - stop: string[], - frequency_penalty: number, - presence_penalty: number, - max_tokens: number - ): Promise; - queueTextCompletion( - context: string, - temperature: number, - stop: string[], - frequency_penalty: number, - presence_penalty: number, - max_tokens: number - ): Promise; - getEmbeddingResponse(input: string): Promise; -} - -export interface IBrowserService extends Service { - closeBrowser(): Promise; - getPageContent( - url: string, - runtime: IAgentRuntime - ): Promise<{ title: string; description: string; bodyContent: string }>; -} - -export interface ISpeechService extends Service { - getInstance(): ISpeechService; - generate(runtime: IAgentRuntime, text: string): Promise; -} - -export interface IPdfService extends Service { - getInstance(): IPdfService; - convertPdfToText(pdfBuffer: Buffer): Promise; -} - -export interface IAwsS3Service extends Service { - uploadFile( - imagePath: string, - subDirectory: string, - useSignedUrl: boolean, - expiresIn: number - ): Promise<{ - success: boolean; - url?: string; - error?: string; - }>; - generateSignedUrl(fileName: string, expiresIn: number): Promise; -} - -export type SearchImage = { - url: string; - description?: string; -}; - -export type SearchResult = { - title: string; - url: string; - content: string; - rawContent?: string; - score: number; - publishedDate?: string; -}; - -export type SearchResponse = { - answer?: string; - query: string; - responseTime: number; - images: SearchImage[]; - results: SearchResult[]; -}; - -export enum ServiceType { - IMAGE_DESCRIPTION = "image_description", - TRANSCRIPTION = "transcription", - VIDEO = "video", - TEXT_GENERATION = 
"text_generation", - BROWSER = "browser", - SPEECH_GENERATION = "speech_generation", - PDF = "pdf", - INTIFACE = "intiface", - AWS_S3 = "aws_s3", - BUTTPLUG = "buttplug", - SLACK = "slack", -} - -export enum LoggingLevel { - DEBUG = "debug", - VERBOSE = "verbose", - NONE = "none", -} - -export type KnowledgeItem = { - id: UUID; - content: Content; -}; - -export interface ActionResponse { - like: boolean; - retweet: boolean; - quote?: boolean; - reply?: boolean; -} - -export interface ISlackService extends Service { - client: any; -} - -export enum TokenizerType { - Auto = "auto", - TikToken = "tiktoken", -} - -export enum TranscriptionProvider { - OpenAI = "openai", - Deepgram = "deepgram", - Local = "local", -} From 4ca602ce7aa9bebd318f84a0c85f6df0d42dd6cc Mon Sep 17 00:00:00 2001 From: Juan Carlos Claridad Date: Sun, 5 Jan 2025 20:03:20 +0800 Subject: [PATCH 06/19] added prune before installing in integrationTests.yaml file --- .github/workflows/integrationTests.yaml | 87 +++++++++++++------------ 1 file changed, 47 insertions(+), 40 deletions(-) diff --git a/.github/workflows/integrationTests.yaml b/.github/workflows/integrationTests.yaml index ef8bd46d220..dfe5e7e3914 100644 --- a/.github/workflows/integrationTests.yaml +++ b/.github/workflows/integrationTests.yaml @@ -1,44 +1,51 @@ name: Integration Tests + on: - push: - branches: - - "*" - pull_request_target: - branches: - - "*" + push: + branches: + - "*" + pull_request_target: + branches: + - "*" jobs: - integration-tests: - runs-on: ubuntu-latest - env: - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - steps: - - uses: actions/checkout@v4 - - - uses: pnpm/action-setup@v3 - with: - version: 9.4.0 - - - uses: actions/setup-node@v4 - with: - node-version: "23" - cache: "pnpm" - - - name: Clean up - run: pnpm clean - - - name: Install dependencies - run: pnpm install -r --no-frozen-lockfile - - - name: Build packages - run: pnpm build - - - name: Check for API key - run: | - if [ -z "$OPENAI_API_KEY" ]; then - echo "Error: OPENAI_API_KEY is not set." - exit 1 - fi - - - name: Run integration tests - run: pnpm run integrationTests + integration-tests: + runs-on: ubuntu-latest + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up PNPM + uses: pnpm/action-setup@v3 + with: + version: 9.4.0 + + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: "23" + cache: "pnpm" + + - name: Clean up previous builds and caches + run: | + pnpm store prune + pnpm clean + rm -rf node_modules .pnpm-store packages/*/node_modules # Removes all node_modules directories + + - name: Install dependencies with frozen lockfile + run: pnpm install --frozen-lockfile -r + + - name: Build packages + run: pnpm build + + - name: Check for API key + run: | + if [ -z "$OPENAI_API_KEY" ]; then + echo "Error: OPENAI_API_KEY is not set." 
+ exit 1 + fi + + - name: Run integration tests + run: pnpm run integrationTests \ No newline at end of file From 4a352936bc32db8c9c447adbe2e36f52b61d2f68 Mon Sep 17 00:00:00 2001 From: Juan Carlos Claridad Date: Sun, 5 Jan 2025 20:31:40 +0800 Subject: [PATCH 07/19] modify the package.json of elizaos/core added require ./dist/index.js and remove @elizaos/source: ./src/index.ts --- packages/core/package.json | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/packages/core/package.json b/packages/core/package.json index 3a1b74388fe..c8e41585d59 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -9,11 +9,10 @@ "exports": { "./package.json": "./package.json", ".": { - "import": { - "@elizaos/source": "./src/index.ts", - "types": "./dist/index.d.ts", - "default": "./dist/index.js" - } + "import": "./dist/index.js", + "require": "./dist/index.js", + "types": "./dist/index.d.ts", + "default": "./dist/index.js" } }, "files": [ From d8dec47f44462d816b23f7de16386061d0824034 Mon Sep 17 00:00:00 2001 From: Juan Carlos Claridad Date: Sun, 5 Jan 2025 20:35:09 +0800 Subject: [PATCH 08/19] use node version 20 for integration test yaml --- .github/workflows/integrationTests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/integrationTests.yaml b/.github/workflows/integrationTests.yaml index dfe5e7e3914..671b5eab7d4 100644 --- a/.github/workflows/integrationTests.yaml +++ b/.github/workflows/integrationTests.yaml @@ -25,7 +25,7 @@ jobs: - name: Set up Node.js uses: actions/setup-node@v4 with: - node-version: "23" + node-version: "20" cache: "pnpm" - name: Clean up previous builds and caches From ee05362dba3d9ecb71209a64945e02416ccfed64 Mon Sep 17 00:00:00 2001 From: Juan Carlos Claridad Date: Sun, 5 Jan 2025 20:37:52 +0800 Subject: [PATCH 09/19] use node version 23.5.0 for integration test yaml --- .github/workflows/integrationTests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/integrationTests.yaml b/.github/workflows/integrationTests.yaml index 671b5eab7d4..e848ed0be4e 100644 --- a/.github/workflows/integrationTests.yaml +++ b/.github/workflows/integrationTests.yaml @@ -25,7 +25,7 @@ jobs: - name: Set up Node.js uses: actions/setup-node@v4 with: - node-version: "20" + node-version: "23.5.0" cache: "pnpm" - name: Clean up previous builds and caches From 535c75572f1855fcf98bc0c6e33a29d68e71d6b5 Mon Sep 17 00:00:00 2001 From: Juan Carlos Claridad Date: Sun, 5 Jan 2025 20:54:11 +0800 Subject: [PATCH 10/19] modify exports package.json of core and plugin-node --- packages/core/package.json | 2 +- packages/plugin-node/package.json | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/packages/core/package.json b/packages/core/package.json index c8e41585d59..dc9c28cba1c 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -9,9 +9,9 @@ "exports": { "./package.json": "./package.json", ".": { + "types": "./dist/index.d.ts", "import": "./dist/index.js", "require": "./dist/index.js", - "types": "./dist/index.d.ts", "default": "./dist/index.js" } }, diff --git a/packages/plugin-node/package.json b/packages/plugin-node/package.json index e6381e210c5..b1770ba5b5c 100644 --- a/packages/plugin-node/package.json +++ b/packages/plugin-node/package.json @@ -9,8 +9,9 @@ "./package.json": "./package.json", ".": { "import": { - "@elizaos/source": "./src/index.ts", "types": "./dist/index.d.ts", + "import": "./dist/index.js", + "require": "./dist/index.js", 
"default": "./dist/index.js" } } From 7da65a859463e4beb831c150277df0c87246c2a4 Mon Sep 17 00:00:00 2001 From: Juan Carlos Claridad Date: Sun, 5 Jan 2025 21:10:02 +0800 Subject: [PATCH 11/19] added pnpm build command before post install in plugin node --- .github/workflows/integrationTests.yaml | 2 +- packages/core/package.json | 9 +++++---- packages/plugin-node/package.json | 5 ++--- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/integrationTests.yaml b/.github/workflows/integrationTests.yaml index e848ed0be4e..41594dde60b 100644 --- a/.github/workflows/integrationTests.yaml +++ b/.github/workflows/integrationTests.yaml @@ -35,7 +35,7 @@ jobs: rm -rf node_modules .pnpm-store packages/*/node_modules # Removes all node_modules directories - name: Install dependencies with frozen lockfile - run: pnpm install --frozen-lockfile -r + run: pnpm install -r --no-frozen-lockfile - name: Build packages run: pnpm build diff --git a/packages/core/package.json b/packages/core/package.json index dc9c28cba1c..3a1b74388fe 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -9,10 +9,11 @@ "exports": { "./package.json": "./package.json", ".": { - "types": "./dist/index.d.ts", - "import": "./dist/index.js", - "require": "./dist/index.js", - "default": "./dist/index.js" + "import": { + "@elizaos/source": "./src/index.ts", + "types": "./dist/index.d.ts", + "default": "./dist/index.js" + } } }, "files": [ diff --git a/packages/plugin-node/package.json b/packages/plugin-node/package.json index b1770ba5b5c..b70ed79c99c 100644 --- a/packages/plugin-node/package.json +++ b/packages/plugin-node/package.json @@ -9,9 +9,8 @@ "./package.json": "./package.json", ".": { "import": { + "@elizaos/source": "./src/index.ts", "types": "./dist/index.d.ts", - "import": "./dist/index.js", - "require": "./dist/index.js", "default": "./dist/index.js" } } @@ -87,7 +86,7 @@ "build": "tsup --format esm --dts", "dev": "tsup --format esm --dts --watch", "lint": "eslint --fix --cache .", - "postinstall": "node scripts/postinstall.js" + "postinstall": "pnpm run build && node scripts/postinstall.js" }, "peerDependencies": { "onnxruntime-node": "1.20.1", From 9d7d15f1993f43d577d8c17ef499e65b304de90b Mon Sep 17 00:00:00 2001 From: Juan Carlos Claridad Date: Sun, 5 Jan 2025 21:20:45 +0800 Subject: [PATCH 12/19] use plugin-node from main --- packages/plugin-node/README.md | 163 +++++++----------- packages/plugin-node/package.json | 3 +- packages/plugin-node/scripts/postinstall.js | 3 +- packages/plugin-node/src/services/awsS3.ts | 3 +- packages/plugin-node/src/services/browser.ts | 17 +- packages/plugin-node/src/services/image.ts | 11 +- packages/plugin-node/src/services/speech.ts | 16 +- .../plugin-node/src/services/transcription.ts | 46 +++-- packages/plugin-node/src/services/video.ts | 67 ++++--- packages/plugin-node/tsconfig.json | 11 +- pnpm-lock.yaml | 9 + 11 files changed, 155 insertions(+), 194 deletions(-) diff --git a/packages/plugin-node/README.md b/packages/plugin-node/README.md index 99a5f098508..a9951681292 100644 --- a/packages/plugin-node/README.md +++ b/packages/plugin-node/README.md @@ -6,6 +6,7 @@ Core Node.js plugin for Eliza OS that provides essential services and actions fo The Node plugin serves as a foundational component of Eliza OS, bridging core Node.js capabilities with the Eliza ecosystem. 
It provides crucial services for file operations, media processing, speech synthesis, and cloud integrations, enabling both local and cloud-based functionality for Eliza agents. + ## Features - **AWS S3 Integration**: File upload and management with AWS S3 @@ -28,13 +29,11 @@ npm install @elizaos/plugin-node The plugin requires various environment variables depending on which services you plan to use: ### Core Settings - ```env OPENAI_API_KEY=your_openai_api_key ``` ### Voice Settings (Optional) - ```env ELEVENLABS_XI_API_KEY=your_elevenlabs_api_key ELEVENLABS_MODEL_ID=eleven_monolingual_v1 @@ -47,7 +46,6 @@ VITS_VOICE=en_US-hfc_female-medium ``` ### AWS Settings (Optional) - ```env AWS_ACCESS_KEY_ID=your_aws_access_key AWS_SECRET_ACCESS_KEY=your_aws_secret_key @@ -71,79 +69,65 @@ elizaOS.registerPlugin(nodePlugin); ## Services ### AwsS3Service - Handles file uploads and management with AWS S3. ### BrowserService - Provides web scraping and content extraction capabilities using Playwright. ### ImageDescriptionService - Processes and analyzes images to generate descriptions. ### LlamaService - Provides local LLM capabilities using LLaMA models. ### PdfService - Extracts and processes text content from PDF files. ### SpeechService - Handles text-to-speech conversion using ElevenLabs and VITS. ### TranscriptionService - Converts speech to text using various providers. ### VideoService - Processes video content, including YouTube video downloads and transcription. ## Actions ### describeImage - Analyzes and generates descriptions for images. ```typescript // Example usage const result = await runtime.executeAction("DESCRIBE_IMAGE", { - imageUrl: "path/to/image.jpg", + imageUrl: "path/to/image.jpg" }); ``` ## Dependencies The plugin requires several peer dependencies: - - `onnxruntime-node`: 1.20.1 - `whatwg-url`: 7.1.0 And trusted dependencies: - - `onnxruntime-node`: 1.20.1 - `sharp`: 0.33.5 ## Safety & Security ### File Operations - - **Path Sanitization**: All file paths are sanitized to prevent directory traversal attacks - **File Size Limits**: Enforced limits on upload sizes - **Type Checking**: Strict file type validation - **Temporary File Cleanup**: Automatic cleanup of temporary files ### API Keys & Credentials - - **Environment Isolation**: Sensitive credentials are isolated in environment variables - **Access Scoping**: Services are initialized with minimum required permissions - **Key Rotation**: Support for credential rotation without service interruption ### Media Processing - - **Resource Limits**: Memory and CPU usage limits for media processing - **Timeout Controls**: Automatic termination of long-running processes - **Format Validation**: Strict media format validation before processing @@ -153,31 +137,25 @@ And trusted dependencies: ### Common Issues 1. **Service Initialization Failures** - ```bash Error: Service initialization failed ``` - - Verify environment variables are properly set - Check service dependencies are installed - Ensure sufficient system permissions 2. **Media Processing Errors** - ```bash Error: Failed to process media file ``` - - Verify file format is supported - Check available system memory - Ensure ffmpeg is properly installed 3. 
**AWS S3 Connection Issues** - ```bash Error: AWS credentials not configured ``` - - Verify AWS credentials are set - Check S3 bucket permissions - Ensure correct region configuration @@ -185,9 +163,8 @@ Error: AWS credentials not configured ### Debug Mode Enable debug logging for detailed troubleshooting: - ```typescript -process.env.DEBUG = "eliza:plugin-node:*"; +process.env.DEBUG = 'eliza:plugin-node:*'; ``` ### System Requirements @@ -200,105 +177,95 @@ process.env.DEBUG = "eliza:plugin-node:*"; ### Performance Optimization 1. **Cache Management** - - - Regular cleanup of `content_cache` directory - - Implement cache size limits - - Monitor disk usage + - Regular cleanup of `content_cache` directory + - Implement cache size limits + - Monitor disk usage 2. **Memory Usage** - - - Configure max buffer sizes - - Implement streaming for large files - - Monitor memory consumption + - Configure max buffer sizes + - Implement streaming for large files + - Monitor memory consumption 3. **Concurrent Operations** - - Adjust queue size limits - - Configure worker threads - - Monitor process pool + - Adjust queue size limits + - Configure worker threads + - Monitor process pool ## Support For issues and feature requests, please: - 1. Check the troubleshooting guide above 2. Review existing GitHub issues 3. Submit a new issue with: - - System information - - Error logs - - Steps to reproduce + - System information + - Error logs + - Steps to reproduce ## Future Enhancements 1. **File Operations** - - - Enhanced streaming capabilities - - Advanced compression options - - Batch file processing - - File type detection - - Metadata management - - Version control integration + - Enhanced streaming capabilities + - Advanced compression options + - Batch file processing + - File type detection + - Metadata management + - Version control integration 2. **Media Processing** - - - Additional video formats - - Advanced image processing - - Audio enhancement tools - - Real-time processing - - Quality optimization - - Format conversion + - Additional video formats + - Advanced image processing + - Audio enhancement tools + - Real-time processing + - Quality optimization + - Format conversion 3. **Cloud Integration** - - - Multi-cloud support - - Advanced caching - - CDN optimization - - Auto-scaling features - - Cost optimization - - Backup automation + - Multi-cloud support + - Advanced caching + - CDN optimization + - Auto-scaling features + - Cost optimization + - Backup automation 4. **Speech Services** - - - Additional voice models - - Language expansion - - Emotion detection - - Voice cloning - - Real-time synthesis - - Custom voice training + - Additional voice models + - Language expansion + - Emotion detection + - Voice cloning + - Real-time synthesis + - Custom voice training 5. **Browser Automation** - - - Headless optimization - - Parallel processing - - Session management - - Cookie handling - - Proxy support - - Resource optimization + - Headless optimization + - Parallel processing + - Session management + - Cookie handling + - Proxy support + - Resource optimization 6. **Security Features** - - - Enhanced encryption - - Access control - - Audit logging - - Threat detection - - Rate limiting - - Compliance tools + - Enhanced encryption + - Access control + - Audit logging + - Threat detection + - Rate limiting + - Compliance tools 7. 
**Performance Optimization** - - - Memory management - - CPU utilization - - Concurrent operations - - Resource pooling - - Cache strategies - - Load balancing + - Memory management + - CPU utilization + - Concurrent operations + - Resource pooling + - Cache strategies + - Load balancing 8. **Developer Tools** - - Enhanced debugging - - Testing framework - - Documentation generator - - CLI improvements - - Monitoring tools - - Integration templates + - Enhanced debugging + - Testing framework + - Documentation generator + - CLI improvements + - Monitoring tools + - Integration templates We welcome community feedback and contributions to help prioritize these enhancements. @@ -322,16 +289,14 @@ This plugin integrates with and builds upon several key technologies: - [Sharp](https://sharp.pixelplumbing.com/) - Image processing Special thanks to: - - The Node.js community and all the open-source contributors who make these integrations possible. - The Eliza community for their contributions and feedback. For more information about Node.js capabilities: - - [Node.js Documentation](https://nodejs.org/en/docs/) - [Node.js Developer Portal](https://nodejs.org/en/about/) - [Node.js GitHub Repository](https://github.com/nodejs/node) ## License -This plugin is part of the Eliza project. See the main project repository for license information. +This plugin is part of the Eliza project. See the main project repository for license information. \ No newline at end of file diff --git a/packages/plugin-node/package.json b/packages/plugin-node/package.json index b70ed79c99c..24a0520641a 100644 --- a/packages/plugin-node/package.json +++ b/packages/plugin-node/package.json @@ -34,6 +34,7 @@ "@opendocsg/pdf2md": "0.1.32", "@types/uuid": "10.0.0", "alawmulaw": "6.0.0", + "bignumber": "1.1.0", "bignumber.js": "9.1.2", "capsolver-npm": "2.0.2", "cldr-segmentation": "2.2.1", @@ -86,7 +87,7 @@ "build": "tsup --format esm --dts", "dev": "tsup --format esm --dts --watch", "lint": "eslint --fix --cache .", - "postinstall": "pnpm run build && node scripts/postinstall.js" + "postinstall": "node scripts/postinstall.js" }, "peerDependencies": { "onnxruntime-node": "1.20.1", diff --git a/packages/plugin-node/scripts/postinstall.js b/packages/plugin-node/scripts/postinstall.js index 3bf7fb03e58..1aee6170fe1 100644 --- a/packages/plugin-node/scripts/postinstall.js +++ b/packages/plugin-node/scripts/postinstall.js @@ -1,12 +1,11 @@ import os from "os"; -import { elizaLogger } from "@elizaos/core"; const platform = os.platform(); if ( platform === "linux" && !(os.release().includes("ubuntu") || os.release().includes("debian")) ) { - elizaLogger.log( + console.log( "Skipping playwright installation on unsupported platform:", platform ); diff --git a/packages/plugin-node/src/services/awsS3.ts b/packages/plugin-node/src/services/awsS3.ts index 0c038c5ef5d..1f94286696a 100644 --- a/packages/plugin-node/src/services/awsS3.ts +++ b/packages/plugin-node/src/services/awsS3.ts @@ -3,7 +3,6 @@ import { IAwsS3Service, Service, ServiceType, - elizaLogger, } from "@elizaos/core"; import { GetObjectCommand, @@ -33,7 +32,7 @@ export class AwsS3Service extends Service implements IAwsS3Service { private runtime: IAgentRuntime | null = null; async initialize(runtime: IAgentRuntime): Promise { - elizaLogger.log("Initializing AwsS3Service"); + console.log("Initializing AwsS3Service"); this.runtime = runtime; this.fileUploadPath = runtime.getSetting("AWS_S3_UPLOAD_PATH") ?? 
""; } diff --git a/packages/plugin-node/src/services/browser.ts b/packages/plugin-node/src/services/browser.ts index 4a482f295bd..863b3de4acc 100644 --- a/packages/plugin-node/src/services/browser.ts +++ b/packages/plugin-node/src/services/browser.ts @@ -7,7 +7,6 @@ import { stringToUuid } from "@elizaos/core"; import { PlaywrightBlocker } from "@cliqz/adblocker-playwright"; import CaptchaSolver from "capsolver-npm"; import { Browser, BrowserContext, chromium, Page } from "playwright"; -import { elizaLogger } from "@elizaos/core"; async function generateSummary( runtime: IAgentRuntime, @@ -170,7 +169,7 @@ export class BrowserService extends Service implements IBrowserService { try { if (!this.context) { - elizaLogger.log( + console.log( "Browser context not initialized. Call initializeBrowser() first." ); } @@ -190,7 +189,7 @@ export class BrowserService extends Service implements IBrowserService { const response = await page.goto(url, { waitUntil: "networkidle" }); if (!response) { - elizaLogger.error("Failed to load the page"); + console.log("Failed to load the page"); } if (response.status() === 403 || response.status() === 404) { @@ -217,7 +216,7 @@ export class BrowserService extends Service implements IBrowserService { }); return content; } catch (error) { - elizaLogger.error("Error:", error); + console.error("Error:", error); return { title: url, description: "Error, could not fetch content", @@ -277,7 +276,7 @@ export class BrowserService extends Service implements IBrowserService { }, solution.gRecaptchaResponse); } } catch (error) { - elizaLogger.error("Error solving CAPTCHA:", error); + console.error("Error solving CAPTCHA:", error); } } @@ -313,7 +312,7 @@ export class BrowserService extends Service implements IBrowserService { try { return await this.fetchPageContent(archiveUrl, runtime); } catch (error) { - elizaLogger.error("Error fetching from Internet Archive:", error); + console.error("Error fetching from Internet Archive:", error); } // Try Google Search as a last resort @@ -321,10 +320,8 @@ export class BrowserService extends Service implements IBrowserService { try { return await this.fetchPageContent(googleSearchUrl, runtime); } catch (error) { - elizaLogger.error("Error fetching from Google Search:", error); - elizaLogger.error( - "Failed to fetch content from alternative sources" - ); + console.error("Error fetching from Google Search:", error); + console.error("Failed to fetch content from alternative sources"); return { title: url, description: diff --git a/packages/plugin-node/src/services/image.ts b/packages/plugin-node/src/services/image.ts index 3ad31d52713..44d88f9e7df 100644 --- a/packages/plugin-node/src/services/image.ts +++ b/packages/plugin-node/src/services/image.ts @@ -187,12 +187,7 @@ export class ImageDescriptionService ): Promise { for (let attempt = 0; attempt < 3; attempt++) { try { - const shouldUseBase64 = - (isGif || isLocalFile) && - !( - this.runtime.imageModelProvider === - ModelProviderName.OPENAI - ); + const shouldUseBase64 = (isGif || isLocalFile)&& !(this.runtime.imageModelProvider === ModelProviderName.OPENAI); const mimeType = isGif ? "png" : path.extname(imageUrl).slice(1) || "jpeg"; @@ -214,8 +209,8 @@ export class ImageDescriptionService // If model provider is openai, use the endpoint, otherwise use the default openai endpoint. const endpoint = this.runtime.imageModelProvider === ModelProviderName.OPENAI - ? models[this.runtime.imageModelProvider].endpoint - : "https://api.openai.com/v1"; + ? 
models[this.runtime.imageModelProvider].endpoint + : "https://api.openai.com/v1"; const response = await fetch(endpoint + "/chat/completions", { method: "POST", headers: { diff --git a/packages/plugin-node/src/services/speech.ts b/packages/plugin-node/src/services/speech.ts index dcf568967ed..25176cc9e81 100644 --- a/packages/plugin-node/src/services/speech.ts +++ b/packages/plugin-node/src/services/speech.ts @@ -115,9 +115,7 @@ async function textToSpeech(runtime: IAgentRuntime, text: string) { status === 401 && errorBody.detail?.status === "quota_exceeded" ) { - elizaLogger.log( - "ElevenLabs quota exceeded, falling back to VITS" - ); + console.log("ElevenLabs quota exceeded, falling back to VITS"); throw new Error("QUOTA_EXCEEDED"); } @@ -179,12 +177,12 @@ async function textToSpeech(runtime: IAgentRuntime, text: string) { let wavStream: Readable; if (audio instanceof Buffer) { - elizaLogger.log("audio is a buffer"); + console.log("audio is a buffer"); wavStream = Readable.from(audio); } else if ("audioChannels" in audio && "sampleRate" in audio) { - elizaLogger.log("audio is a RawAudio"); + console.log("audio is a RawAudio"); const floatBuffer = Buffer.from(audio.audioChannels[0].buffer); - elizaLogger.log("buffer length: ", floatBuffer.length); + console.log("buffer length: ", floatBuffer.length); // Get the sample rate from the RawAudio object const sampleRate = audio.sampleRate; @@ -223,12 +221,12 @@ async function textToSpeech(runtime: IAgentRuntime, text: string) { async function processVitsAudio(audio: any): Promise { let wavStream: Readable; if (audio instanceof Buffer) { - elizaLogger.log("audio is a buffer"); + console.log("audio is a buffer"); wavStream = Readable.from(audio); } else if ("audioChannels" in audio && "sampleRate" in audio) { - elizaLogger.log("audio is a RawAudio"); + console.log("audio is a RawAudio"); const floatBuffer = Buffer.from(audio.audioChannels[0].buffer); - elizaLogger.log("buffer length: ", floatBuffer.length); + console.log("buffer length: ", floatBuffer.length); const sampleRate = audio.sampleRate; const floatArray = new Float32Array(floatBuffer.buffer); diff --git a/packages/plugin-node/src/services/transcription.ts b/packages/plugin-node/src/services/transcription.ts index d8e39ee2dae..daac3bf303f 100644 --- a/packages/plugin-node/src/services/transcription.ts +++ b/packages/plugin-node/src/services/transcription.ts @@ -80,30 +80,26 @@ export class TranscriptionService // 2) If not chosen from character, check .env if (!chosenProvider) { - const envProvider = this.runtime.getSetting( - "TRANSCRIPTION_PROVIDER" - ); + const envProvider = this.runtime.getSetting("TRANSCRIPTION_PROVIDER"); if (envProvider) { switch (envProvider.toLowerCase()) { case "deepgram": - { - const dgKey = - this.runtime.getSetting("DEEPGRAM_API_KEY"); - if (dgKey) { - this.deepgram = createClient(dgKey); - chosenProvider = TranscriptionProvider.Deepgram; - } + { + const dgKey = this.runtime.getSetting("DEEPGRAM_API_KEY"); + if (dgKey) { + this.deepgram = createClient(dgKey); + chosenProvider = TranscriptionProvider.Deepgram; } + } break; case "openai": - { - const openaiKey = - this.runtime.getSetting("OPENAI_API_KEY"); - if (openaiKey) { - this.openai = new OpenAI({ apiKey: openaiKey }); - chosenProvider = TranscriptionProvider.OpenAI; - } + { + const openaiKey = this.runtime.getSetting("OPENAI_API_KEY"); + if (openaiKey) { + this.openai = new OpenAI({ apiKey: openaiKey }); + chosenProvider = TranscriptionProvider.OpenAI; } + } break; case "local": chosenProvider = 
TranscriptionProvider.Local; @@ -171,34 +167,34 @@ export class TranscriptionService try { fs.accessSync("/usr/local/cuda/bin/nvcc", fs.constants.X_OK); this.isCudaAvailable = true; - elizaLogger.log( + console.log( "CUDA detected. Transcription will use CUDA acceleration." ); // eslint-disable-next-line } catch (_error) { - elizaLogger.log( + console.log( "CUDA not detected. Transcription will run on CPU." ); } } else if (platform === "win32") { const cudaPath = path.join( settings.CUDA_PATH || - "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.0", + "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.0", "bin", "nvcc.exe" ); if (fs.existsSync(cudaPath)) { this.isCudaAvailable = true; - elizaLogger.log( + console.log( "CUDA detected. Transcription will use CUDA acceleration." ); } else { - elizaLogger.log( + console.log( "CUDA not detected. Transcription will run on CPU." ); } } else { - elizaLogger.log( + console.log( "CUDA not supported on this platform. Transcription will run on CPU." ); } @@ -319,9 +315,7 @@ export class TranscriptionService * We'll keep transcribeUsingDefaultLogic() if needed by other code references, * but it’s no longer invoked in the new flow. */ - private async transcribeUsingDefaultLogic( - audioBuffer: ArrayBuffer - ): Promise { + private async transcribeUsingDefaultLogic(audioBuffer: ArrayBuffer): Promise { if (this.deepgram) { return await this.transcribeWithDeepgram(audioBuffer); } else if (this.openai) { diff --git a/packages/plugin-node/src/services/video.ts b/packages/plugin-node/src/services/video.ts index fc3190abea7..447aed67e02 100644 --- a/packages/plugin-node/src/services/video.ts +++ b/packages/plugin-node/src/services/video.ts @@ -6,7 +6,6 @@ import { Service, ServiceType, stringToUuid, - elizaLogger, } from "@elizaos/core"; import ffmpeg from "fluent-ffmpeg"; import fs from "fs"; @@ -64,7 +63,7 @@ export class VideoService extends Service implements IVideoService { }); return outputFile; } catch (error) { - elizaLogger.log("Error downloading media:", error); + console.error("Error downloading media:", error); throw new Error("Failed to download media"); } } @@ -87,7 +86,7 @@ export class VideoService extends Service implements IVideoService { }); return outputFile; } catch (error) { - elizaLogger.log("Error downloading video:", error); + console.error("Error downloading video:", error); throw new Error("Failed to download video"); } } @@ -149,14 +148,14 @@ export class VideoService extends Service implements IVideoService { const cached = await runtime.cacheManager.get(cacheKey); if (cached) { - elizaLogger.log("Returning cached video file"); + console.log("Returning cached video file"); return cached; } - elizaLogger.log("Cache miss, processing video"); - elizaLogger.log("Fetching video info"); + console.log("Cache miss, processing video"); + console.log("Fetching video info"); const videoInfo = await this.fetchVideoInfo(url); - elizaLogger.log("Getting transcript"); + console.log("Getting transcript"); const transcript = await this.getTranscript(url, videoInfo, runtime); const result: Media = { @@ -190,7 +189,7 @@ export class VideoService extends Service implements IVideoService { }; } } catch (error) { - elizaLogger.log("Error downloading MP4 file:", error); + console.error("Error downloading MP4 file:", error); // Fall back to using youtube-dl if direct download fails } } @@ -210,7 +209,7 @@ export class VideoService extends Service implements IVideoService { }); return result; } catch (error) { - elizaLogger.log("Error 
fetching video info:", error); + console.error("Error fetching video info:", error); throw new Error("Failed to fetch video information"); } } @@ -220,11 +219,11 @@ export class VideoService extends Service implements IVideoService { videoInfo: any, runtime: IAgentRuntime ): Promise { - elizaLogger.log("Getting transcript"); + console.log("Getting transcript"); try { // Check for manual subtitles if (videoInfo.subtitles && videoInfo.subtitles.en) { - elizaLogger.log("Manual subtitles found"); + console.log("Manual subtitles found"); const srtContent = await this.downloadSRT( videoInfo.subtitles.en[0].url ); @@ -236,7 +235,7 @@ export class VideoService extends Service implements IVideoService { videoInfo.automatic_captions && videoInfo.automatic_captions.en ) { - elizaLogger.log("Automatic captions found"); + console.log("Automatic captions found"); const captionUrl = videoInfo.automatic_captions.en[0].url; const captionContent = await this.downloadCaption(captionUrl); return this.parseCaption(captionContent); @@ -247,23 +246,23 @@ export class VideoService extends Service implements IVideoService { videoInfo.categories && videoInfo.categories.includes("Music") ) { - elizaLogger.log("Music video detected, no lyrics available"); + console.log("Music video detected, no lyrics available"); return "No lyrics available."; } // Fall back to audio transcription - elizaLogger.log( - "No subtitles or captions found, falling back to audio transcription" + console.log( + "No captions found, falling back to audio transcription" ); return this.transcribeAudio(url, runtime); } catch (error) { - elizaLogger.log("Error in getTranscript:", error); + console.error("Error in getTranscript:", error); throw error; } } private async downloadCaption(url: string): Promise { - elizaLogger.log("Downloading caption from:", url); + console.log("Downloading caption from:", url); const response = await fetch(url); if (!response.ok) { throw new Error( @@ -274,7 +273,7 @@ export class VideoService extends Service implements IVideoService { } private parseCaption(captionContent: string): string { - elizaLogger.log("Parsing caption"); + console.log("Parsing caption"); try { const jsonContent = JSON.parse(captionContent); if (jsonContent.events) { @@ -284,11 +283,11 @@ export class VideoService extends Service implements IVideoService { .join("") .replace("\n", " "); } else { - elizaLogger.log("Unexpected caption format:", jsonContent); + console.error("Unexpected caption format:", jsonContent); return "Error: Unable to parse captions"; } } catch (error) { - elizaLogger.log("Error parsing caption:", error); + console.error("Error parsing caption:", error); return "Error: Unable to parse captions"; } } @@ -302,7 +301,7 @@ export class VideoService extends Service implements IVideoService { } private async downloadSRT(url: string): Promise { - elizaLogger.log("downloadSRT"); + console.log("downloadSRT"); const response = await fetch(url); return await response.text(); } @@ -311,7 +310,7 @@ export class VideoService extends Service implements IVideoService { url: string, runtime: IAgentRuntime ): Promise { - elizaLogger.log("Preparing audio for transcription..."); + console.log("Preparing audio for transcription..."); const mp4FilePath = path.join( this.dataDir, `${this.getVideoId(url)}.mp4` @@ -324,20 +323,20 @@ export class VideoService extends Service implements IVideoService { if (!fs.existsSync(mp3FilePath)) { if (fs.existsSync(mp4FilePath)) { - elizaLogger.log("MP4 file found. 
Converting to MP3..."); + console.log("MP4 file found. Converting to MP3..."); await this.convertMp4ToMp3(mp4FilePath, mp3FilePath); } else { - elizaLogger.log("Downloading audio..."); + console.log("Downloading audio..."); await this.downloadAudio(url, mp3FilePath); } } - elizaLogger.log(`Audio prepared at ${mp3FilePath}`); + console.log(`Audio prepared at ${mp3FilePath}`); const audioBuffer = fs.readFileSync(mp3FilePath); - elizaLogger.log(`Audio file size: ${audioBuffer.length} bytes`); + console.log(`Audio file size: ${audioBuffer.length} bytes`); - elizaLogger.log("Starting transcription..."); + console.log("Starting transcription..."); const startTime = Date.now(); const transcriptionService = runtime.getService( ServiceType.TRANSCRIPTION @@ -350,7 +349,7 @@ export class VideoService extends Service implements IVideoService { const transcript = await transcriptionService.transcribe(audioBuffer); const endTime = Date.now(); - elizaLogger.log( + console.log( `Transcription completed in ${(endTime - startTime) / 1000} seconds` ); @@ -368,11 +367,11 @@ export class VideoService extends Service implements IVideoService { .noVideo() .audioCodec("libmp3lame") .on("end", () => { - elizaLogger.log("Conversion to MP3 complete"); + console.log("Conversion to MP3 complete"); resolve(); }) .on("error", (err) => { - elizaLogger.log("Error converting to MP3:", err); + console.error("Error converting to MP3:", err); reject(err); }) .run(); @@ -383,14 +382,14 @@ export class VideoService extends Service implements IVideoService { url: string, outputFile: string ): Promise { - elizaLogger.log("Downloading audio"); + console.log("Downloading audio"); outputFile = outputFile ?? path.join(this.dataDir, `${this.getVideoId(url)}.mp3`); try { if (url.endsWith(".mp4") || url.includes(".mp4?")) { - elizaLogger.log( + console.log( "Direct MP4 file detected, downloading and converting to MP3" ); const tempMp4File = path.join( @@ -417,7 +416,7 @@ export class VideoService extends Service implements IVideoService { .run(); }); } else { - elizaLogger.log( + console.log( "YouTube video detected, downloading audio with youtube-dl" ); await youtubeDl(url, { @@ -430,7 +429,7 @@ export class VideoService extends Service implements IVideoService { } return outputFile; } catch (error) { - elizaLogger.log("Error downloading audio:", error); + console.error("Error downloading audio:", error); throw new Error("Failed to download audio"); } } diff --git a/packages/plugin-node/tsconfig.json b/packages/plugin-node/tsconfig.json index d5059a358bb..2ef05a1844a 100644 --- a/packages/plugin-node/tsconfig.json +++ b/packages/plugin-node/tsconfig.json @@ -3,7 +3,12 @@ "compilerOptions": { "outDir": "dist", "rootDir": "src", - "types": ["node"] + "types": [ + "node" + ] }, - "include": ["src/**/*.ts", "src/**/*.d.ts"] -} + "include": [ + "src/**/*.ts", + "src/**/*.d.ts" + ] +} \ No newline at end of file diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index d47f9e4403f..95ca7617c18 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1548,6 +1548,9 @@ importers: alawmulaw: specifier: 6.0.0 version: 6.0.0 + bignumber: + specifier: 1.1.0 + version: 1.1.0 bignumber.js: specifier: 9.1.2 version: 9.1.2 @@ -9404,6 +9407,10 @@ packages: bignumber.js@9.1.2: resolution: {integrity: sha512-2/mKyZH9K85bzOEfhXDBFZTGd1CTs+5IHpeFQo9luiBG7hghdC851Pj2WAhb6E3R6b9tZj/XKhbg4fum+Kepug==} + bignumber@1.1.0: + resolution: {integrity: sha512-EGqHCKkEAwVwufcEOCYhZQqdVH+7cNCyPZ9yxisYvSjHFB+d9YcGMvorsFpeN5IJpC+lC6K+FHhu8+S4MgJazw==} + engines: {node: '>=0.4.0'} 
+ bin-links@4.0.4: resolution: {integrity: sha512-cMtq4W5ZsEwcutJrVId+a/tjt8GSbS+h0oNkdl6+6rBuEv8Ot33Bevj5KPm40t309zuhVic8NjpuL42QCiJWWA==} engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} @@ -31031,6 +31038,8 @@ snapshots: bignumber.js@9.1.2: {} + bignumber@1.1.0: {} + bin-links@4.0.4: dependencies: cmd-shim: 6.0.3 From 04f1e3cfb37064b84cc5d9c692d6b6eea819ba80 Mon Sep 17 00:00:00 2001 From: Juan Carlos Claridad Date: Sun, 5 Jan 2025 22:00:46 +0800 Subject: [PATCH 13/19] test remove post install js call --- packages/plugin-node/package.json | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/packages/plugin-node/package.json b/packages/plugin-node/package.json index 24a0520641a..f396739da37 100644 --- a/packages/plugin-node/package.json +++ b/packages/plugin-node/package.json @@ -86,8 +86,7 @@ "scripts": { "build": "tsup --format esm --dts", "dev": "tsup --format esm --dts --watch", - "lint": "eslint --fix --cache .", - "postinstall": "node scripts/postinstall.js" + "lint": "eslint --fix --cache ." }, "peerDependencies": { "onnxruntime-node": "1.20.1", From 350a2eca9d11edf07e1773713961eb2a57472448 Mon Sep 17 00:00:00 2001 From: Juan Carlos Claridad Date: Sun, 5 Jan 2025 22:08:28 +0800 Subject: [PATCH 14/19] increase version of plugin-node --- packages/core/package.json | 2 +- packages/plugin-node/package.json | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/packages/core/package.json b/packages/core/package.json index 3a1b74388fe..d7369289147 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -35,7 +35,7 @@ "@eslint/js": "9.16.0", "@rollup/plugin-commonjs": "25.0.8", "@rollup/plugin-json": "6.1.0", - "@rollup/plugin-node-resolve": "15.3.0", + "@rollup/plugin-node-resolve": "16.0.0", "@rollup/plugin-replace": "5.0.7", "@rollup/plugin-terser": "0.1.0", "@rollup/plugin-typescript": "11.1.6", diff --git a/packages/plugin-node/package.json b/packages/plugin-node/package.json index f396739da37..24a0520641a 100644 --- a/packages/plugin-node/package.json +++ b/packages/plugin-node/package.json @@ -86,7 +86,8 @@ "scripts": { "build": "tsup --format esm --dts", "dev": "tsup --format esm --dts --watch", - "lint": "eslint --fix --cache ." 
+ "lint": "eslint --fix --cache .", + "postinstall": "node scripts/postinstall.js" }, "peerDependencies": { "onnxruntime-node": "1.20.1", From b220182f9b9d136c3aaff9a57c24a121b9746237 Mon Sep 17 00:00:00 2001 From: Juan Carlos Claridad Date: Sun, 5 Jan 2025 22:33:08 +0800 Subject: [PATCH 15/19] change node version integrationTest.yaml to 23.3.0 --- .github/workflows/integrationTests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/integrationTests.yaml b/.github/workflows/integrationTests.yaml index 41594dde60b..9cb3ed73fbe 100644 --- a/.github/workflows/integrationTests.yaml +++ b/.github/workflows/integrationTests.yaml @@ -25,7 +25,7 @@ jobs: - name: Set up Node.js uses: actions/setup-node@v4 with: - node-version: "23.5.0" + node-version: "23.3.0" cache: "pnpm" - name: Clean up previous builds and caches From 8a2be86f628dea8c380f5bb905574b077d77618d Mon Sep 17 00:00:00 2001 From: Juan Carlos Claridad Date: Sun, 5 Jan 2025 23:20:33 +0800 Subject: [PATCH 16/19] do some cleaning and rebuild --- .github/workflows/integrationTests.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/integrationTests.yaml b/.github/workflows/integrationTests.yaml index 9cb3ed73fbe..c24617fd910 100644 --- a/.github/workflows/integrationTests.yaml +++ b/.github/workflows/integrationTests.yaml @@ -25,7 +25,7 @@ jobs: - name: Set up Node.js uses: actions/setup-node@v4 with: - node-version: "23.3.0" + node-version: "23" cache: "pnpm" - name: Clean up previous builds and caches @@ -34,7 +34,7 @@ jobs: pnpm clean rm -rf node_modules .pnpm-store packages/*/node_modules # Removes all node_modules directories - - name: Install dependencies with frozen lockfile + - name: Install dependencies run: pnpm install -r --no-frozen-lockfile - name: Build packages From 15895d223bc56d7346fdab9ddb50b4dc1d5dac7e Mon Sep 17 00:00:00 2001 From: Juan Carlos Claridad Date: Sun, 5 Jan 2025 23:34:51 +0800 Subject: [PATCH 17/19] pmpm-lock changes --- pnpm-lock.yaml | 94 +++++++++++++++++++++++++++++--------------------- 1 file changed, 54 insertions(+), 40 deletions(-) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 95ca7617c18..67a4682c812 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -19922,7 +19922,7 @@ snapshots: '@acuminous/bitsyntax@0.1.2': dependencies: buffer-more-ints: 1.0.0 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 safe-buffer: 5.1.2 transitivePeerDependencies: - supports-color @@ -21863,7 +21863,7 @@ snapshots: dependencies: '@scure/bip32': 1.6.1 abitype: 1.0.8(typescript@5.6.3)(zod@3.23.8) - axios: 1.7.9(debug@4.4.0) + axios: 1.7.9 axios-mock-adapter: 1.22.0(axios@1.7.9) axios-retry: 4.5.0(axios@1.7.9) bip32: 4.0.0 @@ -23689,7 +23689,7 @@ snapshots: '@eslint/config-array@0.19.1': dependencies: '@eslint/object-schema': 2.1.5 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 minimatch: 3.1.2 transitivePeerDependencies: - supports-color @@ -23715,7 +23715,7 @@ snapshots: '@eslint/eslintrc@3.2.0': dependencies: ajv: 6.12.6 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 espree: 10.3.0 globals: 14.0.0 ignore: 5.3.2 @@ -24752,7 +24752,7 @@ snapshots: '@kwsites/file-exists@1.1.1': dependencies: - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 transitivePeerDependencies: - supports-color @@ -26725,7 +26725,7 @@ snapshots: '@pm2/pm2-version-check@1.0.4': dependencies: - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 transitivePeerDependencies: - supports-color @@ -29478,7 +29478,7 @@ snapshots: '@typescript-eslint/types': 
8.16.0 '@typescript-eslint/typescript-estree': 8.16.0(typescript@5.6.3) '@typescript-eslint/visitor-keys': 8.16.0 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 eslint: 9.16.0(jiti@2.4.2) optionalDependencies: typescript: 5.6.3 @@ -29511,7 +29511,7 @@ snapshots: dependencies: '@typescript-eslint/typescript-estree': 8.16.0(typescript@5.6.3) '@typescript-eslint/utils': 8.16.0(eslint@9.16.0(jiti@2.4.2))(typescript@5.6.3) - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 eslint: 9.16.0(jiti@2.4.2) ts-api-utils: 1.4.3(typescript@5.6.3) optionalDependencies: @@ -29542,7 +29542,7 @@ snapshots: dependencies: '@typescript-eslint/types': 8.16.0 '@typescript-eslint/visitor-keys': 8.16.0 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 fast-glob: 3.3.2 is-glob: 4.0.3 minimatch: 9.0.5 @@ -30332,7 +30332,7 @@ snapshots: agent-base@6.0.2: dependencies: - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 transitivePeerDependencies: - supports-color @@ -30752,13 +30752,13 @@ snapshots: axios-mock-adapter@1.22.0(axios@1.7.9): dependencies: - axios: 1.7.9(debug@4.4.0) + axios: 1.7.9 fast-deep-equal: 3.1.3 is-buffer: 2.0.5 axios-retry@4.5.0(axios@1.7.9): dependencies: - axios: 1.7.9(debug@4.4.0) + axios: 1.7.9 is-retry-allowed: 2.2.0 axios@0.21.4: @@ -30769,7 +30769,7 @@ snapshots: axios@0.27.2: dependencies: - follow-redirects: 1.15.9(debug@4.4.0) + follow-redirects: 1.15.9 form-data: 4.0.1 transitivePeerDependencies: - debug @@ -30798,6 +30798,14 @@ snapshots: transitivePeerDependencies: - debug + axios@1.7.9: + dependencies: + follow-redirects: 1.15.9 + form-data: 4.0.1 + proxy-from-env: 1.1.0 + transitivePeerDependencies: + - debug + axios@1.7.9(debug@4.4.0): dependencies: follow-redirects: 1.15.9(debug@4.4.0) @@ -31859,7 +31867,7 @@ snapshots: cmake-js@7.3.0: dependencies: axios: 1.7.9(debug@4.4.0) - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 fs-extra: 11.2.0 lodash.isplainobject: 4.0.6 memory-stream: 1.0.0 @@ -32809,7 +32817,7 @@ snapshots: debug-fabulous@2.0.2: dependencies: - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 memoizee: 0.4.17 transitivePeerDependencies: - supports-color @@ -32839,6 +32847,10 @@ snapshots: dependencies: ms: 2.1.3 + debug@4.4.0: + dependencies: + ms: 2.1.3 + debug@4.4.0(supports-color@5.5.0): dependencies: ms: 2.1.3 @@ -33768,7 +33780,7 @@ snapshots: ajv: 6.12.6 chalk: 4.1.2 cross-spawn: 7.0.6 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 escape-string-regexp: 4.0.0 eslint-scope: 8.2.0 eslint-visitor-keys: 4.2.0 @@ -34137,7 +34149,7 @@ snapshots: extract-zip@2.0.1: dependencies: - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 get-stream: 5.2.0 yauzl: 2.10.0 optionalDependencies: @@ -34354,13 +34366,15 @@ snapshots: async: 0.2.10 which: 1.3.1 + follow-redirects@1.15.9: {} + follow-redirects@1.15.9(debug@4.3.7): optionalDependencies: debug: 4.3.7 follow-redirects@1.15.9(debug@4.4.0): optionalDependencies: - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 fomo-sdk-solana@1.3.2(bufferutil@4.0.9)(encoding@0.1.13)(fastestsmallesttextencoderdecoder@1.0.22)(typescript@5.6.3)(utf-8-validate@5.0.10): dependencies: @@ -34705,7 +34719,7 @@ snapshots: dependencies: basic-ftp: 5.0.5 data-uri-to-buffer: 6.0.2 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 transitivePeerDependencies: - supports-color @@ -35449,7 +35463,7 @@ snapshots: http-proxy-agent@7.0.2: dependencies: agent-base: 7.1.3 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 transitivePeerDependencies: - supports-color @@ -35498,21 +35512,21 @@ snapshots: https-proxy-agent@4.0.0: 
dependencies: agent-base: 5.1.1 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 transitivePeerDependencies: - supports-color https-proxy-agent@5.0.1: dependencies: agent-base: 6.0.2 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 transitivePeerDependencies: - supports-color https-proxy-agent@7.0.6: dependencies: agent-base: 7.1.3 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 transitivePeerDependencies: - supports-color @@ -38853,7 +38867,7 @@ snapshots: '@yarnpkg/lockfile': 1.1.0 '@yarnpkg/parsers': 3.0.0-rc.46 '@zkochan/js-yaml': 0.0.7 - axios: 1.7.9(debug@4.4.0) + axios: 1.7.9 chalk: 4.1.0 cli-cursor: 3.1.0 cli-spinners: 2.6.1 @@ -39268,7 +39282,7 @@ snapshots: dependencies: '@tootallnate/quickjs-emscripten': 0.23.0 agent-base: 7.1.3 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 get-uri: 6.0.4 http-proxy-agent: 7.0.2 https-proxy-agent: 7.0.6 @@ -39633,7 +39647,7 @@ snapshots: pm2-axon-rpc@0.7.1: dependencies: - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 transitivePeerDependencies: - supports-color @@ -39641,7 +39655,7 @@ snapshots: dependencies: amp: 0.3.1 amp-message: 0.1.2 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 escape-string-regexp: 4.0.0 transitivePeerDependencies: - supports-color @@ -39658,7 +39672,7 @@ snapshots: pm2-sysmonit@1.2.8: dependencies: async: 3.2.6 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 pidusage: 2.0.21 systeminformation: 5.23.5 tx2: 1.0.5 @@ -39680,7 +39694,7 @@ snapshots: commander: 2.15.1 croner: 4.1.97 dayjs: 1.11.13 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 enquirer: 2.3.6 eventemitter2: 5.0.1 fclone: 1.0.11 @@ -40565,7 +40579,7 @@ snapshots: proxy-agent@6.3.1: dependencies: agent-base: 7.1.3 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 http-proxy-agent: 7.0.2 https-proxy-agent: 7.0.6 lru-cache: 7.18.3 @@ -40659,7 +40673,7 @@ snapshots: puppeteer-extra-plugin-capsolver@2.0.1(bufferutil@4.0.9)(encoding@0.1.13)(puppeteer-core@19.11.1(bufferutil@4.0.9)(encoding@0.1.13)(typescript@5.6.3)(utf-8-validate@5.0.10))(typescript@5.6.3)(utf-8-validate@5.0.10): dependencies: - axios: 1.7.9(debug@4.4.0) + axios: 1.7.9 capsolver-npm: 2.0.2 puppeteer: 19.11.1(bufferutil@4.0.9)(encoding@0.1.13)(typescript@5.6.3)(utf-8-validate@5.0.10) puppeteer-extra: 3.3.6(puppeteer-core@19.11.1(bufferutil@4.0.9)(encoding@0.1.13)(typescript@5.6.3)(utf-8-validate@5.0.10))(puppeteer@19.11.1(bufferutil@4.0.9)(encoding@0.1.13)(typescript@5.6.3)(utf-8-validate@5.0.10)) @@ -40678,7 +40692,7 @@ snapshots: puppeteer-extra-plugin@3.2.3(puppeteer-extra@3.3.6(puppeteer-core@19.11.1(bufferutil@4.0.9)(encoding@0.1.13)(typescript@5.6.3)(utf-8-validate@5.0.10))(puppeteer@19.11.1(bufferutil@4.0.9)(encoding@0.1.13)(typescript@5.6.3)(utf-8-validate@5.0.10))): dependencies: '@types/debug': 4.1.12 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 merge-deep: 3.0.3 optionalDependencies: puppeteer-extra: 3.3.6(puppeteer-core@19.11.1(bufferutil@4.0.9)(encoding@0.1.13)(typescript@5.6.3)(utf-8-validate@5.0.10))(puppeteer@19.11.1(bufferutil@4.0.9)(encoding@0.1.13)(typescript@5.6.3)(utf-8-validate@5.0.10)) @@ -40688,7 +40702,7 @@ snapshots: puppeteer-extra@3.3.6(puppeteer-core@19.11.1(bufferutil@4.0.9)(encoding@0.1.13)(typescript@5.6.3)(utf-8-validate@5.0.10))(puppeteer@19.11.1(bufferutil@4.0.9)(encoding@0.1.13)(typescript@5.6.3)(utf-8-validate@5.0.10)): dependencies: '@types/debug': 4.1.12 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 deepmerge: 4.3.1 optionalDependencies: puppeteer: 
19.11.1(bufferutil@4.0.9)(encoding@0.1.13)(typescript@5.6.3)(utf-8-validate@5.0.10) @@ -41320,7 +41334,7 @@ snapshots: require-in-the-middle@5.2.0: dependencies: - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 module-details-from-path: 1.0.3 resolve: 1.22.10 transitivePeerDependencies: @@ -41898,7 +41912,7 @@ snapshots: dependencies: '@kwsites/file-exists': 1.1.1 '@kwsites/promise-deferred': 1.1.1 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 transitivePeerDependencies: - supports-color @@ -41979,7 +41993,7 @@ snapshots: socks-proxy-agent@8.0.5: dependencies: agent-base: 7.1.3 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 socks: 2.8.3 transitivePeerDependencies: - supports-color @@ -42959,7 +42973,7 @@ snapshots: cac: 6.7.14 chokidar: 4.0.3 consola: 3.3.3 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 esbuild: 0.24.2 joycon: 3.1.1 picocolors: 1.1.1 @@ -42986,7 +43000,7 @@ snapshots: tuf-js@2.2.1: dependencies: '@tufjs/models': 2.0.1 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 make-fetch-happen: 13.0.1 transitivePeerDependencies: - supports-color @@ -43682,7 +43696,7 @@ snapshots: vite-node@2.1.5(@types/node@22.10.5)(terser@5.37.0): dependencies: cac: 6.7.14 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 es-module-lexer: 1.6.0 pathe: 1.1.2 vite: 5.4.11(@types/node@22.10.5)(terser@5.37.0) @@ -43795,7 +43809,7 @@ snapshots: '@vitest/spy': 2.1.5 '@vitest/utils': 2.1.5 chai: 5.1.2 - debug: 4.4.0(supports-color@8.1.1) + debug: 4.4.0 expect-type: 1.1.0 magic-string: 0.30.17 pathe: 1.1.2 From 74b656271bf77c8ecbc5baa817113ecbe4d8094d Mon Sep 17 00:00:00 2001 From: Juan Carlos Claridad Date: Mon, 6 Jan 2025 00:36:14 +0800 Subject: [PATCH 18/19] added todo in code --- packages/plugin-twilio/src/actions/sendSms.ts | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/packages/plugin-twilio/src/actions/sendSms.ts b/packages/plugin-twilio/src/actions/sendSms.ts index cae33f12558..37ecafbdd93 100644 --- a/packages/plugin-twilio/src/actions/sendSms.ts +++ b/packages/plugin-twilio/src/actions/sendSms.ts @@ -67,7 +67,7 @@ export const sendSmsAction: Action = { if (!twilioNumber) { console.error('Twilio phone number is missing'); - + // TODO: generate text to have a different message from AI Agent _callback({ text: `Sorry there was an issue send sms, please try again later`, }); @@ -92,7 +92,7 @@ export const sendSmsAction: Action = { if (!mobileNumberProvidedByUser) { console.error('Mobile number is missing'); - + // TODO: generate text to have a different message from AI Agent _callback({ text: `Sorry, there was an issue send sms, please try again later`, }); @@ -117,6 +117,7 @@ export const sendSmsAction: Action = { if(messageToSendFromUser==null){ console.error('messageToSendFromUser is empty or null'); + // TODO: generate text to have a different message from AI Agent _callback({ text: `Sorry there was an issue sending the WhatsApp message, please try again later`, }); From 30a250a7b8ac749f1168b19e0fee5cd5ef09238e Mon Sep 17 00:00:00 2001 From: Juan Carlos Claridad Date: Mon, 6 Jan 2025 09:17:48 +0800 Subject: [PATCH 19/19] fixed pnpm-lock file --- pnpm-lock.yaml | 134 +++++++++++++++++++++++++------------------------ 1 file changed, 69 insertions(+), 65 deletions(-) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index f1327f33c8f..9cae138245e 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -893,8 +893,8 @@ importers: specifier: 6.1.0 version: 6.1.0(rollup@2.79.2) '@rollup/plugin-node-resolve': - specifier: 15.3.0 - version: 15.3.0(rollup@2.79.2) + 
specifier: 16.0.0 + version: 16.0.0(rollup@2.79.2) '@rollup/plugin-replace': specifier: 5.0.7 version: 5.0.7(rollup@2.79.2) @@ -1960,7 +1960,7 @@ importers: version: link:../core tsup: specifier: 8.3.5 - version: 8.3.5(@swc/core@1.10.4(@swc/helpers@0.5.15))(jiti@2.4.2)(postcss@8.4.49)(typescript@5.6.3)(yaml@2.7.0) + version: 8.3.5(@swc/core@1.10.1(@swc/helpers@0.5.15))(jiti@2.4.2)(postcss@8.4.49)(tsx@4.19.2)(typescript@5.6.3)(yaml@2.6.1) twilio: specifier: ^5.4.0 version: 5.4.0 @@ -7194,6 +7194,15 @@ packages: rollup: optional: true + '@rollup/plugin-node-resolve@16.0.0': + resolution: {integrity: sha512-0FPvAeVUT/zdWoO0jnb/V5BlBsUSNfkIOtFHzMO4H9MOklrmQFY6FduVHKucNb/aTFxvnGhj4MNj/T1oNdDfNg==} + engines: {node: '>=14.0.0'} + peerDependencies: + rollup: ^2.78.0||^3.0.0||^4.0.0 + peerDependenciesMeta: + rollup: + optional: true + '@rollup/plugin-replace@5.0.7': resolution: {integrity: sha512-PqxSfuorkHz/SPpyngLyg5GCEkOcee9M1bkxiVDr41Pd61mqP1PLOoDPbpl44SB2mQGKwV/In74gqQmGITOhEQ==} engines: {node: '>=14.0.0'} @@ -19228,6 +19237,10 @@ packages: tweetnacl@1.0.3: resolution: {integrity: sha512-6rt+RN7aOi1nGMyC4Xa5DdYiukl2UWCbcJft7YhxReBGQD7OAM8Pbxw6YMo4r2diNEA8FEmu32YOn9rhaiE5yw==} + twilio@5.4.0: + resolution: {integrity: sha512-kEmxzdOLTzXzUEXIkBVwT1Itxlbp+rtGrQogNfPtSE3EjoEsxrxB/9tdMIEbrsioL8CzTk/+fiKNJekAyHxjuQ==} + engines: {node: '>=14.0'} + twitter-api-v2@1.18.2: resolution: {integrity: sha512-ggImmoAeVgETYqrWeZy+nWnDpwgTP+IvFEc03Pitt1HcgMX+Yw17rP38Fb5FFTinuyNvS07EPtAfZ184uIyB0A==} @@ -20555,7 +20568,7 @@ snapshots: '@acuminous/bitsyntax@0.1.2': dependencies: buffer-more-ints: 1.0.0 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) safe-buffer: 5.1.2 transitivePeerDependencies: - supports-color @@ -22498,7 +22511,7 @@ snapshots: dependencies: '@scure/bip32': 1.6.0 abitype: 1.0.8(typescript@5.6.3)(zod@3.23.8) - axios: 1.7.9 + axios: 1.7.9(debug@4.4.0) axios-mock-adapter: 1.22.0(axios@1.7.9) axios-retry: 4.5.0(axios@1.7.9) bip32: 4.0.0 @@ -24514,7 +24527,7 @@ snapshots: '@eslint/config-array@0.19.1': dependencies: '@eslint/object-schema': 2.1.5 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) minimatch: 3.1.2 transitivePeerDependencies: - supports-color @@ -24540,7 +24553,7 @@ snapshots: '@eslint/eslintrc@3.2.0': dependencies: ajv: 6.12.6 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) espree: 10.3.0 globals: 14.0.0 ignore: 5.3.2 @@ -25592,7 +25605,7 @@ snapshots: '@kwsites/file-exists@1.1.1': dependencies: - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) transitivePeerDependencies: - supports-color @@ -27660,7 +27673,7 @@ snapshots: '@pm2/pm2-version-check@1.0.4': dependencies: - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) transitivePeerDependencies: - supports-color @@ -28488,25 +28501,25 @@ snapshots: optionalDependencies: rollup: 4.29.1 - '@rollup/plugin-node-resolve@15.3.0(rollup@2.79.2)': + '@rollup/plugin-node-resolve@15.3.0(rollup@3.29.5)': dependencies: - '@rollup/pluginutils': 5.1.4(rollup@2.79.2) + '@rollup/pluginutils': 5.1.4(rollup@3.29.5) '@types/resolve': 1.20.2 deepmerge: 4.3.1 is-module: 1.0.0 resolve: 1.22.10 optionalDependencies: - rollup: 2.79.2 + rollup: 3.29.5 - '@rollup/plugin-node-resolve@15.3.0(rollup@3.29.5)': + '@rollup/plugin-node-resolve@16.0.0(rollup@2.79.2)': dependencies: - '@rollup/pluginutils': 5.1.4(rollup@3.29.5) + '@rollup/pluginutils': 5.1.4(rollup@2.79.2) '@types/resolve': 1.20.2 deepmerge: 4.3.1 is-module: 1.0.0 resolve: 1.22.10 optionalDependencies: - rollup: 3.29.5 + rollup: 2.79.2 '@rollup/plugin-replace@5.0.7(rollup@2.79.2)': 
dependencies: @@ -30458,7 +30471,7 @@ snapshots: '@typescript-eslint/types': 8.16.0 '@typescript-eslint/typescript-estree': 8.16.0(typescript@5.6.3) '@typescript-eslint/visitor-keys': 8.16.0 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) eslint: 9.16.0(jiti@2.4.2) optionalDependencies: typescript: 5.6.3 @@ -30491,7 +30504,7 @@ snapshots: dependencies: '@typescript-eslint/typescript-estree': 8.16.0(typescript@5.6.3) '@typescript-eslint/utils': 8.16.0(eslint@9.16.0(jiti@2.4.2))(typescript@5.6.3) - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) eslint: 9.16.0(jiti@2.4.2) ts-api-utils: 1.4.3(typescript@5.6.3) optionalDependencies: @@ -31313,7 +31326,7 @@ snapshots: agent-base@6.0.2: dependencies: - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) transitivePeerDependencies: - supports-color @@ -31739,13 +31752,13 @@ snapshots: axios-mock-adapter@1.22.0(axios@1.7.9): dependencies: - axios: 1.7.9 + axios: 1.7.9(debug@4.4.0) fast-deep-equal: 3.1.3 is-buffer: 2.0.5 axios-retry@4.5.0(axios@1.7.9): dependencies: - axios: 1.7.9 + axios: 1.7.9(debug@4.4.0) is-retry-allowed: 2.2.0 axios@0.21.4: @@ -31756,7 +31769,7 @@ snapshots: axios@0.27.2: dependencies: - follow-redirects: 1.15.9 + follow-redirects: 1.15.9(debug@4.4.0) form-data: 4.0.1 transitivePeerDependencies: - debug @@ -31785,14 +31798,6 @@ snapshots: transitivePeerDependencies: - debug - axios@1.7.9: - dependencies: - follow-redirects: 1.15.9 - form-data: 4.0.1 - proxy-from-env: 1.1.0 - transitivePeerDependencies: - - debug - axios@1.7.9(debug@4.4.0): dependencies: follow-redirects: 1.15.9(debug@4.4.0) @@ -32893,7 +32898,7 @@ snapshots: cmake-js@7.3.0: dependencies: axios: 1.7.9(debug@4.4.0) - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) fs-extra: 11.2.0 lodash.isplainobject: 4.0.6 memory-stream: 1.0.0 @@ -33860,7 +33865,7 @@ snapshots: debug-fabulous@2.0.2: dependencies: - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) memoizee: 0.4.17 transitivePeerDependencies: - supports-color @@ -33890,10 +33895,6 @@ snapshots: dependencies: ms: 2.1.3 - debug@4.4.0: - dependencies: - ms: 2.1.3 - debug@4.4.0(supports-color@5.5.0): dependencies: ms: 2.1.3 @@ -34901,7 +34902,7 @@ snapshots: ajv: 6.12.6 chalk: 4.1.2 cross-spawn: 7.0.6 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) escape-string-regexp: 4.0.0 eslint-scope: 8.2.0 eslint-visitor-keys: 4.2.0 @@ -35507,15 +35508,13 @@ snapshots: async: 0.2.10 which: 1.3.1 - follow-redirects@1.15.9: {} - follow-redirects@1.15.9(debug@4.3.7): optionalDependencies: debug: 4.3.7 follow-redirects@1.15.9(debug@4.4.0): optionalDependencies: - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) fomo-sdk-solana@1.3.2(bufferutil@4.0.8)(encoding@0.1.13)(fastestsmallesttextencoderdecoder@1.0.22)(typescript@5.6.3)(utf-8-validate@5.0.10): dependencies: @@ -35864,7 +35863,7 @@ snapshots: dependencies: basic-ftp: 5.0.5 data-uri-to-buffer: 6.0.2 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) transitivePeerDependencies: - supports-color @@ -36614,7 +36613,7 @@ snapshots: http-proxy-agent@7.0.2: dependencies: agent-base: 7.1.3 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) transitivePeerDependencies: - supports-color @@ -36665,21 +36664,21 @@ snapshots: https-proxy-agent@4.0.0: dependencies: agent-base: 5.1.1 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) transitivePeerDependencies: - supports-color https-proxy-agent@5.0.1: dependencies: agent-base: 6.0.2 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) transitivePeerDependencies: - supports-color https-proxy-agent@7.0.6: dependencies: 
agent-base: 7.1.3 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) transitivePeerDependencies: - supports-color @@ -40558,7 +40557,7 @@ snapshots: dependencies: '@tootallnate/quickjs-emscripten': 0.23.0 agent-base: 7.1.3 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) get-uri: 6.0.4 http-proxy-agent: 7.0.2 https-proxy-agent: 7.0.6 @@ -40964,7 +40963,7 @@ snapshots: pm2-axon-rpc@0.7.1: dependencies: - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) transitivePeerDependencies: - supports-color @@ -40972,7 +40971,7 @@ snapshots: dependencies: amp: 0.3.1 amp-message: 0.1.2 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) escape-string-regexp: 4.0.0 transitivePeerDependencies: - supports-color @@ -40989,7 +40988,7 @@ snapshots: pm2-sysmonit@1.2.8: dependencies: async: 3.2.6 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) pidusage: 2.0.21 systeminformation: 5.23.5 tx2: 1.0.5 @@ -41011,7 +41010,7 @@ snapshots: commander: 2.15.1 croner: 4.1.97 dayjs: 1.11.13 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) enquirer: 2.3.6 eventemitter2: 5.0.1 fclone: 1.0.11 @@ -41904,7 +41903,7 @@ snapshots: proxy-agent@6.3.1: dependencies: agent-base: 7.1.3 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) http-proxy-agent: 7.0.2 https-proxy-agent: 7.0.6 lru-cache: 7.18.3 @@ -41998,7 +41997,7 @@ snapshots: puppeteer-extra-plugin-capsolver@2.0.1(bufferutil@4.0.8)(encoding@0.1.13)(puppeteer-core@19.11.1(bufferutil@4.0.8)(encoding@0.1.13)(typescript@5.6.3)(utf-8-validate@5.0.10))(typescript@5.6.3)(utf-8-validate@5.0.10): dependencies: - axios: 1.7.9 + axios: 1.7.9(debug@4.4.0) capsolver-npm: 2.0.2 puppeteer: 19.11.1(bufferutil@4.0.8)(encoding@0.1.13)(typescript@5.6.3)(utf-8-validate@5.0.10) puppeteer-extra: 3.3.6(puppeteer-core@19.11.1(bufferutil@4.0.8)(encoding@0.1.13)(typescript@5.6.3)(utf-8-validate@5.0.10))(puppeteer@19.11.1(bufferutil@4.0.8)(encoding@0.1.13)(typescript@5.6.3)(utf-8-validate@5.0.10)) @@ -42017,7 +42016,7 @@ snapshots: puppeteer-extra-plugin@3.2.3(puppeteer-extra@3.3.6(puppeteer-core@19.11.1(bufferutil@4.0.8)(encoding@0.1.13)(typescript@5.6.3)(utf-8-validate@5.0.10))(puppeteer@19.11.1(bufferutil@4.0.8)(encoding@0.1.13)(typescript@5.6.3)(utf-8-validate@5.0.10))): dependencies: '@types/debug': 4.1.12 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) merge-deep: 3.0.3 optionalDependencies: puppeteer-extra: 3.3.6(puppeteer-core@19.11.1(bufferutil@4.0.8)(encoding@0.1.13)(typescript@5.6.3)(utf-8-validate@5.0.10))(puppeteer@19.11.1(bufferutil@4.0.8)(encoding@0.1.13)(typescript@5.6.3)(utf-8-validate@5.0.10)) @@ -42027,7 +42026,7 @@ snapshots: puppeteer-extra@3.3.6(puppeteer-core@19.11.1(bufferutil@4.0.8)(encoding@0.1.13)(typescript@5.6.3)(utf-8-validate@5.0.10))(puppeteer@19.11.1(bufferutil@4.0.8)(encoding@0.1.13)(typescript@5.6.3)(utf-8-validate@5.0.10)): dependencies: '@types/debug': 4.1.12 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) deepmerge: 4.3.1 optionalDependencies: puppeteer: 19.11.1(bufferutil@4.0.8)(encoding@0.1.13)(typescript@5.6.3)(utf-8-validate@5.0.10) @@ -42682,14 +42681,6 @@ snapshots: require-from-string@2.0.2: {} require-in-the-middle@5.2.0: - dependencies: - debug: 4.4.0 - module-details-from-path: 1.0.3 - resolve: 1.22.10 - transitivePeerDependencies: - - supports-color - - require-in-the-middle@7.4.0: dependencies: debug: 4.4.0(supports-color@8.1.1) module-details-from-path: 1.0.3 @@ -43269,7 +43260,7 @@ snapshots: dependencies: '@kwsites/file-exists': 1.1.1 '@kwsites/promise-deferred': 1.1.1 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) 
transitivePeerDependencies: - supports-color @@ -43363,7 +43354,7 @@ snapshots: socks-proxy-agent@8.0.5: dependencies: agent-base: 7.1.3 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) socks: 2.8.3 transitivePeerDependencies: - supports-color @@ -44437,7 +44428,7 @@ snapshots: tuf-js@2.2.1: dependencies: '@tufjs/models': 2.0.1 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) make-fetch-happen: 13.0.1 transitivePeerDependencies: - supports-color @@ -44483,6 +44474,19 @@ snapshots: tweetnacl@1.0.3: {} + twilio@5.4.0: + dependencies: + axios: 1.7.9(debug@4.4.0) + dayjs: 1.11.13 + https-proxy-agent: 5.0.1 + jsonwebtoken: 9.0.2 + qs: 6.13.1 + scmp: 2.1.0 + xmlbuilder: 13.0.2 + transitivePeerDependencies: + - debug + - supports-color + twitter-api-v2@1.18.2: {} tx2@1.0.5: @@ -45218,7 +45222,7 @@ snapshots: '@vitest/spy': 2.1.5 '@vitest/utils': 2.1.5 chai: 5.1.2 - debug: 4.4.0 + debug: 4.4.0(supports-color@8.1.1) expect-type: 1.1.0 magic-string: 0.30.17 pathe: 1.1.2