From caa09723acada7c4c5498aece54661cc43e43ce1 Mon Sep 17 00:00:00 2001
From: yodamaster726
Date: Fri, 22 Nov 2024 15:02:59 -0500
Subject: [PATCH 01/10] llama_local, ollama and logger fixes/updates

---
 package.json                               |   3 +-
 packages/core/src/embedding.ts             |   6 +-
 packages/core/src/generation.ts            |  11 +-
 packages/core/src/logger.ts                |  45 ++-
 packages/core/src/runtime.ts               |  24 ++
 packages/plugin-node/src/services/image.ts |  12 +-
 packages/plugin-node/src/services/llama.ts | 322 ++++++++++++++++++---
 pnpm-lock.yaml                             |   3 +
 8 files changed, 360 insertions(+), 66 deletions(-)

diff --git a/package.json b/package.json
index 20bef6415f3..901cbf4ecf6 100644
--- a/package.json
+++ b/package.json
@@ -47,7 +47,8 @@
     "dependencies": {
         "ollama-ai-provider": "^0.16.1",
         "optional": "^0.1.4",
-        "sharp": "^0.33.5"
+        "sharp": "^0.33.5",
+        "tslog": "^4.9.3"
     },
     "packageManager": "pnpm@9.12.3+sha512.cce0f9de9c5a7c95bef944169cc5dfe8741abfb145078c0d508b868056848a87c81e626246cb60967cbd7fd29a6c062ef73ff840d96b3c86c40ac92cf4a813ee"
 }
diff --git a/packages/core/src/embedding.ts b/packages/core/src/embedding.ts
index 2d03d3f4bbc..b81229be6b5 100644
--- a/packages/core/src/embedding.ts
+++ b/packages/core/src/embedding.ts
@@ -86,8 +86,10 @@ export async function embed(runtime: IAgentRuntime, input: string) {
     // 3. Fallback to OpenAI embedding model
     const embeddingModel = settings.USE_OPENAI_EMBEDDING
         ? "text-embedding-3-small"
-        : modelProvider.model?.[ModelClass.EMBEDDING] ||
-          models[ModelProviderName.OPENAI].model[ModelClass.EMBEDDING];
+        : runtime.character.modelProvider === ModelProviderName.OLLAMA
+          ? settings.OLLAMA_EMBEDDING_MODEL || "mxbai-embed-large"
+          : modelProvider.model?.[ModelClass.EMBEDDING]||
+            models[ModelProviderName.OPENAI].model[ModelClass.EMBEDDING];;

     if (!embeddingModel) {
         throw new Error("No embedding model configured");
diff --git a/packages/core/src/generation.ts b/packages/core/src/generation.ts
index 3fe3174fe80..9a60e0877cf 100644
--- a/packages/core/src/generation.ts
+++ b/packages/core/src/generation.ts
@@ -62,7 +62,12 @@ export async function generateText({
         return "";
     }

-    elizaLogger.log("Genarating text...");
+    elizaLogger.log("Generating text...");
+
+    elizaLogger.info("Generating text with options:", {
+        modelProvider: runtime.modelProvider,
+        model: modelClass,
+    });

     const provider = runtime.modelProvider;
     const endpoint =
@@ -84,6 +89,8 @@ export async function generateText({
         model = runtime.getSetting("LLAMACLOUD_MODEL_SMALL");
     }

+    elizaLogger.info("Selected model:", model);
+
     const temperature = models[provider].settings.temperature;
     const frequency_penalty = models[provider].settings.frequency_penalty;
     const presence_penalty = models[provider].settings.presence_penalty;
@@ -709,7 +716,7 @@ export async function generateMessageResponse({
     let retryLength = 1000; // exponential backoff
     while (true) {
         try {
-            elizaLogger.log("Genarating message response..");
+            elizaLogger.log("Generating message response..");

             const response = await generateText({
                 runtime,
diff --git a/packages/core/src/logger.ts b/packages/core/src/logger.ts
index f8172d0b6ca..ae9b3a19852 100644
--- a/packages/core/src/logger.ts
+++ b/packages/core/src/logger.ts
@@ -1,4 +1,11 @@
-class ElizaLogger {
+import settings from "./settings.ts";
+import { Logger, ILogObjMeta, ILogObj } from "tslog";
+
+interface IElizaLogger extends Logger {
+    progress(message: string): void;
+}
+
+class ElizaLogger implements IElizaLogger {
     constructor() {
         // Check if we're in Node.js environment
         this.isNode =
             typeof process !== "undefined" &&
process.versions.node != null; // Set verbose based on environment - this.verbose = this.isNode ? process.env.verbose === "true" : false; + this.verbose = this.isNode ? settings.VERBOSE === "true" : false; } private isNode: boolean; @@ -173,6 +180,7 @@ class ElizaLogger { } } + // @ts-ignore - custom implementation log(...strings) { this.#logWithStyle(strings, { fg: "white", @@ -182,6 +190,7 @@ class ElizaLogger { }); } + // @ts-ignore - custom implementation warn(...strings) { this.#logWithStyle(strings, { fg: "yellow", @@ -191,6 +200,7 @@ class ElizaLogger { }); } + // @ts-ignore - custom implementation error(...strings) { this.#logWithStyle(strings, { fg: "red", @@ -200,6 +210,7 @@ class ElizaLogger { }); } + // @ts-ignore - custom implementation info(...strings) { this.#logWithStyle(strings, { fg: "blue", @@ -209,15 +220,7 @@ class ElizaLogger { }); } - success(...strings) { - this.#logWithStyle(strings, { - fg: "green", - bg: "", - icon: "\u2713", - groupTitle: ` ${this.successesTitle}`, - }); - } - + // @ts-ignore - custom implementation debug(...strings) { if (!this.verbose) return; this.#logWithStyle(strings, { @@ -228,6 +231,15 @@ class ElizaLogger { }); } + success(...strings) { + this.#logWithStyle(strings, { + fg: "green", + bg: "", + icon: "\u2713", + groupTitle: ` ${this.successesTitle}`, + }); + } + assert(...strings) { this.#logWithStyle(strings, { fg: "cyan", @@ -236,6 +248,17 @@ class ElizaLogger { groupTitle: ` ${this.assertsTitle}`, }); } + + progress(message: string) { + if (this.isNode) { + // Clear the current line and move cursor to beginning + process.stdout.clearLine(0); + process.stdout.cursorTo(0); + process.stdout.write(message); + } else { + console.log(message); + } + } } export const elizaLogger = new ElizaLogger(); diff --git a/packages/core/src/runtime.ts b/packages/core/src/runtime.ts index f973b321d6a..7623349a6c3 100644 --- a/packages/core/src/runtime.ts +++ b/packages/core/src/runtime.ts @@ -176,7 +176,9 @@ export class AgentRuntime implements IAgentRuntime { return; } + // Add the service to the services map this.services.set(serviceType, service); + elizaLogger.success(`Service ${serviceType} registered successfully`); } /** @@ -217,6 +219,12 @@ export class AgentRuntime implements IAgentRuntime { cacheManager: ICacheManager; logging?: boolean; }) { + elizaLogger.info("Initializing AgentRuntime with options:", { + character: opts.character?.name, + modelProvider: opts.modelProvider, + characterModelProvider: opts.character?.modelProvider + }); + this.#conversationLength = opts.conversationLength ?? this.#conversationLength; this.databaseAdapter = opts.databaseAdapter; @@ -280,10 +288,26 @@ export class AgentRuntime implements IAgentRuntime { }); this.serverUrl = opts.serverUrl ?? this.serverUrl; + + elizaLogger.info("Setting model provider..."); + elizaLogger.info("- Character model provider:", this.character.modelProvider); + elizaLogger.info("- Opts model provider:", opts.modelProvider); + elizaLogger.info("- Current model provider:", this.modelProvider); + this.modelProvider = this.character.modelProvider ?? opts.modelProvider ?? 
this.modelProvider; + + elizaLogger.info("Selected model provider:", this.modelProvider); + + // Validate model provider + if (!Object.values(ModelProviderName).includes(this.modelProvider)) { + elizaLogger.error("Invalid model provider:", this.modelProvider); + elizaLogger.error("Available providers:", Object.values(ModelProviderName)); + throw new Error(`Invalid model provider: ${this.modelProvider}`); + } + if (!this.serverUrl) { elizaLogger.warn("No serverUrl provided, defaulting to localhost"); } diff --git a/packages/plugin-node/src/services/image.ts b/packages/plugin-node/src/services/image.ts index aed37cd9275..530e20f0f6f 100644 --- a/packages/plugin-node/src/services/image.ts +++ b/packages/plugin-node/src/services/image.ts @@ -63,7 +63,7 @@ export class ImageDescriptionService env.backends.onnx.wasm.proxy = false; env.backends.onnx.wasm.numThreads = 1; - elizaLogger.log("Downloading Florence model..."); + elizaLogger.info("Downloading Florence model..."); this.model = await Florence2ForConditionalGeneration.from_pretrained( this.modelId, @@ -71,9 +71,9 @@ export class ImageDescriptionService device: "gpu", progress_callback: (progress) => { if (progress.status === "downloading") { - elizaLogger.log( - `Model download progress: ${JSON.stringify(progress)}` - ); + const percent = ((progress.loaded / progress.total) * 100).toFixed(1); + const dots = '.'.repeat(Math.floor(Number(percent) / 5)); + elizaLogger.info(`Downloading Florence model: [${dots.padEnd(20, ' ')}] ${percent}%`); } }, } @@ -81,10 +81,14 @@ export class ImageDescriptionService elizaLogger.success("Florence model downloaded successfully"); + elizaLogger.info("Downloading processor..."); this.processor = (await AutoProcessor.from_pretrained( this.modelId )) as Florence2Processor; + + elizaLogger.info("Downloading tokenizer..."); this.tokenizer = await AutoTokenizer.from_pretrained(this.modelId); + elizaLogger.success("Image service initialization complete"); } async describeImage( diff --git a/packages/plugin-node/src/services/llama.ts b/packages/plugin-node/src/services/llama.ts index 720972278f3..bd66f748504 100644 --- a/packages/plugin-node/src/services/llama.ts +++ b/packages/plugin-node/src/services/llama.ts @@ -181,16 +181,39 @@ export class LlamaService extends Service { this.modelPath = path.join(__dirname, modelName); } - async initialize(runtime: IAgentRuntime): Promise {} + async initialize(runtime: IAgentRuntime): Promise { + elizaLogger.info("Initializing LlamaService..."); + try { + // Check if we should use Ollama + if (runtime.modelProvider === ModelProviderName.OLLAMA) { + elizaLogger.info("Using Ollama provider"); + this.modelInitialized = true; + return; + } + + elizaLogger.info("Using local GGUF model"); + elizaLogger.info("Ensuring model is initialized..."); + await this.ensureInitialized(); + elizaLogger.success("LlamaService initialized successfully"); + } catch (error) { + elizaLogger.error("Failed to initialize LlamaService:", error); + // Re-throw with more context + throw new Error(`LlamaService initialization failed: ${error.message}`); + } + } private async ensureInitialized() { if (!this.modelInitialized) { + elizaLogger.info("Model not initialized, starting initialization..."); await this.initializeModel(); + } else { + elizaLogger.info("Model already initialized"); } } async initializeModel() { try { + elizaLogger.info("Checking model file..."); await this.checkModel(); const systemInfo = await si.graphics(); @@ -199,92 +222,102 @@ export class LlamaService extends Service { ); if 
(hasCUDA) { - console.log("**** LlamaService: CUDA detected"); + elizaLogger.info("LlamaService: CUDA detected, using GPU acceleration"); } else { - console.warn( - "**** LlamaService: No CUDA detected - local response will be slow" - ); + elizaLogger.warn("LlamaService: No CUDA detected - local response will be slow"); } + elizaLogger.info("Initializing Llama instance..."); this.llama = await getLlama({ - gpu: "cuda", + gpu: hasCUDA ? "cuda" : undefined, }); + + elizaLogger.info("Creating JSON schema grammar..."); const grammar = new LlamaJsonSchemaGrammar( this.llama, jsonSchemaGrammar as GbnfJsonSchema ); this.grammar = grammar; + elizaLogger.info("Loading model..."); this.model = await this.llama.loadModel({ modelPath: this.modelPath, }); + elizaLogger.info("Creating context and sequence..."); this.ctx = await this.model.createContext({ contextSize: 8192 }); this.sequence = this.ctx.getSequence(); this.modelInitialized = true; + elizaLogger.success("Model initialization complete"); this.processQueue(); } catch (error) { console.error( - "Model initialization failed. Deleting model and retrying...", - error - ); + "Model initialization failed. Deleting model and retrying:", error); + try { + elizaLogger.info("Attempting to delete and re-download model..."); await this.deleteModel(); await this.initializeModel(); + } catch (retryError) { + elizaLogger.error("Model re-initialization failed:", retryError); + throw new Error(`Model initialization failed after retry: ${retryError.message}`); + } } } async checkModel() { if (!fs.existsSync(this.modelPath)) { + elizaLogger.info("Model file not found, starting download..."); await new Promise((resolve, reject) => { const file = fs.createWriteStream(this.modelPath); let downloadedSize = 0; + let totalSize = 0; const downloadModel = (url: string) => { https .get(url, (response) => { - const isRedirect = - response.statusCode >= 300 && - response.statusCode < 400; - if (isRedirect) { - const redirectUrl = response.headers.location; - if (redirectUrl) { - downloadModel(redirectUrl); - return; - } else { - reject(new Error("Redirect URL not found")); + if (response.statusCode >= 300 && response.statusCode < 400 && response.headers.location) { + elizaLogger.info(`Following redirect to: ${response.headers.location}`); + downloadModel(response.headers.location); return; } + + if (response.statusCode !== 200) { + reject(new Error(`Failed to download model: HTTP ${response.statusCode}`)); + return; } - const totalSize = parseInt( - response.headers["content-length"] ?? "0", - 10 - ); + totalSize = parseInt(response.headers['content-length'] || '0', 10); + elizaLogger.info(`Downloading model: Hermes-3-Llama-3.1-8B.Q8_0.gguf`); + elizaLogger.info(`Download location: ${this.modelPath}`); + elizaLogger.info(`Total size: ${(totalSize / 1024 / 1024).toFixed(2)} MB`); + response.pipe(file); + + let progressString = ''; response.on("data", (chunk) => { downloadedSize += chunk.length; - file.write(chunk); - - // Log progress - const progress = ( - (downloadedSize / totalSize) * - 100 - ).toFixed(2); - process.stdout.write( - `Downloaded ${progress}%\r` - ); + const progress = totalSize > 0 ? 
((downloadedSize / totalSize) * 100).toFixed(1) : '0.0'; + const dots = '.'.repeat(Math.floor(Number(progress) / 5)); + progressString = `Downloading model: [${dots.padEnd(20, ' ')}] ${progress}%`; + elizaLogger.progress(progressString); }); - response.on("end", () => { - file.end(); + file.on("finish", () => { + file.close(); + elizaLogger.progress(''); // Clear the progress line + elizaLogger.success("Model download complete"); resolve(); }); + + response.on("error", (error) => { + fs.unlink(this.modelPath, () => {}); + reject(new Error(`Model download failed: ${error.message}`)); + }); }) - .on("error", (err) => { - fs.unlink(this.modelPath, () => {}); // Delete the file async - console.error("Download failed:", err.message); - reject(err); + .on("error", (error) => { + fs.unlink(this.modelPath, () => {}); + reject(new Error(`Model download request failed: ${error.message}`)); }); }; @@ -392,6 +425,36 @@ export class LlamaService extends Service { this.isProcessing = false; } + async completion(prompt: string, runtime: IAgentRuntime): Promise { + try { + await this.initialize(runtime); + + if (runtime.modelProvider === ModelProviderName.OLLAMA) { + return await this.ollamaCompletion(prompt); + } + + return await this.localCompletion(prompt); + } catch (error) { + elizaLogger.error("Error in completion:", error); + throw error; + } + } + + async embedding(text: string, runtime: IAgentRuntime): Promise { + try { + await this.initialize(runtime); + + if (runtime.modelProvider === ModelProviderName.OLLAMA) { + return await this.ollamaEmbedding(text); + } + + return await this.localEmbedding(text); + } catch (error) { + elizaLogger.error("Error in embedding:", error); + throw error; + } + } + private async getCompletionResponse( context: string, temperature: number, @@ -401,6 +464,37 @@ export class LlamaService extends Service { max_tokens: number, useGrammar: boolean ): Promise { + const ollamaModel = process.env.OLLAMA_MODEL; + if (ollamaModel) { + const ollamaUrl = process.env.OLLAMA_SERVER_URL || 'http://localhost:11434'; + elizaLogger.info(`Using Ollama API at ${ollamaUrl} with model ${ollamaModel}`); + + const response = await fetch(`${ollamaUrl}/api/generate`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + model: ollamaModel, + prompt: context, + stream: false, + options: { + temperature, + stop, + frequency_penalty, + presence_penalty, + num_predict: max_tokens, + } + }), + }); + + if (!response.ok) { + throw new Error(`Ollama request failed: ${response.statusText}`); + } + + const result = await response.json(); + return useGrammar ? 
{ content: result.response } : result.response; + } + + // Use local GGUF model if (!this.sequence) { throw new Error("Model not initialized."); } @@ -429,7 +523,7 @@ export class LlamaService extends Service { })) { const current = this.model.detokenize([...responseTokens, token]); if ([...stop].some((s) => current.includes(s))) { - console.log("Stop sequence found"); + elizaLogger.info("Stop sequence found"); break; } @@ -437,12 +531,12 @@ export class LlamaService extends Service { process.stdout.write(this.model!.detokenize([token])); if (useGrammar) { if (current.replaceAll("\n", "").includes("}```")) { - console.log("JSON block found"); + elizaLogger.info("JSON block found"); break; } } if (responseTokens.length > max_tokens) { - console.log("Max tokens reached"); + elizaLogger.info("Max tokens reached"); break; } } @@ -472,7 +566,7 @@ export class LlamaService extends Service { await this.sequence.clearHistory(); return parsedResponse; } catch (error) { - console.error("Error parsing JSON:", error); + elizaLogger.error("Error parsing JSON:", error); } } else { await this.sequence.clearHistory(); @@ -481,15 +575,151 @@ export class LlamaService extends Service { } async getEmbeddingResponse(input: string): Promise { - await this.ensureInitialized(); - if (!this.model) { - throw new Error("Model not initialized. Call initialize() first."); + const ollamaModel = process.env.OLLAMA_MODEL; + if (ollamaModel) { + const ollamaUrl = process.env.OLLAMA_SERVER_URL || 'http://localhost:11434'; + const embeddingModel = process.env.OLLAMA_EMBEDDING_MODEL || 'mxbai-embed-large'; + elizaLogger.info(`Using Ollama API for embeddings with model ${embeddingModel}`); + + const response = await fetch(`${ollamaUrl}/api/embeddings`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + model: embeddingModel, + prompt: input, + }), + }); + + if (!response.ok) { + throw new Error(`Ollama embeddings request failed: ${response.statusText}`); + } + + const result = await response.json(); + return result.embedding; + } + + // Use local GGUF model + if (!this.sequence) { + throw new Error("Sequence not initialized"); } const embeddingContext = await this.model.createEmbeddingContext(); const embedding = await embeddingContext.getEmbeddingFor(input); return embedding?.vector ? 
[...embedding.vector] : undefined; } + + private async ollamaCompletion(prompt: string): Promise { + const ollamaModel = process.env.OLLAMA_MODEL; + const ollamaUrl = process.env.OLLAMA_SERVER_URL || 'http://localhost:11434'; + elizaLogger.info(`Using Ollama API at ${ollamaUrl} with model ${ollamaModel}`); + + const response = await fetch(`${ollamaUrl}/api/generate`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + model: ollamaModel, + prompt: prompt, + stream: false, + options: { + temperature: 0.7, + stop: ["\n"], + frequency_penalty: 0.5, + presence_penalty: 0.5, + num_predict: 256, + } + }), + }); + + if (!response.ok) { + throw new Error(`Ollama request failed: ${response.statusText}`); + } + + const result = await response.json(); + return result.response; + } + + private async ollamaEmbedding(text: string): Promise { + const ollamaModel = process.env.OLLAMA_MODEL; + const ollamaUrl = process.env.OLLAMA_SERVER_URL || 'http://localhost:11434'; + const embeddingModel = process.env.OLLAMA_EMBEDDING_MODEL || 'mxbai-embed-large'; + elizaLogger.info(`Using Ollama API for embeddings with model ${embeddingModel}`); + + const response = await fetch(`${ollamaUrl}/api/embeddings`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + model: embeddingModel, + prompt: text, + }), + }); + + if (!response.ok) { + throw new Error(`Ollama embeddings request failed: ${response.statusText}`); + } + + const result = await response.json(); + return result.embedding; + } + + private async localCompletion(prompt: string): Promise { + if (!this.sequence) { + throw new Error("Sequence not initialized"); + } + + const tokens = this.model!.tokenize(prompt); + + // tokenize the words to punish + const wordsToPunishTokens = wordsToPunish + .map((word) => this.model!.tokenize(word)) + .flat(); + + const repeatPenalty: LlamaContextSequenceRepeatPenalty = { + punishTokens: () => wordsToPunishTokens, + penalty: 1.2, + frequencyPenalty: 0.5, + presencePenalty: 0.5, + }; + + const responseTokens: Token[] = []; + + for await (const token of this.sequence.evaluate(tokens, { + temperature: 0.7, + repeatPenalty: repeatPenalty, + yieldEogToken: false, + })) { + const current = this.model.detokenize([...responseTokens, token]); + if (current.includes("\n")) { + elizaLogger.info("Stop sequence found"); + break; + } + + responseTokens.push(token); + process.stdout.write(this.model!.detokenize([token])); + if (responseTokens.length > 256) { + elizaLogger.info("Max tokens reached"); + break; + } + } + + const response = this.model!.detokenize(responseTokens); + + if (!response) { + throw new Error("Response is undefined"); + } + + await this.sequence.clearHistory(); + return response; + } + + private async localEmbedding(text: string): Promise { + if (!this.sequence) { + throw new Error("Sequence not initialized"); + } + + const embeddingContext = await this.model.createEmbeddingContext(); + const embedding = await embeddingContext.getEmbeddingFor(text); + return embedding?.vector ? 
[...embedding.vector] : undefined; + } } export default LlamaService; diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 9994e7715d1..0bf9f8f0abd 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -20,6 +20,9 @@ importers: sharp: specifier: ^0.33.5 version: 0.33.5 + tslog: + specifier: ^4.9.3 + version: 4.9.3 devDependencies: '@commitlint/cli': specifier: ^18.4.4 From 9330697fb09f36023c35c0993d7c58b03c1aa0ac Mon Sep 17 00:00:00 2001 From: yodamaster726 Date: Fri, 22 Nov 2024 15:13:58 -0500 Subject: [PATCH 02/10] fix: pretty formatting issues --- packages/plugin-node/src/services/llama.ts | 5 ++--- pnpm-lock.yaml | 6 ++++++ 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/packages/plugin-node/src/services/llama.ts b/packages/plugin-node/src/services/llama.ts index bd66f748504..2a64b48d00f 100644 --- a/packages/plugin-node/src/services/llama.ts +++ b/packages/plugin-node/src/services/llama.ts @@ -1,4 +1,4 @@ -import { elizaLogger, IAgentRuntime, ServiceType } from "@ai16z/eliza"; +import { elizaLogger, IAgentRuntime, ServiceType, ModelProviderName } from "@ai16z/eliza"; import { Service } from "@ai16z/eliza"; import fs from "fs"; import https from "https"; @@ -252,8 +252,7 @@ export class LlamaService extends Service { elizaLogger.success("Model initialization complete"); this.processQueue(); } catch (error) { - console.error( - "Model initialization failed. Deleting model and retrying:", error); + elizaLogger.error("Model initialization failed. Deleting model and retrying:", error); try { elizaLogger.info("Attempting to delete and re-download model..."); await this.deleteModel(); diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 0bf9f8f0abd..e768c65105c 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -13663,6 +13663,10 @@ packages: tslib@2.8.0: resolution: {integrity: sha512-jWVzBLplnCmoaTr13V9dYbiQ99wvZRd0vNWaDRg+aVYRcjDF3nDksxFDE/+fkXnKhpnUUkmx5pK/v8mCtLVqZA==} + tslog@4.9.3: + resolution: {integrity: sha512-oDWuGVONxhVEBtschLf2cs/Jy8i7h1T+CpdkTNWQgdAF7DhRo2G8vMCgILKe7ojdEkLhICWgI1LYSSKaJsRgcw==} + engines: {node: '>=16'} + tsup@8.3.5: resolution: {integrity: sha512-Tunf6r6m6tnZsG9GYWndg0z8dEV7fD733VBFzFJ5Vcm1FtlXB8xBD/rtrBi2a3YKEV7hHtxiZtW5EAVADoe1pA==} engines: {node: '>=18'} @@ -30745,6 +30749,8 @@ snapshots: tslib@2.8.0: {} + tslog@4.9.3: {} + tsup@8.3.5(@swc/core@1.9.2(@swc/helpers@0.5.15))(jiti@2.4.0)(postcss@8.4.49)(typescript@5.6.3)(yaml@2.6.1): dependencies: bundle-require: 5.0.0(esbuild@0.24.0) From 7e8a62716420dc5f7f8b8a3201d850a1b61c5ec0 Mon Sep 17 00:00:00 2001 From: yodamaster726 Date: Fri, 22 Nov 2024 15:14:29 -0500 Subject: [PATCH 03/10] fix: llama vs ollama issues --- packages/core/src/embedding.ts | 6 +- packages/core/src/runtime.ts | 16 +- packages/plugin-node/src/services/image.ts | 13 +- packages/plugin-node/src/services/llama.ts | 186 +++++++++++++++------ 4 files changed, 156 insertions(+), 65 deletions(-) diff --git a/packages/core/src/embedding.ts b/packages/core/src/embedding.ts index b81229be6b5..9c9a6f53a1a 100644 --- a/packages/core/src/embedding.ts +++ b/packages/core/src/embedding.ts @@ -87,9 +87,9 @@ export async function embed(runtime: IAgentRuntime, input: string) { const embeddingModel = settings.USE_OPENAI_EMBEDDING ? "text-embedding-3-small" : runtime.character.modelProvider === ModelProviderName.OLLAMA - ? settings.OLLAMA_EMBEDDING_MODEL || "mxbai-embed-large" - : modelProvider.model?.[ModelClass.EMBEDDING]|| - models[ModelProviderName.OPENAI].model[ModelClass.EMBEDDING];; + ? 
settings.OLLAMA_EMBEDDING_MODEL || "mxbai-embed-large" + : modelProvider.model?.[ModelClass.EMBEDDING] || + models[ModelProviderName.OPENAI].model[ModelClass.EMBEDDING]; if (!embeddingModel) { throw new Error("No embedding model configured"); diff --git a/packages/core/src/runtime.ts b/packages/core/src/runtime.ts index 7623349a6c3..825ca3e2fc8 100644 --- a/packages/core/src/runtime.ts +++ b/packages/core/src/runtime.ts @@ -222,7 +222,7 @@ export class AgentRuntime implements IAgentRuntime { elizaLogger.info("Initializing AgentRuntime with options:", { character: opts.character?.name, modelProvider: opts.modelProvider, - characterModelProvider: opts.character?.modelProvider + characterModelProvider: opts.character?.modelProvider, }); this.#conversationLength = @@ -288,9 +288,12 @@ export class AgentRuntime implements IAgentRuntime { }); this.serverUrl = opts.serverUrl ?? this.serverUrl; - + elizaLogger.info("Setting model provider..."); - elizaLogger.info("- Character model provider:", this.character.modelProvider); + elizaLogger.info( + "- Character model provider:", + this.character.modelProvider + ); elizaLogger.info("- Opts model provider:", opts.modelProvider); elizaLogger.info("- Current model provider:", this.modelProvider); @@ -298,13 +301,16 @@ export class AgentRuntime implements IAgentRuntime { this.character.modelProvider ?? opts.modelProvider ?? this.modelProvider; - + elizaLogger.info("Selected model provider:", this.modelProvider); // Validate model provider if (!Object.values(ModelProviderName).includes(this.modelProvider)) { elizaLogger.error("Invalid model provider:", this.modelProvider); - elizaLogger.error("Available providers:", Object.values(ModelProviderName)); + elizaLogger.error( + "Available providers:", + Object.values(ModelProviderName) + ); throw new Error(`Invalid model provider: ${this.modelProvider}`); } diff --git a/packages/plugin-node/src/services/image.ts b/packages/plugin-node/src/services/image.ts index 530e20f0f6f..c01328fb834 100644 --- a/packages/plugin-node/src/services/image.ts +++ b/packages/plugin-node/src/services/image.ts @@ -71,9 +71,16 @@ export class ImageDescriptionService device: "gpu", progress_callback: (progress) => { if (progress.status === "downloading") { - const percent = ((progress.loaded / progress.total) * 100).toFixed(1); - const dots = '.'.repeat(Math.floor(Number(percent) / 5)); - elizaLogger.info(`Downloading Florence model: [${dots.padEnd(20, ' ')}] ${percent}%`); + const percent = ( + (progress.loaded / progress.total) * + 100 + ).toFixed(1); + const dots = ".".repeat( + Math.floor(Number(percent) / 5) + ); + elizaLogger.info( + `Downloading Florence model: [${dots.padEnd(20, " ")}] ${percent}%` + ); } }, } diff --git a/packages/plugin-node/src/services/llama.ts b/packages/plugin-node/src/services/llama.ts index 2a64b48d00f..b15561f9dbe 100644 --- a/packages/plugin-node/src/services/llama.ts +++ b/packages/plugin-node/src/services/llama.ts @@ -1,4 +1,9 @@ -import { elizaLogger, IAgentRuntime, ServiceType, ModelProviderName } from "@ai16z/eliza"; +import { + elizaLogger, + IAgentRuntime, + ServiceType, + ModelProviderName, +} from "@ai16z/eliza"; import { Service } from "@ai16z/eliza"; import fs from "fs"; import https from "https"; @@ -198,13 +203,17 @@ export class LlamaService extends Service { } catch (error) { elizaLogger.error("Failed to initialize LlamaService:", error); // Re-throw with more context - throw new Error(`LlamaService initialization failed: ${error.message}`); + throw new Error( + `LlamaService 
initialization failed: ${error.message}` + ); } } private async ensureInitialized() { if (!this.modelInitialized) { - elizaLogger.info("Model not initialized, starting initialization..."); + elizaLogger.info( + "Model not initialized, starting initialization..." + ); await this.initializeModel(); } else { elizaLogger.info("Model already initialized"); @@ -222,9 +231,13 @@ export class LlamaService extends Service { ); if (hasCUDA) { - elizaLogger.info("LlamaService: CUDA detected, using GPU acceleration"); + elizaLogger.info( + "LlamaService: CUDA detected, using GPU acceleration" + ); } else { - elizaLogger.warn("LlamaService: No CUDA detected - local response will be slow"); + elizaLogger.warn( + "LlamaService: No CUDA detected - local response will be slow" + ); } elizaLogger.info("Initializing Llama instance..."); @@ -252,14 +265,24 @@ export class LlamaService extends Service { elizaLogger.success("Model initialization complete"); this.processQueue(); } catch (error) { - elizaLogger.error("Model initialization failed. Deleting model and retrying:", error); + elizaLogger.error( + "Model initialization failed. Deleting model and retrying:", + error + ); try { - elizaLogger.info("Attempting to delete and re-download model..."); - await this.deleteModel(); - await this.initializeModel(); + elizaLogger.info( + "Attempting to delete and re-download model..." + ); + await this.deleteModel(); + await this.initializeModel(); } catch (retryError) { - elizaLogger.error("Model re-initialization failed:", retryError); - throw new Error(`Model initialization failed after retry: ${retryError.message}`); + elizaLogger.error( + "Model re-initialization failed:", + retryError + ); + throw new Error( + `Model initialization failed after retry: ${retryError.message}` + ); } } } @@ -275,48 +298,83 @@ export class LlamaService extends Service { const downloadModel = (url: string) => { https .get(url, (response) => { - if (response.statusCode >= 300 && response.statusCode < 400 && response.headers.location) { - elizaLogger.info(`Following redirect to: ${response.headers.location}`); + if ( + response.statusCode >= 300 && + response.statusCode < 400 && + response.headers.location + ) { + elizaLogger.info( + `Following redirect to: ${response.headers.location}` + ); downloadModel(response.headers.location); - return; - } + return; + } if (response.statusCode !== 200) { - reject(new Error(`Failed to download model: HTTP ${response.statusCode}`)); + reject( + new Error( + `Failed to download model: HTTP ${response.statusCode}` + ) + ); return; } - totalSize = parseInt(response.headers['content-length'] || '0', 10); - elizaLogger.info(`Downloading model: Hermes-3-Llama-3.1-8B.Q8_0.gguf`); - elizaLogger.info(`Download location: ${this.modelPath}`); - elizaLogger.info(`Total size: ${(totalSize / 1024 / 1024).toFixed(2)} MB`); + totalSize = parseInt( + response.headers["content-length"] || "0", + 10 + ); + elizaLogger.info( + `Downloading model: Hermes-3-Llama-3.1-8B.Q8_0.gguf` + ); + elizaLogger.info( + `Download location: ${this.modelPath}` + ); + elizaLogger.info( + `Total size: ${(totalSize / 1024 / 1024).toFixed(2)} MB` + ); response.pipe(file); - let progressString = ''; + let progressString = ""; response.on("data", (chunk) => { downloadedSize += chunk.length; - const progress = totalSize > 0 ? 
((downloadedSize / totalSize) * 100).toFixed(1) : '0.0'; - const dots = '.'.repeat(Math.floor(Number(progress) / 5)); - progressString = `Downloading model: [${dots.padEnd(20, ' ')}] ${progress}%`; + const progress = + totalSize > 0 + ? ( + (downloadedSize / totalSize) * + 100 + ).toFixed(1) + : "0.0"; + const dots = ".".repeat( + Math.floor(Number(progress) / 5) + ); + progressString = `Downloading model: [${dots.padEnd(20, " ")}] ${progress}%`; elizaLogger.progress(progressString); }); file.on("finish", () => { file.close(); - elizaLogger.progress(''); // Clear the progress line + elizaLogger.progress(""); // Clear the progress line elizaLogger.success("Model download complete"); resolve(); }); response.on("error", (error) => { fs.unlink(this.modelPath, () => {}); - reject(new Error(`Model download failed: ${error.message}`)); + reject( + new Error( + `Model download failed: ${error.message}` + ) + ); }); }) .on("error", (error) => { fs.unlink(this.modelPath, () => {}); - reject(new Error(`Model download request failed: ${error.message}`)); + reject( + new Error( + `Model download request failed: ${error.message}` + ) + ); }); }; @@ -465,12 +523,15 @@ export class LlamaService extends Service { ): Promise { const ollamaModel = process.env.OLLAMA_MODEL; if (ollamaModel) { - const ollamaUrl = process.env.OLLAMA_SERVER_URL || 'http://localhost:11434'; - elizaLogger.info(`Using Ollama API at ${ollamaUrl} with model ${ollamaModel}`); - + const ollamaUrl = + process.env.OLLAMA_SERVER_URL || "http://localhost:11434"; + elizaLogger.info( + `Using Ollama API at ${ollamaUrl} with model ${ollamaModel}` + ); + const response = await fetch(`${ollamaUrl}/api/generate`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, + method: "POST", + headers: { "Content-Type": "application/json" }, body: JSON.stringify({ model: ollamaModel, prompt: context, @@ -481,12 +542,14 @@ export class LlamaService extends Service { frequency_penalty, presence_penalty, num_predict: max_tokens, - } + }, }), }); if (!response.ok) { - throw new Error(`Ollama request failed: ${response.statusText}`); + throw new Error( + `Ollama request failed: ${response.statusText}` + ); } const result = await response.json(); @@ -576,13 +639,17 @@ export class LlamaService extends Service { async getEmbeddingResponse(input: string): Promise { const ollamaModel = process.env.OLLAMA_MODEL; if (ollamaModel) { - const ollamaUrl = process.env.OLLAMA_SERVER_URL || 'http://localhost:11434'; - const embeddingModel = process.env.OLLAMA_EMBEDDING_MODEL || 'mxbai-embed-large'; - elizaLogger.info(`Using Ollama API for embeddings with model ${embeddingModel}`); - + const ollamaUrl = + process.env.OLLAMA_SERVER_URL || "http://localhost:11434"; + const embeddingModel = + process.env.OLLAMA_EMBEDDING_MODEL || "mxbai-embed-large"; + elizaLogger.info( + `Using Ollama API for embeddings with model ${embeddingModel}` + ); + const response = await fetch(`${ollamaUrl}/api/embeddings`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, + method: "POST", + headers: { "Content-Type": "application/json" }, body: JSON.stringify({ model: embeddingModel, prompt: input, @@ -590,7 +657,9 @@ export class LlamaService extends Service { }); if (!response.ok) { - throw new Error(`Ollama embeddings request failed: ${response.statusText}`); + throw new Error( + `Ollama embeddings request failed: ${response.statusText}` + ); } const result = await response.json(); @@ -609,12 +678,15 @@ export class LlamaService extends Service { private 
async ollamaCompletion(prompt: string): Promise { const ollamaModel = process.env.OLLAMA_MODEL; - const ollamaUrl = process.env.OLLAMA_SERVER_URL || 'http://localhost:11434'; - elizaLogger.info(`Using Ollama API at ${ollamaUrl} with model ${ollamaModel}`); - + const ollamaUrl = + process.env.OLLAMA_SERVER_URL || "http://localhost:11434"; + elizaLogger.info( + `Using Ollama API at ${ollamaUrl} with model ${ollamaModel}` + ); + const response = await fetch(`${ollamaUrl}/api/generate`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, + method: "POST", + headers: { "Content-Type": "application/json" }, body: JSON.stringify({ model: ollamaModel, prompt: prompt, @@ -625,7 +697,7 @@ export class LlamaService extends Service { frequency_penalty: 0.5, presence_penalty: 0.5, num_predict: 256, - } + }, }), }); @@ -639,13 +711,17 @@ export class LlamaService extends Service { private async ollamaEmbedding(text: string): Promise { const ollamaModel = process.env.OLLAMA_MODEL; - const ollamaUrl = process.env.OLLAMA_SERVER_URL || 'http://localhost:11434'; - const embeddingModel = process.env.OLLAMA_EMBEDDING_MODEL || 'mxbai-embed-large'; - elizaLogger.info(`Using Ollama API for embeddings with model ${embeddingModel}`); - + const ollamaUrl = + process.env.OLLAMA_SERVER_URL || "http://localhost:11434"; + const embeddingModel = + process.env.OLLAMA_EMBEDDING_MODEL || "mxbai-embed-large"; + elizaLogger.info( + `Using Ollama API for embeddings with model ${embeddingModel}` + ); + const response = await fetch(`${ollamaUrl}/api/embeddings`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, + method: "POST", + headers: { "Content-Type": "application/json" }, body: JSON.stringify({ model: embeddingModel, prompt: text, @@ -653,7 +729,9 @@ export class LlamaService extends Service { }); if (!response.ok) { - throw new Error(`Ollama embeddings request failed: ${response.statusText}`); + throw new Error( + `Ollama embeddings request failed: ${response.statusText}` + ); } const result = await response.json(); From a6126f9662ecef24ad5da24dcf7252557219b608 Mon Sep 17 00:00:00 2001 From: yodamaster726 Date: Fri, 22 Nov 2024 18:18:48 -0500 Subject: [PATCH 04/10] fix: ollamaModel unused variable. fix security.md --- packages/plugin-node/src/services/llama.ts | 29 +++++++++++++++++++--- 1 file changed, 26 insertions(+), 3 deletions(-) diff --git a/packages/plugin-node/src/services/llama.ts b/packages/plugin-node/src/services/llama.ts index 720972278f3..d4982e6bc6d 100644 --- a/packages/plugin-node/src/services/llama.ts +++ b/packages/plugin-node/src/services/llama.ts @@ -486,9 +486,32 @@ export class LlamaService extends Service { throw new Error("Model not initialized. Call initialize() first."); } - const embeddingContext = await this.model.createEmbeddingContext(); - const embedding = await embeddingContext.getEmbeddingFor(input); - return embedding?.vector ? 
[...embedding.vector] : undefined; + const ollamaModel = process.env.OLLAMA_MODEL; + const ollamaUrl = + process.env.OLLAMA_SERVER_URL || "http://localhost:11434"; + const embeddingModel = + process.env.OLLAMA_EMBEDDING_MODEL || "mxbai-embed-large"; + elizaLogger.info( + `Using Ollama API for embeddings with model ${embeddingModel} (base: ${ollamaModel})` + ); + + const response = await fetch(`${ollamaUrl}/api/embeddings`, { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify({ + input: input, + model: embeddingModel, + }), + }); + + if (!response.ok) { + throw new Error(`Failed to get embedding: ${response.statusText}`); + } + + const embedding = await response.json(); + return embedding.vector; } } From 93608e02a6c2d793af5f7fcb6d1154215ba4eac5 Mon Sep 17 00:00:00 2001 From: yodamaster726 Date: Fri, 22 Nov 2024 18:19:22 -0500 Subject: [PATCH 05/10] fix: security.md failed to commit --- SECURITY.md | 97 ++++++++++++++++++++++++++++------------------------- 1 file changed, 51 insertions(+), 46 deletions(-) diff --git a/SECURITY.md b/SECURITY.md index a08255046e3..95045cf7a38 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -17,74 +17,79 @@ We take the security of Eliza seriously. If you believe you have found a securit 1. **DO NOT** create a public GitHub issue for the vulnerability 2. Send an email to security@eliza.builders with: - - A detailed description of the vulnerability - - Steps to reproduce the issue - - Potential impact of the vulnerability - - Any possible mitigations you've identified + - A detailed description of the vulnerability + - Steps to reproduce the issue + - Potential impact of the vulnerability + - Any possible mitigations you've identified ### What to Expect -- **Initial Response**: Within 48 hours, you will receive an acknowledgment of your report -- **Updates**: We will provide updates every 5 business days about the progress -- **Resolution Timeline**: We aim to resolve critical issues within 15 days -- **Disclosure**: We will coordinate with you on the public disclosure timing +- **Initial Response**: Within 48 hours, you will receive an acknowledgment of your report +- **Updates**: We will provide updates every 5 business days about the progress +- **Resolution Timeline**: We aim to resolve critical issues within 15 days +- **Disclosure**: We will coordinate with you on the public disclosure timing ## Security Best Practices ### For Contributors 1. **API Keys and Secrets** - - Never commit API keys, passwords, or other secrets to the repository - - Use environment variables as described in our secrets management guide - - Rotate any accidentally exposed credentials immediately + + - Never commit API keys, passwords, or other secrets to the repository + - Use environment variables as described in our secrets management guide + - Rotate any accidentally exposed credentials immediately 2. **Dependencies** - - Keep all dependencies up to date - - Review security advisories for dependencies regularly - - Use `pnpm audit` to check for known vulnerabilities + + - Keep all dependencies up to date + - Review security advisories for dependencies regularly + - Use `pnpm audit` to check for known vulnerabilities 3. 
**Code Review** - - All code changes must go through pull request review - - Security-sensitive changes require additional review - - Enable branch protection on main branches + - All code changes must go through pull request review + - Security-sensitive changes require additional review + - Enable branch protection on main branches ### For Users 1. **Environment Setup** - - Follow our [secrets management guide](docs/guides/secrets-management.md) for secure configuration - - Use separate API keys for development and production - - Regularly rotate credentials + + - Follow our [secrets management guide](docs/guides/secrets-management.md) for secure configuration + - Use separate API keys for development and production + - Regularly rotate credentials 2. **Model Provider Security** - - Use appropriate rate limiting for API calls - - Monitor usage patterns for unusual activity - - Implement proper authentication for exposed endpoints + + - Use appropriate rate limiting for API calls + - Monitor usage patterns for unusual activity + - Implement proper authentication for exposed endpoints 3. **Platform Integration** - - Use separate bot tokens for different environments - - Implement proper permission scoping for platform APIs - - Regular audit of platform access and permissions + - Use separate bot tokens for different environments + - Implement proper permission scoping for platform APIs + - Regular audit of platform access and permissions ## Security Features ### Current Implementation -- Environment variable based secrets management -- Type-safe API implementations -- Automated dependency updates via Renovate -- Continuous Integration security checks +- Environment variable based secrets management +- Type-safe API implementations +- Automated dependency updates via Renovate +- Continuous Integration security checks ### Planned Improvements 1. **Q4 2024** - - Automated security scanning in CI pipeline - - Enhanced rate limiting implementation - - Improved audit logging + + - Automated security scanning in CI pipeline + - Enhanced rate limiting implementation + - Improved audit logging 2. **Q1 2025** - - Security-focused documentation improvements - - Enhanced platform permission management - - Automated vulnerability scanning + - Security-focused documentation improvements + - Enhanced platform permission management + - Automated vulnerability scanning ## Vulnerability Disclosure Policy @@ -100,21 +105,21 @@ We follow a coordinated disclosure process: We believe in recognizing security researchers who help improve our security. 
Contributors who report valid security issues will be: -- Credited in our security acknowledgments (unless they wish to remain anonymous) -- Added to our security hall of fame -- Considered for our bug bounty program (coming soon) +- Credited in our security acknowledgments (unless they wish to remain anonymous) +- Added to our security hall of fame +- Considered for our bug bounty program (coming soon) ## License Considerations As an MIT licensed project, users should understand: -- The software is provided "as is" -- No warranty is provided -- Users are responsible for their own security implementations -- Contributors grant perpetual license to their contributions +- The software is provided "as is" +- No warranty is provided +- Users are responsible for their own security implementations +- Contributors grant perpetual license to their contributions ## Contact -- Security Issues: security@eliza.builders -- General Questions: Join our [Discord](https://discord.gg/ai16z) -- Updates: Follow our [security advisory page](https://github.com/ai16z/eliza/security/advisories) +- Security Issues: security@eliza.builders +- General Questions: Join our [Discord](https://discord.gg/ai16z) +- Updates: Follow our [security advisory page](https://github.com/ai16z/eliza/security/advisories) From eddfc28ddf4ffc388fd1fd41921ffd0bab26279b Mon Sep 17 00:00:00 2001 From: yodamaster726 Date: Fri, 22 Nov 2024 18:11:34 -0500 Subject: [PATCH 06/10] fix: update unreferenced ollamaModel variable --- packages/core/src/defaultCharacter.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/core/src/defaultCharacter.ts b/packages/core/src/defaultCharacter.ts index 2ca1bf0179c..1a1b4e75834 100644 --- a/packages/core/src/defaultCharacter.ts +++ b/packages/core/src/defaultCharacter.ts @@ -5,7 +5,7 @@ export const defaultCharacter: Character = { username: "eliza", plugins: [], clients: [], - modelProvider: ModelProviderName.OPENAI, + modelProvider: ModelProviderName.OLLAMA, settings: { secrets: {}, voice: { From ea52d23a2c73372204db12e4f65c8ee975e7ae99 Mon Sep 17 00:00:00 2001 From: yodamaster726 Date: Fri, 22 Nov 2024 18:12:02 -0500 Subject: [PATCH 07/10] fix: unreferenced ollamaModel variable --- packages/plugin-node/src/services/llama.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/plugin-node/src/services/llama.ts b/packages/plugin-node/src/services/llama.ts index b15561f9dbe..f2f3a38a8de 100644 --- a/packages/plugin-node/src/services/llama.ts +++ b/packages/plugin-node/src/services/llama.ts @@ -644,7 +644,7 @@ export class LlamaService extends Service { const embeddingModel = process.env.OLLAMA_EMBEDDING_MODEL || "mxbai-embed-large"; elizaLogger.info( - `Using Ollama API for embeddings with model ${embeddingModel}` + `Using Ollama API for embeddings with model ${embeddingModel} (base: ${ollamaModel})` ); const response = await fetch(`${ollamaUrl}/api/embeddings`, { @@ -716,7 +716,7 @@ export class LlamaService extends Service { const embeddingModel = process.env.OLLAMA_EMBEDDING_MODEL || "mxbai-embed-large"; elizaLogger.info( - `Using Ollama API for embeddings with model ${embeddingModel}` + `Using Ollama API for embeddings with model ${embeddingModel} (base: ${ollamaModel})` ); const response = await fetch(`${ollamaUrl}/api/embeddings`, { From 644ebb2f2d3dc6dcfc76d011670d9e189b76dcd3 Mon Sep 17 00:00:00 2001 From: yodamaster726 Date: Fri, 22 Nov 2024 19:20:31 -0500 Subject: [PATCH 08/10] fix: missing updates for logger.ts --- packages/core/src/logger.ts | 45 
++++++++++++++++++++++++++++--------- 1 file changed, 34 insertions(+), 11 deletions(-) diff --git a/packages/core/src/logger.ts b/packages/core/src/logger.ts index f8172d0b6ca..ae9b3a19852 100644 --- a/packages/core/src/logger.ts +++ b/packages/core/src/logger.ts @@ -1,4 +1,11 @@ -class ElizaLogger { +import settings from "./settings.ts"; +import { Logger, ILogObjMeta, ILogObj } from "tslog"; + +interface IElizaLogger extends Logger { + progress(message: string): void; +} + +class ElizaLogger implements IElizaLogger { constructor() { // Check if we're in Node.js environment this.isNode = @@ -7,7 +14,7 @@ class ElizaLogger { process.versions.node != null; // Set verbose based on environment - this.verbose = this.isNode ? process.env.verbose === "true" : false; + this.verbose = this.isNode ? settings.VERBOSE === "true" : false; } private isNode: boolean; @@ -173,6 +180,7 @@ class ElizaLogger { } } + // @ts-ignore - custom implementation log(...strings) { this.#logWithStyle(strings, { fg: "white", @@ -182,6 +190,7 @@ class ElizaLogger { }); } + // @ts-ignore - custom implementation warn(...strings) { this.#logWithStyle(strings, { fg: "yellow", @@ -191,6 +200,7 @@ class ElizaLogger { }); } + // @ts-ignore - custom implementation error(...strings) { this.#logWithStyle(strings, { fg: "red", @@ -200,6 +210,7 @@ class ElizaLogger { }); } + // @ts-ignore - custom implementation info(...strings) { this.#logWithStyle(strings, { fg: "blue", @@ -209,15 +220,7 @@ class ElizaLogger { }); } - success(...strings) { - this.#logWithStyle(strings, { - fg: "green", - bg: "", - icon: "\u2713", - groupTitle: ` ${this.successesTitle}`, - }); - } - + // @ts-ignore - custom implementation debug(...strings) { if (!this.verbose) return; this.#logWithStyle(strings, { @@ -228,6 +231,15 @@ class ElizaLogger { }); } + success(...strings) { + this.#logWithStyle(strings, { + fg: "green", + bg: "", + icon: "\u2713", + groupTitle: ` ${this.successesTitle}`, + }); + } + assert(...strings) { this.#logWithStyle(strings, { fg: "cyan", @@ -236,6 +248,17 @@ class ElizaLogger { groupTitle: ` ${this.assertsTitle}`, }); } + + progress(message: string) { + if (this.isNode) { + // Clear the current line and move cursor to beginning + process.stdout.clearLine(0); + process.stdout.cursorTo(0); + process.stdout.write(message); + } else { + console.log(message); + } + } } export const elizaLogger = new ElizaLogger(); From 3e4cb1e895ce8147eb9c1b2a9dda1b58c5e112a5 Mon Sep 17 00:00:00 2001 From: yodamaster726 Date: Fri, 22 Nov 2024 19:36:46 -0500 Subject: [PATCH 09/10] fix: ollamaModel already defined --- packages/core/src/defaultCharacter.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/core/src/defaultCharacter.ts b/packages/core/src/defaultCharacter.ts index 2ca1bf0179c..1a1b4e75834 100644 --- a/packages/core/src/defaultCharacter.ts +++ b/packages/core/src/defaultCharacter.ts @@ -5,7 +5,7 @@ export const defaultCharacter: Character = { username: "eliza", plugins: [], clients: [], - modelProvider: ModelProviderName.OPENAI, + modelProvider: ModelProviderName.OLLAMA, settings: { secrets: {}, voice: { From c6afcd99cf6beec63b50939387adda653c85cef4 Mon Sep 17 00:00:00 2001 From: yodamaster726 Date: Fri, 22 Nov 2024 19:38:47 -0500 Subject: [PATCH 10/10] fix: ollamaModel already defined fix: ollamaModel already defined --- packages/plugin-node/src/services/llama.ts | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/packages/plugin-node/src/services/llama.ts 
b/packages/plugin-node/src/services/llama.ts index d4982e6bc6d..b930fcfbe89 100644 --- a/packages/plugin-node/src/services/llama.ts +++ b/packages/plugin-node/src/services/llama.ts @@ -164,6 +164,7 @@ export class LlamaService extends Service { private ctx: LlamaContext | undefined; private sequence: LlamaContextSequence | undefined; private modelUrl: string; + private ollamaModel: string | undefined; private messageQueue: QueuedMessage[] = []; private isProcessing: boolean = false; @@ -179,6 +180,7 @@ export class LlamaService extends Service { "https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-8B-GGUF/resolve/main/Hermes-3-Llama-3.1-8B.Q8_0.gguf?download=true"; const modelName = "model.gguf"; this.modelPath = path.join(__dirname, modelName); + this.ollamaModel = process.env.OLLAMA_MODEL; } async initialize(runtime: IAgentRuntime): Promise {} @@ -486,13 +488,12 @@ export class LlamaService extends Service { throw new Error("Model not initialized. Call initialize() first."); } - const ollamaModel = process.env.OLLAMA_MODEL; const ollamaUrl = process.env.OLLAMA_SERVER_URL || "http://localhost:11434"; const embeddingModel = process.env.OLLAMA_EMBEDDING_MODEL || "mxbai-embed-large"; elizaLogger.info( - `Using Ollama API for embeddings with model ${embeddingModel} (base: ${ollamaModel})` + `Using Ollama API for embeddings with model ${embeddingModel} (base: ${this.ollamaModel})` ); const response = await fetch(`${ollamaUrl}/api/embeddings`, {