packages/plugin-node/src/services
1 file changed: +3 -2 lines

@@ -164,6 +164,7 @@ export class LlamaService extends Service {
     private ctx: LlamaContext | undefined;
     private sequence: LlamaContextSequence | undefined;
     private modelUrl: string;
+    private ollamaModel: string | undefined;
 
     private messageQueue: QueuedMessage[] = [];
     private isProcessing: boolean = false;
@@ -179,6 +180,7 @@ export class LlamaService extends Service {
             "https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-8B-GGUF/resolve/main/Hermes-3-Llama-3.1-8B.Q8_0.gguf?download=true";
         const modelName = "model.gguf";
         this.modelPath = path.join(__dirname, modelName);
+        this.ollamaModel = process.env.OLLAMA_MODEL;
     }
 
     async initialize(runtime: IAgentRuntime): Promise<void> {}
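
The constructor hunk above caches OLLAMA_MODEL once at construction time rather than re-reading process.env at each call site. A minimal sketch of that pattern in isolation (the field and environment variable names come from the diff; the class name and describe() helper are hypothetical, for illustration only):

// Illustrative sketch: read environment-driven config once, in the constructor,
// so every later call sees the same cached value.
class OllamaConfigExample {
    private readonly ollamaModel: string | undefined;

    constructor() {
        // undefined simply means "no Ollama base model configured".
        this.ollamaModel = process.env.OLLAMA_MODEL;
    }

    describe(): string {
        return this.ollamaModel ?? "(no OLLAMA_MODEL set)";
    }
}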
@@ -486,13 +488,12 @@ export class LlamaService extends Service {
             throw new Error("Model not initialized. Call initialize() first.");
         }
 
-        const ollamaModel = process.env.OLLAMA_MODEL;
         const ollamaUrl =
             process.env.OLLAMA_SERVER_URL || "http://localhost:11434";
         const embeddingModel =
             process.env.OLLAMA_EMBEDDING_MODEL || "mxbai-embed-large";
         elizaLogger.info(
-            `Using Ollama API for embeddings with model ${embeddingModel} (base: ${ollamaModel})`
+            `Using Ollama API for embeddings with model ${embeddingModel} (base: ${this.ollamaModel})`
         );
 
         const response = await fetch(`${ollamaUrl}/api/embeddings`, {
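
For context, here is a self-contained sketch of the Ollama embeddings request this method issues. The server URL, endpoint, and default model name mirror the diff; the function name, error handling, and response typing are assumptions for illustration. Ollama's /api/embeddings endpoint accepts { model, prompt } and responds with { embedding }:

// Sketch of the embeddings call the edited method performs.
// Assumes an Ollama server at OLLAMA_SERVER_URL (default http://localhost:11434)
// with the embedding model already pulled, e.g. `ollama pull mxbai-embed-large`.
async function getOllamaEmbedding(input: string): Promise<number[]> {
    const ollamaUrl = process.env.OLLAMA_SERVER_URL || "http://localhost:11434";
    const embeddingModel =
        process.env.OLLAMA_EMBEDDING_MODEL || "mxbai-embed-large";

    const response = await fetch(`${ollamaUrl}/api/embeddings`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        // Ollama's embeddings endpoint expects { model, prompt } in the body.
        body: JSON.stringify({ model: embeddingModel, prompt: input }),
    });

    if (!response.ok) {
        throw new Error(`Ollama embeddings request failed: ${response.status}`);
    }

    // The response body has the shape { embedding: number[] }.
    const { embedding } = (await response.json()) as { embedding: number[] };
    return embedding;
}

Keeping OLLAMA_MODEL in a field means the log line here and any future call sites read one value cached at construction instead of each consulting process.env separately.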