1 file changed: +3 −2 lines
packages/plugin-node/src/services

@@ -169,6 +169,7 @@ export class LlamaService extends Service {
     private ctx: LlamaContext | undefined;
     private sequence: LlamaContextSequence | undefined;
     private modelUrl: string;
+    private ollamaModel: string | undefined;

     private messageQueue: QueuedMessage[] = [];
     private isProcessing: boolean = false;
@@ -184,6 +185,7 @@ export class LlamaService extends Service {
             "https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-8B-GGUF/resolve/main/Hermes-3-Llama-3.1-8B.Q8_0.gguf?download=true";
         const modelName = "model.gguf";
         this.modelPath = path.join(__dirname, modelName);
+        this.ollamaModel = process.env.OLLAMA_MODEL;
     }

     async initialize(runtime: IAgentRuntime): Promise<void> {
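Taken together, the first two hunks move the `OLLAMA_MODEL` lookup from per-call to construction time: the field is declared once and populated in the constructor. A minimal standalone sketch of that pattern (the class and method names below are hypothetical, not from this PR):

```ts
// Hypothetical sketch of the caching pattern this diff applies:
// read the environment variable once in the constructor, then reuse
// the field instead of re-reading process.env on every call.
class EnvCachingService {
    private ollamaModel: string | undefined;

    constructor() {
        // Cached once; stays undefined if OLLAMA_MODEL is not set.
        this.ollamaModel = process.env.OLLAMA_MODEL;
    }

    describeModel(): string {
        // Later callers use the cached field.
        return `base: ${this.ollamaModel ?? "unset"}`;
    }
}
```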
@@ -671,13 +673,12 @@
             throw new Error("Sequence not initialized");
         }

-        const ollamaModel = process.env.OLLAMA_MODEL;
         const ollamaUrl =
             process.env.OLLAMA_SERVER_URL || "http://localhost:11434";
         const embeddingModel =
             process.env.OLLAMA_EMBEDDING_MODEL || "mxbai-embed-large";
         elizaLogger.info(
-            `Using Ollama API for embeddings with model ${embeddingModel} (base: ${ollamaModel})`
+            `Using Ollama API for embeddings with model ${embeddingModel} (base: ${this.ollamaModel})`
         );

         const response = await fetch(`${ollamaUrl}/api/embeddings`, {
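The hunk is truncated at the start of the `fetch` call. For context, a minimal sketch of the request it presumably issues, assuming Ollama's documented `/api/embeddings` shape (`{ model, prompt }` in, `{ embedding: number[] }` out); the body and error handling below are illustrative, not the PR's exact code:

```ts
// Hedged sketch of a call to Ollama's /api/embeddings endpoint.
// Variable names mirror the diff; the request body and response
// handling are assumptions, not this file's actual implementation.
const ollamaUrl = process.env.OLLAMA_SERVER_URL || "http://localhost:11434";
const embeddingModel = process.env.OLLAMA_EMBEDDING_MODEL || "mxbai-embed-large";

async function fetchEmbedding(text: string): Promise<number[]> {
    const response = await fetch(`${ollamaUrl}/api/embeddings`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        // Ollama's embeddings endpoint takes the model name and the
        // text to embed as "prompt".
        body: JSON.stringify({ model: embeddingModel, prompt: text }),
    });
    if (!response.ok) {
        throw new Error(`Ollama embeddings request failed: ${response.status}`);
    }
    const data = (await response.json()) as { embedding: number[] };
    return data.embedding;
}
```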