Skip to content

Commit d0aed6d

Browse files
Merge pull request #2 from yodamaster726/ollama-fix
Ollama fix
2 parents b2a947b + c6afcd9 commit d0aed6d

File tree

1 file changed

+3
-2
lines changed
  • packages/plugin-node/src/services

1 file changed

+3
-2
lines changed

packages/plugin-node/src/services/llama.ts

+3-2
Original file line numberDiff line numberDiff line change
@@ -169,6 +169,7 @@ export class LlamaService extends Service {
169169
private ctx: LlamaContext | undefined;
170170
private sequence: LlamaContextSequence | undefined;
171171
private modelUrl: string;
172+
private ollamaModel: string | undefined;
172173

173174
private messageQueue: QueuedMessage[] = [];
174175
private isProcessing: boolean = false;
@@ -184,6 +185,7 @@ export class LlamaService extends Service {
184185
"https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-8B-GGUF/resolve/main/Hermes-3-Llama-3.1-8B.Q8_0.gguf?download=true";
185186
const modelName = "model.gguf";
186187
this.modelPath = path.join(__dirname, modelName);
188+
this.ollamaModel = process.env.OLLAMA_MODEL;
187189
}
188190

189191
async initialize(runtime: IAgentRuntime): Promise<void> {
@@ -671,13 +673,12 @@ export class LlamaService extends Service {
671673
throw new Error("Sequence not initialized");
672674
}
673675

674-
const ollamaModel = process.env.OLLAMA_MODEL;
675676
const ollamaUrl =
676677
process.env.OLLAMA_SERVER_URL || "http://localhost:11434";
677678
const embeddingModel =
678679
process.env.OLLAMA_EMBEDDING_MODEL || "mxbai-embed-large";
679680
elizaLogger.info(
680-
`Using Ollama API for embeddings with model ${embeddingModel} (base: ${ollamaModel})`
681+
`Using Ollama API for embeddings with model ${embeddingModel} (base: ${this.ollamaModel})`
681682
);
682683

683684
const response = await fetch(`${ollamaUrl}/api/embeddings`, {

0 commit comments

Comments (0)