Skip to content

Commit 7567269

Browse files
author
mike dupont
committed
fixing compile errors
1 parent 55db7fc commit 7567269

File tree

5 files changed

+38
-35
lines changed

5 files changed

+38
-35
lines changed

package.json

+2-2
Original file line numberDiff line numberDiff line change
@@ -6,9 +6,9 @@
66
"build-docker": "turbo run build",
77
"cleanstart": "if [ -f agent/data/db.sqlite ]; then rm agent/data/db.sqlite; fi && pnpm --filter \"@elizaos/agent\" start --isRoot",
88
"cleanstart:debug": "if [ -f agent/data/db.sqlite ]; then rm agent/data/db.sqlite; fi && cross-env NODE_ENV=development VERBOSE=true DEBUG=eliza:* pnpm --filter \"@elizaos/agent\" start --isRoot",
9-
"start": "pnpm --filter \"@elizaos/agent\" start --isRoot",
9+
"start": "pnpm --filter \"@elizaos/agent\" start --isRoot --characters=characters/eliza.character.json",
1010
"start:client": "pnpm --dir client dev",
11-
"start:debug": "cross-env NODE_ENV=development VERBOSE=true DEBUG=eliza:* pnpm --filter \"@elizaos/agent\" start --isRoot",
11+
"start:debug": "cross-env NODE_ENV=development VERBOSE=true DEBUG=eliza:* pnpm --filter \"@elizaos/agent\" start --isRoot --characters=characters/eliza.character.json",
1212
"dev": "bash ./scripts/dev.sh",
1313
"lint": "bash ./scripts/lint.sh",
1414
"prettier-check": "npx prettier --check --cache .",

packages/core/generation.ts

+4-4
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ import { Buffer } from "buffer";
1414
import { createOllama } from "ollama-ai-provider";
1515
import OpenAI from "openai";
1616
import { encodingForModel, TiktokenModel } from "js-tiktoken";
17-
import { AutoTokenizer } from "@huggingface/transformers";
17+
//import { AutoTokenizer } from "@huggingface/transformers";
1818
import Together from "together-ai";
1919
import { ZodSchema } from "zod";
2020
import { elizaLogger } from "./index.ts";
@@ -100,13 +100,13 @@ export async function trimTokens(
100100
return truncateTiktoken("gpt-4o", context, maxTokens);
101101
}
102102

103-
async function truncateAuto(
103+
/*async function truncateAuto(
104104
modelPath: string,
105105
context: string,
106106
maxTokens: number
107107
) {
108108
try {
109-
const tokenizer = await AutoTokenizer.from_pretrained(modelPath);
109+
//const tokenizer = await AutoTokenizer.from_pretrained(modelPath);
110110
const tokens = tokenizer.encode(context);
111111
112112
// If already within limits, return unchanged
@@ -124,7 +124,7 @@ async function truncateAuto(
124124
// Return truncated string if tokenization fails
125125
return context.slice(-maxTokens * 4); // Rough estimate of 4 chars per token
126126
}
127-
}
127+
}*/
128128

129129
async function truncateTiktoken(
130130
model: TiktokenModel,

packages/core/src/generation.ts

+24-21
Original file line numberDiff line numberDiff line change
@@ -507,30 +507,33 @@ export async function generateText({
507507

508508
case ModelProviderName.BEDROCK: {
509509
elizaLogger.debug("Initializing Bedrock model.");
510-
511-
const { text: bedrockResponse } = await aiGenerateText({
512-
model: bedrock(model),
513-
prompt: context,
514-
system:
510+
try {
511+
const { text: bedrockResponse } = await aiGenerateText({
512+
model: bedrock(model),
513+
prompt: context,
514+
system:
515515
runtime.character.system ??
516-
settings.SYSTEM_PROMPT ??
516+
settings.SYSTEM_PROMPT ??
517517
undefined,
518-
tools: tools,
519-
onStepFinish: onStepFinish,
520-
maxSteps: maxSteps,
521-
temperature: temperature,
522-
maxTokens: max_response_length,
523-
frequencyPenalty: frequency_penalty,
524-
presencePenalty: presence_penalty,
525-
experimental_telemetry: experimental_telemetry,
518+
tools: tools,
519+
onStepFinish: onStepFinish,
520+
maxSteps: maxSteps,
521+
temperature: temperature,
522+
maxTokens: max_response_length,
523+
frequencyPenalty: frequency_penalty,
524+
presencePenalty: presence_penalty,
525+
experimental_telemetry: experimental_telemetry,
526526
});
527-
528-
response = bedrockResponse;
529-
elizaLogger.debug("Received response from bedrock model.");
530-
break;
531-
}
532-
533-
case ModelProviderName.CLAUDE_VERTEX: {
527+
response = bedrockResponse;
528+
elizaLogger.debug("Received response from bedrock model.");
529+
} catch (error) {
530+
elizaLogger.error("Error in bedrock:", error);
531+
throw error;
532+
}
533+
break;
534+
}
535+
536+
case ModelProviderName.CLAUDE_VERTEX: {
534537
elizaLogger.debug("Initializing Claude Vertex model.");
535538

536539
const anthropic = createAnthropic({

packages/core/src/localembeddingManager.ts

+7-7
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,11 @@
11
import path from "node:path";
22
import { fileURLToPath } from "url";
3-
import { FlagEmbedding, EmbeddingModel } from "fastembed";
3+
//import { FlagEmbedding, EmbeddingModel } from "fastembed";
44
import elizaLogger from "./logger";
55

66
class LocalEmbeddingModelManager {
77
private static instance: LocalEmbeddingModelManager | null;
8-
private model: FlagEmbedding | null = null;
8+
private model: null = null;
99
private initPromise: Promise<void> | null = null;
1010
private initializationLock = false;
1111

@@ -79,11 +79,11 @@ class LocalEmbeddingModelManager {
7979

8080
elizaLogger.debug("Initializing BGE embedding model...");
8181

82-
this.model = await FlagEmbedding.init({
83-
cacheDir: cacheDir,
84-
model: EmbeddingModel.BGESmallENV15,
85-
maxLength: 512,
86-
});
82+
// this.model = await FlagEmbedding.init({
83+
// cacheDir: cacheDir,
84+
// model: EmbeddingModel.BGESmallENV15,
85+
// maxLength: 512,
86+
// });
8787

8888
elizaLogger.debug("BGE model initialized successfully");
8989
} catch (error) {

packages/core/src/ragknowledge.ts

+1-1
Original file line numberDiff line numberDiff line change
@@ -367,7 +367,7 @@ export class RAGKnowledgeManager implements IRAGKnowledgeManager {
367367
};
368368

369369
const startTime = Date.now();
370-
let content = file.content;
370+
const content = file.content;
371371

372372
try {
373373
const fileSizeKB = new TextEncoder().encode(content).length / 1024;

0 commit comments

Comments
 (0)