Commit bbac29e

committed
handle errors on messages so it doesn't crash, add new embedding
1 parent 8ae0136 commit bbac29e

File tree

7 files changed: +383 -45 lines changed

packages/client-discord/src/actions/download_media.ts (-4)

@@ -133,10 +133,6 @@ export default {
             console.error(
                 "Max retries reached. Failed to send message with attachment."
             );
-            await callback({
-                ...response,
-                text: "Sorry, I encountered an error while trying to send the video attachment. Please try again later.",
-            });
             break;
         }

packages/client-discord/src/messages.ts (+1 -3)

@@ -595,9 +595,7 @@ export class MessageManager {
             await this.voiceManager.playAudioStream(userId, audioStream);
         } else {
             // For text channels, send the error message
-            await message.reply(
-                "Sorry, I encountered an error while processing your request."
-            );
+            console.error("Error sending message:", error);
         }
     }
 }

packages/client-telegram/src/messageManager.ts (+1 -3)

@@ -474,9 +474,7 @@ export class MessageManager {
             );
         } catch (error) {
             console.error("❌ Error handling message:", error);
-            await ctx.reply(
-                "Sorry, I encountered an error while processing your request."
-            );
+            console.error("Error sending message:", error);
         }
     }
 }
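
Note: the Discord and Telegram changes above share one pattern: when sending the reply fails, the handler now only logs the error instead of attempting another user-facing apology reply, which could itself throw and crash the message loop. A minimal sketch of that shape in TypeScript, using hypothetical names (IncomingMessage, sendReply) rather than the actual client APIs:

// Minimal sketch (hypothetical names, not the actual client code): log send
// failures instead of replying with an apology that could itself throw.
type IncomingMessage = { content: string };

async function sendReply(_message: IncomingMessage, _text: string): Promise<void> {
    // stand-in for message.reply(...) / ctx.reply(...)
}

async function handleMessage(message: IncomingMessage): Promise<void> {
    try {
        await sendReply(message, `echo: ${message.content}`);
    } catch (error) {
        // Before this commit the catch block also awaited a user-facing
        // "Sorry, I encountered an error..." reply, which could throw again;
        // now the error is only logged so the handler keeps running.
        console.error("Error sending message:", error);
    }
}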

packages/core/package.json (+2 -2)

@@ -15,7 +15,6 @@
         "watch": "tsc --watch",
         "dev": "tsup --format esm --dts --watch",
         "build:docs": "cd docs && pnpm run build",
-        "postinstall": "npx playwright install-deps && npx playwright install",
         "test": "jest --runInBand",
         "test:watch": "jest --runInBand --watch"
     },
@@ -43,9 +42,9 @@
         "eslint-plugin-prettier": "5.2.1",
         "jest": "29.7.0",
         "lint-staged": "15.2.10",
-        "prettier": "3.3.3",
         "nodemon": "3.1.7",
         "pm2": "5.4.2",
+        "prettier": "3.3.3",
         "rimraf": "6.0.1",
         "rollup": "2.79.2",
         "ts-jest": "29.2.5",
@@ -64,6 +63,7 @@
         "@types/uuid": "^10.0.0",
         "ai": "^3.4.23",
         "anthropic-vertex-ai": "^1.0.0",
+        "fastembed": "^1.14.1",
         "gaxios": "6.7.1",
         "glob": "11.0.0",
         "js-sha1": "0.7.0",

packages/core/src/embedding.ts (+30 -12)

@@ -1,10 +1,13 @@
+import path from "path";
 import models from "./models.ts";
 import {
     IAgentRuntime,
     ITextGenerationService,
     ModelProviderName,
     ServiceType,
 } from "./types.ts";
+import fs from "fs";
+import { EmbeddingModel, FlagEmbedding } from "fastembed";

 /**
  * Send a message to the OpenAI API for embedding.
@@ -20,21 +23,35 @@ export async function embed(runtime: IAgentRuntime, input: string) {
         runtime.character.modelProvider !== ModelProviderName.OPENAI &&
         runtime.character.modelProvider !== ModelProviderName.OLLAMA
     ) {
-        const service = runtime.getService<ITextGenerationService>(
-            ServiceType.TEXT_GENERATION
-        );
+
+        // make sure to trim tokens to 8192
+
+        const embeddingModel = await FlagEmbedding.init({
+            model: EmbeddingModel.BGEBaseEN
+        });

-        const instance = service?.getInstance();
+        const embedding: number[] = await embeddingModel.queryEmbed(input);
+        console.log("Embedding dimensions: ", embedding.length);
+        return embedding;

-        if (instance) {
-            return await instance.getEmbeddingResponse(input);
-        }
+        // commented out the text generation service that uses llama
+        // const service = runtime.getService<ITextGenerationService>(
+        //     ServiceType.TEXT_GENERATION
+        // );
+
+        // const instance = service?.getInstance();
+
+        // if (instance) {
+        //     return await instance.getEmbeddingResponse(input);
+        // }
     }
+
+    // TODO: Fix retrieveCachedEmbedding
     // Check if we already have the embedding in the lore
-    // const cachedEmbedding = await retrieveCachedEmbedding(runtime, input);
-    // if (cachedEmbedding) {
-    //     return cachedEmbedding;
-    // }
+    const cachedEmbedding = await retrieveCachedEmbedding(runtime, input);
+    if (cachedEmbedding) {
+        return cachedEmbedding;
+    }

     const requestOptions = {
         method: "POST",
@@ -48,7 +65,8 @@ export async function embed(runtime: IAgentRuntime, input: string) {
         body: JSON.stringify({
             input,
             model: embeddingModel,
-            length: 1536,
+            length: 768, // we are squashing dimensions to 768 for openai, even thought the model supports 1536
+            // -- this is ok for matryoshka embeddings but longterm, we might want to support 1536
         }),
     };
     try {
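
Note on the new local path in embed(): for providers other than OpenAI and Ollama, the commit switches to fastembed's FlagEmbedding with the BGE base EN model, which produces 768-dimensional vectors. A small standalone sketch of that usage (assuming the fastembed dependency added to packages/core above; the helper name localEmbed is hypothetical):

import { EmbeddingModel, FlagEmbedding } from "fastembed";

// Hypothetical helper mirroring the new local path in embed(): initialize the
// BGE base EN model (fastembed downloads the ONNX weights on first use) and
// embed a single query string.
async function localEmbed(input: string): Promise<number[]> {
    const embeddingModel = await FlagEmbedding.init({
        model: EmbeddingModel.BGEBaseEN,
    });
    const embedding: number[] = await embeddingModel.queryEmbed(input);
    console.log("Embedding dimensions: ", embedding.length); // 768 for BGE base
    return embedding;
}

The remote path is trimmed to the same size: the request body now asks for length: 768 instead of 1536, with a code comment noting this is acceptable for matryoshka-style embeddings. A hedged illustration (not from the commit) of why that squash is considered safe: a matryoshka embedding's leading dimensions can be kept and renormalized.

// Illustration only: keep the leading dimensions and renormalize.
function truncateEmbedding(full: number[], dims: number): number[] {
    const prefix = full.slice(0, dims);
    const norm = Math.sqrt(prefix.reduce((sum, x) => sum + x * x, 0)) || 1;
    return prefix.map((x) => x / norm);
}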

packages/plugin-node/package.json (+2 -1)

@@ -63,7 +63,8 @@
     },
     "scripts": {
         "build": "tsup --format esm --dts",
-        "dev": "tsup --watch"
+        "dev": "tsup --watch",
+        "postinstall": "npx playwright install-deps && npx playwright install"
     },
     "peerDependencies": {
         "whatwg-url": "7.1.0",

0 commit comments

Comments
 (0)