
Commit ca99c91

fix: telegram issues and oolama
1 parent 51c31fc commit ca99c91

3 files changed (+48 −46)


packages/client-telegram/src/index.ts

+1 −1
@@ -20,7 +20,7 @@ export const TelegramClientInterface: Client = {
         return tg;
     },
     stop: async (_runtime: IAgentRuntime) => {
-        console.warn("Telegram client does not support stopping yet");
+        elizaLogger.warn("Telegram client does not support stopping yet");
     },
 };
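The only change in this file routes the stop-handler warning through the shared elizaLogger instead of console.warn, so the Telegram client logs through the same pipeline as the rest of the runtime. A minimal sketch of the resulting hook, assuming elizaLogger was already imported in this module (the diff adds no import here):

import { elizaLogger } from "@ai16z/eliza";
import type { IAgentRuntime } from "@ai16z/eliza";

// Teardown is not implemented yet; surface that through the shared logger
// rather than writing directly to the console.
const stop = async (_runtime: IAgentRuntime) => {
    elizaLogger.warn("Telegram client does not support stopping yet");
};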

packages/client-telegram/src/messageManager.ts

+37 −34
@@ -1,7 +1,7 @@
 import { Message } from "@telegraf/types";
 import { Context, Telegraf } from "telegraf";
 
-import { composeContext } from "@ai16z/eliza";
+import { composeContext, elizaLogger, ServiceType } from "@ai16z/eliza";
 import { embeddingZeroVector } from "@ai16z/eliza";
 import {
     Content,
@@ -17,7 +17,6 @@ import { stringToUuid } from "@ai16z/eliza";
 
 import { generateMessageResponse, generateShouldRespond } from "@ai16z/eliza";
 import { messageCompletionFooter, shouldRespondFooter } from "@ai16z/eliza";
-import { ImageDescriptionService } from "@ai16z/plugin-node";
 
 const MAX_MESSAGE_LENGTH = 4096; // Telegram's max message length
 
@@ -137,57 +136,49 @@ Thread of Tweets You Are Replying To:
 export class MessageManager {
     public bot: Telegraf<Context>;
     private runtime: IAgentRuntime;
-    private imageService: IImageDescriptionService;
 
     constructor(bot: Telegraf<Context>, runtime: IAgentRuntime) {
         this.bot = bot;
         this.runtime = runtime;
-        this.imageService = ImageDescriptionService.getInstance();
     }
 
     // Process image messages and generate descriptions
     private async processImage(
         message: Message
     ): Promise<{ description: string } | null> {
-        // console.log(
-        //     "🖼️ Processing image message:",
-        //     JSON.stringify(message, null, 2)
-        // );
-
         try {
             let imageUrl: string | null = null;
 
-            // Handle photo messages
             if ("photo" in message && message.photo?.length > 0) {
                 const photo = message.photo[message.photo.length - 1];
                 const fileLink = await this.bot.telegram.getFileLink(
                     photo.file_id
                 );
                 imageUrl = fileLink.toString();
-            }
-            // Handle image documents
-            else if (
+            } else if (
                 "document" in message &&
                 message.document?.mime_type?.startsWith("image/")
             ) {
-                const doc = message.document;
                 const fileLink = await this.bot.telegram.getFileLink(
-                    doc.file_id
+                    message.document.file_id
                 );
                 imageUrl = fileLink.toString();
             }
 
             if (imageUrl) {
+                const imageDescriptionService =
+                    this.runtime.getService<IImageDescriptionService>(
+                        ServiceType.IMAGE_DESCRIPTION
+                    );
                 const { title, description } =
-                    await this.imageService.describeImage(imageUrl);
-                const fullDescription = `[Image: ${title}\n${description}]`;
-                return { description: fullDescription };
+                    await imageDescriptionService.describeImage(imageUrl);
+                return { description: `[Image: ${title}\n${description}]` };
             }
         } catch (error) {
             console.error("❌ Error processing image:", error);
         }
 
-        return null; // No image found
+        return null;
     }
 
     // Decide if the bot should respond to the message
@@ -196,7 +187,6 @@ export class MessageManager {
         state: State
     ): Promise<boolean> {
         // Respond if bot is mentioned
-
         if (
             "text" in message &&
             message.text?.includes(`@${this.bot.botInfo?.username}`)
@@ -209,7 +199,7 @@
             return true;
         }
 
-        // Respond to images in group chats
+        // Don't respond to images in group chats
         if (
             "photo" in message ||
             ("document" in message &&
@@ -238,7 +228,7 @@
             return response === "RESPOND";
         }
 
-        return false; // No criteria met
+        return false;
     }
 
     // Send long messages in chunks
@@ -291,7 +281,7 @@
     // Generate a response using AI
     private async _generateResponse(
         message: Memory,
-        state: State,
+        _state: State,
         context: string
     ): Promise<Content> {
         const { userId, roomId } = message;
@@ -306,9 +296,10 @@
             console.error("❌ No response from generateMessageResponse");
             return null;
         }
+
         await this.runtime.databaseAdapter.log({
             body: { message, context, response },
-            userId: userId,
+            userId,
             roomId,
             type: "response",
         });
@@ -342,14 +333,23 @@
         try {
             // Convert IDs to UUIDs
             const userId = stringToUuid(ctx.from.id.toString()) as UUID;
+
+            // Get user name
            const userName =
                ctx.from.username || ctx.from.first_name || "Unknown User";
+
+            // Get chat ID
             const chatId = stringToUuid(
                 ctx.chat?.id.toString() + "-" + this.runtime.agentId
             ) as UUID;
+
+            // Get agent ID
             const agentId = this.runtime.agentId;
+
+            // Get room ID
             const roomId = chatId;
 
+            // Ensure connection
             await this.runtime.ensureConnection(
                 userId,
                 roomId,
@@ -358,6 +358,7 @@
                 "telegram"
             );
 
+            // Get message ID
             const messageId = stringToUuid(
                 message.message_id.toString() + "-" + this.runtime.agentId
             ) as UUID;
@@ -382,17 +383,18 @@
                 return; // Skip if no content
             }
 
+            // Create content
             const content: Content = {
                 text: fullText,
                 source: "telegram",
-                // inReplyTo:
-                //     "reply_to_message" in message && message.reply_to_message
-                //         ? stringToUuid(
-                //               message.reply_to_message.message_id.toString() +
-                //                   "-" +
-                //                   this.runtime.agentId
-                //           )
-                //         : undefined,
+                inReplyTo:
+                    "reply_to_message" in message && message.reply_to_message
+                        ? stringToUuid(
+                              message.reply_to_message.message_id.toString() +
+                                  "-" +
+                                  this.runtime.agentId
+                          )
+                        : undefined,
             };
 
             // Create memory for the message
@@ -406,6 +408,7 @@
                 embedding: embeddingZeroVector,
             };
 
+            // Create memory
             await this.runtime.messageManager.createMemory(memory);
 
             // Update state with the new memory
@@ -498,8 +501,8 @@
 
             await this.runtime.evaluate(memory, state, shouldRespond);
         } catch (error) {
-            console.error("❌ Error handling message:", error);
-            console.error("Error sending message:", error);
+            elizaLogger.error("❌ Error handling message:", error);
+            elizaLogger.error("Error sending message:", error);
         }
     }
 }
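The main structural change in messageManager.ts is that the image description service is no longer imported and constructed from @ai16z/plugin-node; it is resolved from the runtime's service registry at the point of use, which drops the Telegram client's compile-time dependency on the plugin. The previously commented-out inReplyTo field is also enabled, so replies are threaded by deriving a deterministic UUID from the replied-to message ID plus the agent ID, the same scheme already used for messageId. A minimal sketch of the service lookup, assuming IImageDescriptionService is exported from @ai16z/eliza (as the type annotations in this file suggest) and that getService may return nothing when the providing plugin is not loaded; describeImageViaRuntime is a hypothetical helper for illustration:

import { elizaLogger, ServiceType } from "@ai16z/eliza";
import type { IAgentRuntime, IImageDescriptionService } from "@ai16z/eliza";

// Resolve the image-description service from the runtime at call time
// instead of holding a plugin instance on the message manager.
async function describeImageViaRuntime(
    runtime: IAgentRuntime,
    imageUrl: string
): Promise<string | null> {
    const service = runtime.getService<IImageDescriptionService>(
        ServiceType.IMAGE_DESCRIPTION
    );
    if (!service) {
        // The plugin that registers this service may not be installed.
        elizaLogger.warn("No image description service registered");
        return null;
    }
    const { title, description } = await service.describeImage(imageUrl);
    return `[Image: ${title}\n${description}]`;
}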

packages/plugin-node/src/services/llama.ts

+10 −11
@@ -189,22 +189,21 @@ export class LlamaService extends Service {
     }
 
     async initialize(runtime: IAgentRuntime): Promise<void> {
-        elizaLogger.info("Initializing LlamaService...");
         try {
-            // Check if we should use Ollama
-            if (runtime.modelProvider === ModelProviderName.OLLAMA) {
-                elizaLogger.info("Using Ollama provider");
-                this.modelInitialized = true;
+            if (runtime.modelProvider === ModelProviderName.LLAMALOCAL) {
+                elizaLogger.info("Initializing LlamaService...");
+                elizaLogger.info("Using local GGUF model");
+                elizaLogger.info("Ensuring model is initialized...");
+                await this.ensureInitialized();
+                elizaLogger.success("LlamaService initialized successfully");
+            } else {
+                elizaLogger.info(
+                    "Not using local model, skipping initialization"
+                );
                 return;
             }
-
-            elizaLogger.info("Using local GGUF model");
-            elizaLogger.info("Ensuring model is initialized...");
-            await this.ensureInitialized();
-            elizaLogger.success("LlamaService initialized successfully");
         } catch (error) {
             elizaLogger.error("Failed to initialize LlamaService:", error);
-            // Re-throw with more context
             throw new Error(
                 `LlamaService initialization failed: ${error.message}`
             );
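In llama.ts, initialize() is restructured so the local GGUF model is loaded only when the provider is ModelProviderName.LLAMALOCAL; every other provider, including Ollama, now logs a skip message and returns instead of being flagged as initialized without loading anything. A small sketch of that gate, where initializeLlama and the injected ensureInitialized callback are hypothetical stand-ins for the service's own members:

import { elizaLogger, ModelProviderName } from "@ai16z/eliza";
import type { IAgentRuntime } from "@ai16z/eliza";

// Only the LLAMALOCAL provider triggers local model setup; anything else
// (Ollama, hosted APIs, ...) skips initialization entirely.
async function initializeLlama(
    runtime: IAgentRuntime,
    ensureInitialized: () => Promise<void>
): Promise<void> {
    try {
        if (runtime.modelProvider === ModelProviderName.LLAMALOCAL) {
            elizaLogger.info("Using local GGUF model");
            await ensureInitialized();
            elizaLogger.success("LlamaService initialized successfully");
        } else {
            elizaLogger.info("Not using local model, skipping initialization");
            return;
        }
    } catch (error) {
        elizaLogger.error("Failed to initialize LlamaService:", error);
        throw new Error(
            `LlamaService initialization failed: ${(error as Error).message}`
        );
    }
}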
