Commit 5a75db9

working

committed
1 parent 177d343 commit 5a75db9

6 files changed: +696 -158

characters/eliza-code-assistant.character.json (+1 -1)

@@ -60,7 +60,7 @@
         "Focus on official Eliza documentation and GitHub resources",
         "Encourage best practices and contribution guidelines"
     ],
-    "knowledge": ["docs/", "CONTRIBUTING.md", "README.md"],
+    "knowledge": ["docs/*", "CONTRIBUTING.md", "README.md"],
     "examples": [
         [
             {

package.json (+3 -3)

@@ -4,9 +4,9 @@
     "preinstall": "npx only-allow pnpm",
     "build": "turbo run build --filter=!eliza-docs",
     "build-docker": "turbo run build",
-    "cleanstart": "if [ -f agent/data/db.sqlite ]; then rm agent/data/db.sqlite; fi && pnpm --filter \"@elizaos/agent\" start --isRoot",
-    "cleandev": "if [ -f agent/data/db.sqlite ]; then rm agent/data/db.sqlite; fi && pnpm --filter \"@elizaos/agent\" run dev --isRoot",
-    "cleanstart:debug": "if [ -f agent/data/db.sqlite ]; then rm agent/data/db.sqlite; fi && cross-env NODE_ENV=development VERBOSE=true DEBUG=eliza:* pnpm --filter \"@elizaos/agent\" start --isRoot",
+    "cleanstart": "if [ -f agent/data/db.sqlite ]; then rm agent/data/db.sqlite; fi && pnpm build && pnpm --filter \"@elizaos/agent\" start --isRoot",
+    "cleandev": "if [ -f agent/data/db.sqlite ]; then rm agent/data/db.sqlite; fi && bash ./scripts/dev.sh --characters=../characters/eliza-code-assistant.character.json",
+    "cleanstart:debug": "if [ -f agent/data/db.sqlite ]; then rm agent/data/db.sqlite; fi && pnpm build && cross-env NODE_ENV=development VERBOSE=true DEBUG=eliza:* pnpm --filter \"@elizaos/agent\" start --isRoot",
     "start": "pnpm --filter \"@elizaos/agent\" start --isRoot",
     "start:client": "pnpm --dir client dev",
     "start:debug": "cross-env NODE_ENV=development VERBOSE=true DEBUG=eliza:* pnpm --filter \"@elizaos/agent\" start --isRoot",

packages/core/src/generation.ts (+234)
@@ -198,6 +198,24 @@ export async function generateText({
                 presencePenalty: presence_penalty,
             });
 
+            console.log("\n[LLM Debug]", {
+                sent: {
+                    prompt: context.slice(0, 200) + "...",
+                    system:
+                        runtime.character.system ??
+                        settings.SYSTEM_PROMPT ??
+                        undefined,
+                    model,
+                    temperature,
+                    maxTokens: max_response_length,
+                },
+                received: {
+                    response: openaiResponse?.slice(0, 200) + "...",
+                    responseType: typeof openaiResponse,
+                    responseLength: openaiResponse?.length,
+                },
+            });
+
             response = openaiResponse;
             elizaLogger.debug("Received response from OpenAI model.");
             break;
@@ -221,6 +239,24 @@
                 presencePenalty: presence_penalty,
             });
 
+            console.log("\n[LLM Debug]", {
+                sent: {
+                    prompt: context.slice(0, 200) + "...",
+                    system:
+                        runtime.character.system ??
+                        settings.SYSTEM_PROMPT ??
+                        undefined,
+                    model,
+                    temperature,
+                    maxTokens: max_response_length,
+                },
+                received: {
+                    response: googleResponse?.slice(0, 200) + "...",
+                    responseType: typeof googleResponse,
+                    responseLength: googleResponse?.length,
+                },
+            });
+
             response = googleResponse;
             elizaLogger.debug("Received response from Google model.");
             break;
@@ -247,6 +283,24 @@
                 presencePenalty: presence_penalty,
             });
 
+            console.log("\n[LLM Debug]", {
+                sent: {
+                    prompt: context.slice(0, 200) + "...",
+                    system:
+                        runtime.character.system ??
+                        settings.SYSTEM_PROMPT ??
+                        undefined,
+                    model,
+                    temperature,
+                    maxTokens: max_response_length,
+                },
+                received: {
+                    response: anthropicResponse?.slice(0, 200) + "...",
+                    responseType: typeof anthropicResponse,
+                    responseLength: anthropicResponse?.length,
+                },
+            });
+
             response = anthropicResponse;
             elizaLogger.debug("Received response from Anthropic model.");
             break;
@@ -273,6 +327,24 @@
                 presencePenalty: presence_penalty,
             });
 
+            console.log("\n[LLM Debug]", {
+                sent: {
+                    prompt: context.slice(0, 200) + "...",
+                    system:
+                        runtime.character.system ??
+                        settings.SYSTEM_PROMPT ??
+                        undefined,
+                    model,
+                    temperature,
+                    maxTokens: max_response_length,
+                },
+                received: {
+                    response: anthropicResponse?.slice(0, 200) + "...",
+                    responseType: typeof anthropicResponse,
+                    responseLength: anthropicResponse?.length,
+                },
+            });
+
             response = anthropicResponse;
             elizaLogger.debug(
                 "Received response from Claude Vertex model."
@@ -303,6 +375,24 @@
                 presencePenalty: presence_penalty,
             });
 
+            console.log("\n[LLM Debug]", {
+                sent: {
+                    prompt: context.slice(0, 200) + "...",
+                    system:
+                        runtime.character.system ??
+                        settings.SYSTEM_PROMPT ??
+                        undefined,
+                    model,
+                    temperature,
+                    maxTokens: max_response_length,
+                },
+                received: {
+                    response: grokResponse?.slice(0, 200) + "...",
+                    responseType: typeof grokResponse,
+                    responseLength: grokResponse?.length,
+                },
+            });
+
             response = grokResponse;
             elizaLogger.debug("Received response from Grok model.");
             break;
@@ -324,6 +414,24 @@
                 presencePenalty: presence_penalty,
             });
 
+            console.log("\n[LLM Debug]", {
+                sent: {
+                    prompt: context.slice(0, 200) + "...",
+                    system:
+                        runtime.character.system ??
+                        settings.SYSTEM_PROMPT ??
+                        undefined,
+                    model,
+                    temperature,
+                    maxTokens: max_response_length,
+                },
+                received: {
+                    response: groqResponse?.slice(0, 200) + "...",
+                    responseType: typeof groqResponse,
+                    responseLength: groqResponse?.length,
+                },
+            });
+
             response = groqResponse;
             break;
         }
@@ -375,6 +483,24 @@
                 presencePenalty: presence_penalty,
             });
 
+            console.log("\n[LLM Debug]", {
+                sent: {
+                    prompt: context.slice(0, 200) + "...",
+                    system:
+                        runtime.character.system ??
+                        settings.SYSTEM_PROMPT ??
+                        undefined,
+                    model,
+                    temperature,
+                    maxTokens: max_response_length,
+                },
+                received: {
+                    response: redpillResponse?.slice(0, 200) + "...",
+                    responseType: typeof redpillResponse,
+                    responseLength: redpillResponse?.length,
+                },
+            });
+
             response = redpillResponse;
             elizaLogger.debug("Received response from redpill model.");
             break;
@@ -402,6 +528,24 @@
                 presencePenalty: presence_penalty,
             });
 
+            console.log("\n[LLM Debug]", {
+                sent: {
+                    prompt: context.slice(0, 200) + "...",
+                    system:
+                        runtime.character.system ??
+                        settings.SYSTEM_PROMPT ??
+                        undefined,
+                    model,
+                    temperature,
+                    maxTokens: max_response_length,
+                },
+                received: {
+                    response: openrouterResponse?.slice(0, 200) + "...",
+                    responseType: typeof openrouterResponse,
+                    responseLength: openrouterResponse?.length,
+                },
+            });
+
             response = openrouterResponse;
             elizaLogger.debug("Received response from OpenRouter model.");
             break;
@@ -428,6 +572,24 @@
                     presencePenalty: presence_penalty,
                 });
 
+                console.log("\n[LLM Debug]", {
+                    sent: {
+                        prompt: context.slice(0, 200) + "...",
+                        system:
+                            runtime.character.system ??
+                            settings.SYSTEM_PROMPT ??
+                            undefined,
+                        model,
+                        temperature,
+                        maxTokens: max_response_length,
+                    },
+                    received: {
+                        response: ollamaResponse?.slice(0, 200) + "...",
+                        responseType: typeof ollamaResponse,
+                        responseLength: ollamaResponse?.length,
+                    },
+                });
+
                 response = ollamaResponse;
             }
             elizaLogger.debug("Received response from Ollama model.");
@@ -454,6 +616,24 @@
                 presencePenalty: presence_penalty,
             });
 
+            console.log("\n[LLM Debug]", {
+                sent: {
+                    prompt: context.slice(0, 200) + "...",
+                    system:
+                        runtime.character.system ??
+                        settings.SYSTEM_PROMPT ??
+                        undefined,
+                    model,
+                    temperature,
+                    maxTokens: max_response_length,
+                },
+                received: {
+                    response: heuristResponse?.slice(0, 200) + "...",
+                    responseType: typeof heuristResponse,
+                    responseLength: heuristResponse?.length,
+                },
+            });
+
             response = heuristResponse;
             elizaLogger.debug("Received response from Heurist model.");
             break;
@@ -503,6 +683,24 @@
                 presencePenalty: presence_penalty,
             });
 
+            console.log("\n[LLM Debug]", {
+                sent: {
+                    prompt: context.slice(0, 200) + "...",
+                    system:
+                        runtime.character.system ??
+                        settings.SYSTEM_PROMPT ??
+                        undefined,
+                    model,
+                    temperature,
+                    maxTokens: max_response_length,
+                },
+                received: {
+                    response: openaiResponse?.slice(0, 200) + "...",
+                    responseType: typeof openaiResponse,
+                    responseLength: openaiResponse?.length,
+                },
+            });
+
             response = openaiResponse;
             elizaLogger.debug("Received response from GAIANET model.");
             break;
@@ -529,6 +727,24 @@
                 presencePenalty: presence_penalty,
             });
 
+            console.log("\n[LLM Debug]", {
+                sent: {
+                    prompt: context.slice(0, 200) + "...",
+                    system:
+                        runtime.character.system ??
+                        settings.SYSTEM_PROMPT ??
+                        undefined,
+                    model,
+                    temperature,
+                    maxTokens: max_response_length,
+                },
+                received: {
+                    response: galadrielResponse?.slice(0, 200) + "...",
+                    responseType: typeof galadrielResponse,
+                    responseLength: galadrielResponse?.length,
+                },
+            });
+
             response = galadrielResponse;
             elizaLogger.debug("Received response from Galadriel model.");
             break;
@@ -552,6 +768,24 @@
                 maxTokens: max_response_length,
             });
 
+            console.log("\n[LLM Debug]", {
+                sent: {
+                    prompt: context.slice(0, 200) + "...",
+                    system:
+                        runtime.character.system ??
+                        settings.SYSTEM_PROMPT ??
+                        undefined,
+                    model,
+                    temperature,
+                    maxTokens: max_response_length,
+                },
+                received: {
+                    response: veniceResponse?.slice(0, 200) + "...",
+                    responseType: typeof veniceResponse,
+                    responseLength: veniceResponse?.length,
+                },
+            });
+
             response = veniceResponse;
             elizaLogger.debug("Received response from Venice model.");
             break;
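
The [LLM Debug] block above is pasted verbatim into all thirteen provider branches, differing only in the response variable (openaiResponse, googleResponse, anthropicResponse, ...). A minimal sketch of how the duplication could be factored into a single helper that produces the same console output; the logLLMDebug name and its parameter shape are hypothetical, not part of this commit:

// Hypothetical helper, not part of this commit. It assumes the caller
// passes in the same values that are in scope inside generateText.
function logLLMDebug(params: {
    context: string;
    system?: string;
    model: string;
    temperature: number;
    maxTokens: number;
    response?: string;
}): void {
    console.log("\n[LLM Debug]", {
        sent: {
            prompt: params.context.slice(0, 200) + "...",
            system: params.system,
            model: params.model,
            temperature: params.temperature,
            maxTokens: params.maxTokens,
        },
        received: {
            // Mirrors the inline blocks: an undefined response prints as "undefined..."
            response: params.response?.slice(0, 200) + "...",
            responseType: typeof params.response,
            responseLength: params.response?.length,
        },
    });
}

Each branch would then shrink to one call, e.g. logLLMDebug({ context, system: runtime.character.system ?? settings.SYSTEM_PROMPT ?? undefined, model, temperature, maxTokens: max_response_length, response: openaiResponse }).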

packages/core/src/knowledge.ts (+8)

@@ -9,6 +9,14 @@ async function get(
     runtime: AgentRuntime,
     message: Memory
 ): Promise<KnowledgeItem[]> {
+    // Skip knowledge search if flag is set
+    if (message?.metadata?.skipKnowledge) {
+        elizaLogger.debug(
+            "Skipping knowledge search due to skipKnowledge flag"
+        );
+        return [];
+    }
+
     // Add validation for message
     if (!message?.content?.text) {
         elizaLogger.warn("Invalid message for knowledge query:", {
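
Together with the Memory.metadata field added in types.ts below, a caller can now opt a single message out of retrieval. A minimal usage sketch, assuming knowledge.ts's default export exposes get() and that AgentRuntime is importable from ./runtime; the wrapper function and import paths are illustrative:

// Sketch only: not part of this commit.
import knowledge from "./knowledge";
import type { AgentRuntime } from "./runtime";
import type { KnowledgeItem, Memory } from "./types";

async function getWithoutKnowledge(
    runtime: AgentRuntime,
    message: Memory
): Promise<KnowledgeItem[]> {
    // The new flag short-circuits get() before any embedding search runs.
    const flagged: Memory = {
        ...message,
        metadata: { ...message.metadata, skipKnowledge: true },
    };
    return knowledge.get(runtime, flagged); // resolves to []
}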

packages/core/src/types.ts (+3)

@@ -355,6 +355,9 @@ export interface Memory {
 
     /** Embedding similarity score */
     similarity?: number;
+
+    /** Optional metadata */
+    metadata?: { [key: string]: any };
 }
 
 /**
