@@ -198,6 +198,24 @@ export async function generateText({
                    presencePenalty: presence_penalty,
                });

+                console.log("\n[LLM Debug]", {
+                    sent: {
+                        prompt: context.slice(0, 200) + "...",
+                        system:
+                            runtime.character.system ??
+                            settings.SYSTEM_PROMPT ??
+                            undefined,
+                        model,
+                        temperature,
+                        maxTokens: max_response_length,
+                    },
+                    received: {
+                        response: openaiResponse?.slice(0, 200) + "...",
+                        responseType: typeof openaiResponse,
+                        responseLength: openaiResponse?.length,
+                    },
+                });
+
                response = openaiResponse;
                elizaLogger.debug("Received response from OpenAI model.");
                break;
@@ -221,6 +239,24 @@ export async function generateText({
                    presencePenalty: presence_penalty,
                });

+                console.log("\n[LLM Debug]", {
+                    sent: {
+                        prompt: context.slice(0, 200) + "...",
+                        system:
+                            runtime.character.system ??
+                            settings.SYSTEM_PROMPT ??
+                            undefined,
+                        model,
+                        temperature,
+                        maxTokens: max_response_length,
+                    },
+                    received: {
+                        response: googleResponse?.slice(0, 200) + "...",
+                        responseType: typeof googleResponse,
+                        responseLength: googleResponse?.length,
+                    },
+                });
+
                response = googleResponse;
                elizaLogger.debug("Received response from Google model.");
                break;
@@ -247,6 +283,24 @@ export async function generateText({
                    presencePenalty: presence_penalty,
                });

+                console.log("\n[LLM Debug]", {
+                    sent: {
+                        prompt: context.slice(0, 200) + "...",
+                        system:
+                            runtime.character.system ??
+                            settings.SYSTEM_PROMPT ??
+                            undefined,
+                        model,
+                        temperature,
+                        maxTokens: max_response_length,
+                    },
+                    received: {
+                        response: anthropicResponse?.slice(0, 200) + "...",
+                        responseType: typeof anthropicResponse,
+                        responseLength: anthropicResponse?.length,
+                    },
+                });
+
                response = anthropicResponse;
                elizaLogger.debug("Received response from Anthropic model.");
                break;
@@ -273,6 +327,24 @@ export async function generateText({
                    presencePenalty: presence_penalty,
                });

+                console.log("\n[LLM Debug]", {
+                    sent: {
+                        prompt: context.slice(0, 200) + "...",
+                        system:
+                            runtime.character.system ??
+                            settings.SYSTEM_PROMPT ??
+                            undefined,
+                        model,
+                        temperature,
+                        maxTokens: max_response_length,
+                    },
+                    received: {
+                        response: anthropicResponse?.slice(0, 200) + "...",
+                        responseType: typeof anthropicResponse,
+                        responseLength: anthropicResponse?.length,
+                    },
+                });
+
                response = anthropicResponse;
                elizaLogger.debug(
                    "Received response from Claude Vertex model."
@@ -303,6 +375,24 @@ export async function generateText({
                    presencePenalty: presence_penalty,
                });

+                console.log("\n[LLM Debug]", {
+                    sent: {
+                        prompt: context.slice(0, 200) + "...",
+                        system:
+                            runtime.character.system ??
+                            settings.SYSTEM_PROMPT ??
+                            undefined,
+                        model,
+                        temperature,
+                        maxTokens: max_response_length,
+                    },
+                    received: {
+                        response: grokResponse?.slice(0, 200) + "...",
+                        responseType: typeof grokResponse,
+                        responseLength: grokResponse?.length,
+                    },
+                });
+
                response = grokResponse;
                elizaLogger.debug("Received response from Grok model.");
                break;
@@ -324,6 +414,24 @@ export async function generateText({
                    presencePenalty: presence_penalty,
                });

+                console.log("\n[LLM Debug]", {
+                    sent: {
+                        prompt: context.slice(0, 200) + "...",
+                        system:
+                            runtime.character.system ??
+                            settings.SYSTEM_PROMPT ??
+                            undefined,
+                        model,
+                        temperature,
+                        maxTokens: max_response_length,
+                    },
+                    received: {
+                        response: groqResponse?.slice(0, 200) + "...",
+                        responseType: typeof groqResponse,
+                        responseLength: groqResponse?.length,
+                    },
+                });
+
                response = groqResponse;
                break;
            }
@@ -375,6 +483,24 @@ export async function generateText({
                    presencePenalty: presence_penalty,
                });

+                console.log("\n[LLM Debug]", {
+                    sent: {
+                        prompt: context.slice(0, 200) + "...",
+                        system:
+                            runtime.character.system ??
+                            settings.SYSTEM_PROMPT ??
+                            undefined,
+                        model,
+                        temperature,
+                        maxTokens: max_response_length,
+                    },
+                    received: {
+                        response: redpillResponse?.slice(0, 200) + "...",
+                        responseType: typeof redpillResponse,
+                        responseLength: redpillResponse?.length,
+                    },
+                });
+
                response = redpillResponse;
                elizaLogger.debug("Received response from redpill model.");
                break;
@@ -402,6 +528,24 @@ export async function generateText({
                    presencePenalty: presence_penalty,
                });

+                console.log("\n[LLM Debug]", {
+                    sent: {
+                        prompt: context.slice(0, 200) + "...",
+                        system:
+                            runtime.character.system ??
+                            settings.SYSTEM_PROMPT ??
+                            undefined,
+                        model,
+                        temperature,
+                        maxTokens: max_response_length,
+                    },
+                    received: {
+                        response: openrouterResponse?.slice(0, 200) + "...",
+                        responseType: typeof openrouterResponse,
+                        responseLength: openrouterResponse?.length,
+                    },
+                });
+
                response = openrouterResponse;
                elizaLogger.debug("Received response from OpenRouter model.");
                break;
@@ -428,6 +572,24 @@ export async function generateText({
                        presencePenalty: presence_penalty,
                    });

+                    console.log("\n[LLM Debug]", {
+                        sent: {
+                            prompt: context.slice(0, 200) + "...",
+                            system:
+                                runtime.character.system ??
+                                settings.SYSTEM_PROMPT ??
+                                undefined,
+                            model,
+                            temperature,
+                            maxTokens: max_response_length,
+                        },
+                        received: {
+                            response: ollamaResponse?.slice(0, 200) + "...",
+                            responseType: typeof ollamaResponse,
+                            responseLength: ollamaResponse?.length,
+                        },
+                    });
+
                    response = ollamaResponse;
                }
                elizaLogger.debug("Received response from Ollama model.");
@@ -454,6 +616,24 @@ export async function generateText({
                    presencePenalty: presence_penalty,
                });

+                console.log("\n[LLM Debug]", {
+                    sent: {
+                        prompt: context.slice(0, 200) + "...",
+                        system:
+                            runtime.character.system ??
+                            settings.SYSTEM_PROMPT ??
+                            undefined,
+                        model,
+                        temperature,
+                        maxTokens: max_response_length,
+                    },
+                    received: {
+                        response: heuristResponse?.slice(0, 200) + "...",
+                        responseType: typeof heuristResponse,
+                        responseLength: heuristResponse?.length,
+                    },
+                });
+
                response = heuristResponse;
                elizaLogger.debug("Received response from Heurist model.");
                break;
@@ -503,6 +683,24 @@ export async function generateText({
                    presencePenalty: presence_penalty,
                });

+                console.log("\n[LLM Debug]", {
+                    sent: {
+                        prompt: context.slice(0, 200) + "...",
+                        system:
+                            runtime.character.system ??
+                            settings.SYSTEM_PROMPT ??
+                            undefined,
+                        model,
+                        temperature,
+                        maxTokens: max_response_length,
+                    },
+                    received: {
+                        response: openaiResponse?.slice(0, 200) + "...",
+                        responseType: typeof openaiResponse,
+                        responseLength: openaiResponse?.length,
+                    },
+                });
+
                response = openaiResponse;
                elizaLogger.debug("Received response from GAIANET model.");
                break;
@@ -529,6 +727,24 @@ export async function generateText({
                    presencePenalty: presence_penalty,
                });

+                console.log("\n[LLM Debug]", {
+                    sent: {
+                        prompt: context.slice(0, 200) + "...",
+                        system:
+                            runtime.character.system ??
+                            settings.SYSTEM_PROMPT ??
+                            undefined,
+                        model,
+                        temperature,
+                        maxTokens: max_response_length,
+                    },
+                    received: {
+                        response: galadrielResponse?.slice(0, 200) + "...",
+                        responseType: typeof galadrielResponse,
+                        responseLength: galadrielResponse?.length,
+                    },
+                });
+
                response = galadrielResponse;
                elizaLogger.debug("Received response from Galadriel model.");
                break;
@@ -552,6 +768,24 @@ export async function generateText({
                    maxTokens: max_response_length,
                });

+                console.log("\n[LLM Debug]", {
+                    sent: {
+                        prompt: context.slice(0, 200) + "...",
+                        system:
+                            runtime.character.system ??
+                            settings.SYSTEM_PROMPT ??
+                            undefined,
+                        model,
+                        temperature,
+                        maxTokens: max_response_length,
+                    },
+                    received: {
+                        response: veniceResponse?.slice(0, 200) + "...",
+                        responseType: typeof veniceResponse,
+                        responseLength: veniceResponse?.length,
+                    },
+                });
+
                response = veniceResponse;
                elizaLogger.debug("Received response from Venice model.");
                break;
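
The same [LLM Debug] object is repeated verbatim in every provider branch, with only the response variable changing. A possible follow-up (not part of this diff) would be to hoist the block into a small helper so each branch only supplies its own response string. The sketch below is a minimal, hypothetical example: it assumes the names visible in the hunks above (context, runtime, settings, model, temperature, max_response_length), and the helper name logLLMDebug is introduced here for illustration only.

// Hypothetical helper (not in this PR): centralizes the [LLM Debug] logging
// so each provider branch only passes its own response string.
function logLLMDebug(params: {
    context: string;
    system?: string;
    model: string;
    temperature: number;
    maxTokens: number;
    response?: string;
}): void {
    const { context, system, model, temperature, maxTokens, response } = params;
    console.log("\n[LLM Debug]", {
        sent: {
            // Truncate the prompt so the log stays readable.
            prompt: context.slice(0, 200) + "...",
            system,
            model,
            temperature,
            maxTokens,
        },
        received: {
            // Truncate the response the same way and record its type and length.
            response: response?.slice(0, 200) + "...",
            responseType: typeof response,
            responseLength: response?.length,
        },
    });
}

// Example call from a provider branch, using the names from the hunks above:
// logLLMDebug({
//     context,
//     system: runtime.character.system ?? settings.SYSTEM_PROMPT ?? undefined,
//     model,
//     temperature,
//     maxTokens: max_response_length,
//     response: openaiResponse,
// });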