@@ -198,24 +198,6 @@ export async function generateText({
                     presencePenalty: presence_penalty,
                 });

-                console.log("\n[LLM Debug]", {
-                    sent: {
-                        prompt: context.slice(0, 200) + "...",
-                        system:
-                            runtime.character.system ??
-                            settings.SYSTEM_PROMPT ??
-                            undefined,
-                        model,
-                        temperature,
-                        maxTokens: max_response_length,
-                    },
-                    received: {
-                        response: openaiResponse?.slice(0, 200) + "...",
-                        responseType: typeof openaiResponse,
-                        responseLength: openaiResponse?.length,
-                    },
-                });
-
                 response = openaiResponse;
                 elizaLogger.debug("Received response from OpenAI model.");
                 break;
@@ -239,24 +221,6 @@ export async function generateText({
                     presencePenalty: presence_penalty,
                 });

-                console.log("\n[LLM Debug]", {
-                    sent: {
-                        prompt: context.slice(0, 200) + "...",
-                        system:
-                            runtime.character.system ??
-                            settings.SYSTEM_PROMPT ??
-                            undefined,
-                        model,
-                        temperature,
-                        maxTokens: max_response_length,
-                    },
-                    received: {
-                        response: googleResponse?.slice(0, 200) + "...",
-                        responseType: typeof googleResponse,
-                        responseLength: googleResponse?.length,
-                    },
-                });
-
                 response = googleResponse;
                 elizaLogger.debug("Received response from Google model.");
                 break;
@@ -283,24 +247,6 @@ export async function generateText({
                     presencePenalty: presence_penalty,
                 });

-                console.log("\n[LLM Debug]", {
-                    sent: {
-                        prompt: context.slice(0, 200) + "...",
-                        system:
-                            runtime.character.system ??
-                            settings.SYSTEM_PROMPT ??
-                            undefined,
-                        model,
-                        temperature,
-                        maxTokens: max_response_length,
-                    },
-                    received: {
-                        response: anthropicResponse?.slice(0, 200) + "...",
-                        responseType: typeof anthropicResponse,
-                        responseLength: anthropicResponse?.length,
-                    },
-                });
-
                 response = anthropicResponse;
                 elizaLogger.debug("Received response from Anthropic model.");
                 break;
@@ -327,24 +273,6 @@ export async function generateText({
                     presencePenalty: presence_penalty,
                 });

-                console.log("\n[LLM Debug]", {
-                    sent: {
-                        prompt: context.slice(0, 200) + "...",
-                        system:
-                            runtime.character.system ??
-                            settings.SYSTEM_PROMPT ??
-                            undefined,
-                        model,
-                        temperature,
-                        maxTokens: max_response_length,
-                    },
-                    received: {
-                        response: anthropicResponse?.slice(0, 200) + "...",
-                        responseType: typeof anthropicResponse,
-                        responseLength: anthropicResponse?.length,
-                    },
-                });
-
                 response = anthropicResponse;
                 elizaLogger.debug(
                     "Received response from Claude Vertex model."
@@ -375,24 +303,6 @@ export async function generateText({
                     presencePenalty: presence_penalty,
                 });

-                console.log("\n[LLM Debug]", {
-                    sent: {
-                        prompt: context.slice(0, 200) + "...",
-                        system:
-                            runtime.character.system ??
-                            settings.SYSTEM_PROMPT ??
-                            undefined,
-                        model,
-                        temperature,
-                        maxTokens: max_response_length,
-                    },
-                    received: {
-                        response: grokResponse?.slice(0, 200) + "...",
-                        responseType: typeof grokResponse,
-                        responseLength: grokResponse?.length,
-                    },
-                });
-
                 response = grokResponse;
                 elizaLogger.debug("Received response from Grok model.");
                 break;
@@ -414,24 +324,6 @@ export async function generateText({
                     presencePenalty: presence_penalty,
                 });

-                console.log("\n[LLM Debug]", {
-                    sent: {
-                        prompt: context.slice(0, 200) + "...",
-                        system:
-                            runtime.character.system ??
-                            settings.SYSTEM_PROMPT ??
-                            undefined,
-                        model,
-                        temperature,
-                        maxTokens: max_response_length,
-                    },
-                    received: {
-                        response: groqResponse?.slice(0, 200) + "...",
-                        responseType: typeof groqResponse,
-                        responseLength: groqResponse?.length,
-                    },
-                });
-
                 response = groqResponse;
                 break;
             }
@@ -483,24 +375,6 @@ export async function generateText({
                     presencePenalty: presence_penalty,
                 });

-                console.log("\n[LLM Debug]", {
-                    sent: {
-                        prompt: context.slice(0, 200) + "...",
-                        system:
-                            runtime.character.system ??
-                            settings.SYSTEM_PROMPT ??
-                            undefined,
-                        model,
-                        temperature,
-                        maxTokens: max_response_length,
-                    },
-                    received: {
-                        response: redpillResponse?.slice(0, 200) + "...",
-                        responseType: typeof redpillResponse,
-                        responseLength: redpillResponse?.length,
-                    },
-                });
-
                 response = redpillResponse;
                 elizaLogger.debug("Received response from redpill model.");
                 break;
@@ -528,24 +402,6 @@ export async function generateText({
                     presencePenalty: presence_penalty,
                 });

-                console.log("\n[LLM Debug]", {
-                    sent: {
-                        prompt: context.slice(0, 200) + "...",
-                        system:
-                            runtime.character.system ??
-                            settings.SYSTEM_PROMPT ??
-                            undefined,
-                        model,
-                        temperature,
-                        maxTokens: max_response_length,
-                    },
-                    received: {
-                        response: openrouterResponse?.slice(0, 200) + "...",
-                        responseType: typeof openrouterResponse,
-                        responseLength: openrouterResponse?.length,
-                    },
-                });
-
                 response = openrouterResponse;
                 elizaLogger.debug("Received response from OpenRouter model.");
                 break;
@@ -572,24 +428,6 @@ export async function generateText({
                     presencePenalty: presence_penalty,
                 });

-                console.log("\n[LLM Debug]", {
-                    sent: {
-                        prompt: context.slice(0, 200) + "...",
-                        system:
-                            runtime.character.system ??
-                            settings.SYSTEM_PROMPT ??
-                            undefined,
-                        model,
-                        temperature,
-                        maxTokens: max_response_length,
-                    },
-                    received: {
-                        response: ollamaResponse?.slice(0, 200) + "...",
-                        responseType: typeof ollamaResponse,
-                        responseLength: ollamaResponse?.length,
-                    },
-                });
-
                     response = ollamaResponse;
                 }
                 elizaLogger.debug("Received response from Ollama model.");
@@ -616,24 +454,6 @@ export async function generateText({
                     presencePenalty: presence_penalty,
                 });

-                console.log("\n[LLM Debug]", {
-                    sent: {
-                        prompt: context.slice(0, 200) + "...",
-                        system:
-                            runtime.character.system ??
-                            settings.SYSTEM_PROMPT ??
-                            undefined,
-                        model,
-                        temperature,
-                        maxTokens: max_response_length,
-                    },
-                    received: {
-                        response: heuristResponse?.slice(0, 200) + "...",
-                        responseType: typeof heuristResponse,
-                        responseLength: heuristResponse?.length,
-                    },
-                });
-
                 response = heuristResponse;
                 elizaLogger.debug("Received response from Heurist model.");
                 break;
@@ -683,24 +503,6 @@ export async function generateText({
                     presencePenalty: presence_penalty,
                 });

-                console.log("\n[LLM Debug]", {
-                    sent: {
-                        prompt: context.slice(0, 200) + "...",
-                        system:
-                            runtime.character.system ??
-                            settings.SYSTEM_PROMPT ??
-                            undefined,
-                        model,
-                        temperature,
-                        maxTokens: max_response_length,
-                    },
-                    received: {
-                        response: openaiResponse?.slice(0, 200) + "...",
-                        responseType: typeof openaiResponse,
-                        responseLength: openaiResponse?.length,
-                    },
-                });
-
                 response = openaiResponse;
                 elizaLogger.debug("Received response from GAIANET model.");
                 break;
@@ -727,24 +529,6 @@ export async function generateText({
                     presencePenalty: presence_penalty,
                 });

-                console.log("\n[LLM Debug]", {
-                    sent: {
-                        prompt: context.slice(0, 200) + "...",
-                        system:
-                            runtime.character.system ??
-                            settings.SYSTEM_PROMPT ??
-                            undefined,
-                        model,
-                        temperature,
-                        maxTokens: max_response_length,
-                    },
-                    received: {
-                        response: galadrielResponse?.slice(0, 200) + "...",
-                        responseType: typeof galadrielResponse,
-                        responseLength: galadrielResponse?.length,
-                    },
-                });
-
                 response = galadrielResponse;
                 elizaLogger.debug("Received response from Galadriel model.");
                 break;
@@ -768,24 +552,6 @@ export async function generateText({
                     maxTokens: max_response_length,
                 });

-                console.log("\n[LLM Debug]", {
-                    sent: {
-                        prompt: context.slice(0, 200) + "...",
-                        system:
-                            runtime.character.system ??
-                            settings.SYSTEM_PROMPT ??
-                            undefined,
-                        model,
-                        temperature,
-                        maxTokens: max_response_length,
-                    },
-                    received: {
-                        response: veniceResponse?.slice(0, 200) + "...",
-                        responseType: typeof veniceResponse,
-                        responseLength: veniceResponse?.length,
-                    },
-                });
-
                 response = veniceResponse;
                 elizaLogger.debug("Received response from Venice model.");
                 break;
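
Note: every hunk above removes the same ad-hoc console.log("\n[LLM Debug]", ...) block while keeping the existing elizaLogger.debug(...) calls. Purely as a minimal sketch (not part of this commit), the same truncated request/response payload could be routed through a debug logger instead of the console. The DebugLogger interface and logLlmExchange helper below are illustrative assumptions; the only behavior relied on is a debug(message: string) call, which matches the logger usage visible in the kept lines.

// Illustrative sketch only: rebuild the payload the removed blocks logged
// and pass it to a structured debug logger as a single string.
interface DebugLogger {
    debug(message: string): void;
}

interface LlmExchange {
    prompt: string;
    system?: string;
    model: string;
    temperature: number;
    maxTokens: number;
    response?: string;
}

function logLlmExchange(logger: DebugLogger, x: LlmExchange): void {
    const payload = {
        sent: {
            // Same truncation the removed blocks used: first 200 characters.
            prompt: x.prompt.slice(0, 200) + "...",
            system: x.system,
            model: x.model,
            temperature: x.temperature,
            maxTokens: x.maxTokens,
        },
        received: {
            response: (x.response ?? "").slice(0, 200) + "...",
            responseType: typeof x.response,
            responseLength: x.response?.length,
        },
    };
    // Serialize to a plain string so only debug(message) is required,
    // matching the elizaLogger.debug(message) calls kept by this diff.
    logger.debug("[LLM Debug] " + JSON.stringify(payload, null, 2));
}

A caller would pass the same fields the removed blocks captured, for example logLlmExchange(elizaLogger, { prompt: context, model, temperature, maxTokens: max_response_length, response: openaiResponse }).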