@@ -69,22 +69,22 @@ export async function generateText({
    const apiKey = runtime.token;

    try {
-        elizaLogger.log(
+        elizaLogger.debug(
            `Trimming context to max length of ${max_context_length} tokens.`
        );
        context = await trimTokens(context, max_context_length, "gpt-4o");

        let response: string;

        const _stop = stop || models[provider].settings.stop;
-        elizaLogger.log(
+        elizaLogger.debug(
            `Using provider: ${provider}, model: ${model}, temperature: ${temperature}, max response length: ${max_response_length}`
        );

        switch (provider) {
            case ModelProviderName.OPENAI:
            case ModelProviderName.LLAMACLOUD: {
-                elizaLogger.log("Initializing OpenAI model.");
+                elizaLogger.debug("Initializing OpenAI model.");
                const openai = createOpenAI({ apiKey, baseURL: endpoint });

                const { text: openaiResponse } = await aiGenerateText({
@@ -101,7 +101,7 @@ export async function generateText({
                });

                response = openaiResponse;
-                elizaLogger.log("Received response from OpenAI model.");
+                elizaLogger.debug("Received response from OpenAI model.");
                break;
            }

@@ -125,7 +125,7 @@ export async function generateText({
                break; }

            case ModelProviderName.ANTHROPIC: {
-                elizaLogger.log("Initializing Anthropic model.");
+                elizaLogger.debug("Initializing Anthropic model.");

                const anthropic = createAnthropic({ apiKey });

@@ -143,12 +143,12 @@ export async function generateText({
                });

                response = anthropicResponse;
-                elizaLogger.log("Received response from Anthropic model.");
+                elizaLogger.debug("Received response from Anthropic model.");
                break;
            }

            case ModelProviderName.GROK: {
-                elizaLogger.log("Initializing Grok model.");
+                elizaLogger.debug("Initializing Grok model.");
                const grok = createOpenAI({ apiKey, baseURL: endpoint });

                const { text: grokResponse } = await aiGenerateText({
@@ -167,7 +167,7 @@ export async function generateText({
                });

                response = grokResponse;
-                elizaLogger.log("Received response from Grok model.");
+                elizaLogger.debug("Received response from Grok model.");
                break;
            }

@@ -194,7 +194,7 @@ export async function generateText({
            }

            case ModelProviderName.LLAMALOCAL: {
-                elizaLogger.log("Using local Llama model for text completion.");
+                elizaLogger.debug("Using local Llama model for text completion.");
                response = await runtime
                    .getService<ITextGenerationService>(
                        ServiceType.TEXT_GENERATION
@@ -207,12 +207,12 @@ export async function generateText({
                        presence_penalty,
                        max_response_length
                    );
-                elizaLogger.log("Received response from local Llama model.");
+                elizaLogger.debug("Received response from local Llama model.");
                break;
            }

            case ModelProviderName.REDPILL: {
-                elizaLogger.log("Initializing RedPill model.");
+                elizaLogger.debug("Initializing RedPill model.");
                const serverUrl = models[provider].endpoint;
                const openai = createOpenAI({ apiKey, baseURL: serverUrl });

@@ -230,12 +230,12 @@ export async function generateText({
                });

                response = openaiResponse;
-                elizaLogger.log("Received response from OpenAI model.");
+                elizaLogger.debug("Received response from OpenAI model.");
                break;
            }

            case ModelProviderName.OPENROUTER: {
-                elizaLogger.log("Initializing OpenRouter model.");
+                elizaLogger.debug("Initializing OpenRouter model.");
                const serverUrl = models[provider].endpoint;
                const openrouter = createOpenAI({ apiKey, baseURL: serverUrl });

@@ -253,20 +253,20 @@ export async function generateText({
                });

                response = openrouterResponse;
-                elizaLogger.log("Received response from OpenRouter model.");
+                elizaLogger.debug("Received response from OpenRouter model.");
                break;
            }

            case ModelProviderName.OLLAMA:
                {
-                    console.log("Initializing Ollama model.");
+                    console.debug("Initializing Ollama model.");

                    const ollamaProvider = createOllama({
                        baseURL: models[provider].endpoint + "/api",
                    });
                    const ollama = ollamaProvider(model);

-                    console.log("****** MODEL\n", model);
+                    console.debug("****** MODEL\n", model);

                    const { text: ollamaResponse } = await aiGenerateText({
                        model: ollama,
@@ -279,7 +279,7 @@ export async function generateText({

                    response = ollamaResponse;
                }
-                console.log("Received response from Ollama model.");
+                console.debug("Received response from Ollama model.");
                break;

            default: {
@@ -341,7 +341,7 @@ export async function generateShouldRespond({
    let retryDelay = 1000;
    while (true) {
        try {
-            elizaLogger.log(
+            elizaLogger.debug(
                "Attempting to generate text with context:",
                context
            );
@@ -351,13 +351,13 @@ export async function generateShouldRespond({
                modelClass,
            });

-            elizaLogger.log("Received response from generateText:", response);
+            elizaLogger.debug("Received response from generateText:", response);
            const parsedResponse = parseShouldRespondFromText(response.trim());
            if (parsedResponse) {
-                elizaLogger.log("Parsed response:", parsedResponse);
+                elizaLogger.debug("Parsed response:", parsedResponse);
                return parsedResponse;
            } else {
-                elizaLogger.log("generateShouldRespond no response");
+                elizaLogger.debug("generateShouldRespond no response");
            }
        } catch (error) {
            elizaLogger.error("Error in generateShouldRespond:", error);
@@ -640,7 +640,7 @@ export async function generateMessageResponse({
            // try parsing the response as JSON, if null then try again
            const parsedContent = parseJSONObjectFromText(response) as Content;
            if (!parsedContent) {
-                elizaLogger.log("parsedContent is null, retrying");
+                elizaLogger.debug("parsedContent is null, retrying");
                continue;
            }

@@ -650,7 +650,7 @@ export async function generateMessageResponse({
            // wait for 2 seconds
            retryLength *= 2;
            await new Promise((resolve) => setTimeout(resolve, retryLength));
-            elizaLogger.log("Retrying...");
+            elizaLogger.debug("Retrying...");
        }
    }
}
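
The net effect of this commit is to demote routine per-provider messages from `log` to `debug` level, so they only appear when verbose output is wanted. A minimal sketch of the pattern, assuming a leveled logger along the lines of `elizaLogger` (the class name and `VERBOSE` flag below are illustrative, not the actual elizaLogger implementation):

```typescript
// Sketch of a leveled logger: debug output is suppressed unless verbose
// mode is enabled, while log output always prints. Names are assumptions
// for illustration, not the real elizaLogger API.
class LeveledLogger {
    constructor(private verbose: boolean = false) {}

    // Always-on informational output.
    log(...args: unknown[]): void {
        console.log(...args);
    }

    // Emitted only in verbose mode, keeping routine provider chatter
    // ("Initializing OpenAI model.", etc.) out of normal runs.
    debug(...args: unknown[]): void {
        if (this.verbose) {
            console.debug(...args);
        }
    }
}

// Usage: with verbose off, only the .log call prints.
const logger = new LeveledLogger(process.env.VERBOSE === "true");
logger.log("Received response from OpenAI model."); // always shown
logger.debug("Initializing OpenAI model."); // shown only when VERBOSE=true
```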