
Commit f9d32e0

Merge pull request #249 from v1xingyue/main: add verbose config with logger
2 parents 3062cc8 + b402b33
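
This change switches the chatty per-request logging in generation.ts from elizaLogger.log to elizaLogger.debug (console.log to console.debug in the Ollama branch) and adds a verbose flag to ElizaLogger, read from the verbose environment variable, that silences debug output unless explicitly enabled.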

File tree

2 files changed: +29 -23 lines


packages/core/src/generation.ts (+23 -23)
@@ -69,22 +69,22 @@ export async function generateText({
     const apiKey = runtime.token;

     try {
-        elizaLogger.log(
+        elizaLogger.debug(
             `Trimming context to max length of ${max_context_length} tokens.`
         );
         context = await trimTokens(context, max_context_length, "gpt-4o");

         let response: string;

         const _stop = stop || models[provider].settings.stop;
-        elizaLogger.log(
+        elizaLogger.debug(
             `Using provider: ${provider}, model: ${model}, temperature: ${temperature}, max response length: ${max_response_length}`
         );

         switch (provider) {
             case ModelProviderName.OPENAI:
             case ModelProviderName.LLAMACLOUD: {
-                elizaLogger.log("Initializing OpenAI model.");
+                elizaLogger.debug("Initializing OpenAI model.");
                 const openai = createOpenAI({ apiKey, baseURL: endpoint });

                 const { text: openaiResponse } = await aiGenerateText({
@@ -101,7 +101,7 @@ export async function generateText({
                 });

                 response = openaiResponse;
-                elizaLogger.log("Received response from OpenAI model.");
+                elizaLogger.debug("Received response from OpenAI model.");
                 break;
             }

@@ -125,7 +125,7 @@ export async function generateText({
                 break; }

             case ModelProviderName.ANTHROPIC: {
-                elizaLogger.log("Initializing Anthropic model.");
+                elizaLogger.debug("Initializing Anthropic model.");

                 const anthropic = createAnthropic({ apiKey });

@@ -143,12 +143,12 @@ export async function generateText({
                 });

                 response = anthropicResponse;
-                elizaLogger.log("Received response from Anthropic model.");
+                elizaLogger.debug("Received response from Anthropic model.");
                 break;
             }

             case ModelProviderName.GROK: {
-                elizaLogger.log("Initializing Grok model.");
+                elizaLogger.debug("Initializing Grok model.");
                 const grok = createOpenAI({ apiKey, baseURL: endpoint });

                 const { text: grokResponse } = await aiGenerateText({
@@ -167,7 +167,7 @@ export async function generateText({
                 });

                 response = grokResponse;
-                elizaLogger.log("Received response from Grok model.");
+                elizaLogger.debug("Received response from Grok model.");
                 break;
             }

@@ -194,7 +194,7 @@ export async function generateText({
             }

             case ModelProviderName.LLAMALOCAL: {
-                elizaLogger.log("Using local Llama model for text completion.");
+                elizaLogger.debug("Using local Llama model for text completion.");
                 response = await runtime
                     .getService<ITextGenerationService>(
                         ServiceType.TEXT_GENERATION
@@ -207,12 +207,12 @@ export async function generateText({
                         presence_penalty,
                         max_response_length
                     );
-                elizaLogger.log("Received response from local Llama model.");
+                elizaLogger.debug("Received response from local Llama model.");
                 break;
             }

             case ModelProviderName.REDPILL: {
-                elizaLogger.log("Initializing RedPill model.");
+                elizaLogger.debug("Initializing RedPill model.");
                 const serverUrl = models[provider].endpoint;
                 const openai = createOpenAI({ apiKey, baseURL: serverUrl });

@@ -230,12 +230,12 @@ export async function generateText({
                 });

                 response = openaiResponse;
-                elizaLogger.log("Received response from OpenAI model.");
+                elizaLogger.debug("Received response from OpenAI model.");
                 break;
             }

             case ModelProviderName.OPENROUTER: {
-                elizaLogger.log("Initializing OpenRouter model.");
+                elizaLogger.debug("Initializing OpenRouter model.");
                 const serverUrl = models[provider].endpoint;
                 const openrouter = createOpenAI({ apiKey, baseURL: serverUrl });

@@ -253,20 +253,20 @@ export async function generateText({
                 });

                 response = openrouterResponse;
-                elizaLogger.log("Received response from OpenRouter model.");
+                elizaLogger.debug("Received response from OpenRouter model.");
                 break;
             }

             case ModelProviderName.OLLAMA:
                 {
-                    console.log("Initializing Ollama model.");
+                    console.debug("Initializing Ollama model.");

                     const ollamaProvider = createOllama({
                         baseURL: models[provider].endpoint + "/api",
                     });
                     const ollama = ollamaProvider(model);

-                    console.log("****** MODEL\n", model);
+                    console.debug("****** MODEL\n", model);

                     const { text: ollamaResponse } = await aiGenerateText({
                         model: ollama,
@@ -279,7 +279,7 @@ export async function generateText({

                     response = ollamaResponse;
                 }
-                console.log("Received response from Ollama model.");
+                console.debug("Received response from Ollama model.");
                 break;

             default: {
@@ -341,7 +341,7 @@ export async function generateShouldRespond({
     let retryDelay = 1000;
     while (true) {
         try {
-            elizaLogger.log(
+            elizaLogger.debug(
                 "Attempting to generate text with context:",
                 context
             );
@@ -351,13 +351,13 @@ export async function generateShouldRespond({
                 modelClass,
             });

-            elizaLogger.log("Received response from generateText:", response);
+            elizaLogger.debug("Received response from generateText:", response);
             const parsedResponse = parseShouldRespondFromText(response.trim());
             if (parsedResponse) {
-                elizaLogger.log("Parsed response:", parsedResponse);
+                elizaLogger.debug("Parsed response:", parsedResponse);
                 return parsedResponse;
             } else {
-                elizaLogger.log("generateShouldRespond no response");
+                elizaLogger.debug("generateShouldRespond no response");
             }
         } catch (error) {
             elizaLogger.error("Error in generateShouldRespond:", error);
@@ -640,7 +640,7 @@ export async function generateMessageResponse({
             // try parsing the response as JSON, if null then try again
             const parsedContent = parseJSONObjectFromText(response) as Content;
             if (!parsedContent) {
-                elizaLogger.log("parsedContent is null, retrying");
+                elizaLogger.debug("parsedContent is null, retrying");
                 continue;
             }

@@ -650,7 +650,7 @@ export async function generateMessageResponse({
             // wait for 2 seconds
             retryLength *= 2;
             await new Promise((resolve) => setTimeout(resolve, retryLength));
-            elizaLogger.log("Retrying...");
+            elizaLogger.debug("Retrying...");
         }
     }
 }
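
Taken together, the generation.ts changes demote routine per-provider progress messages from log to debug (and, in the Ollama branch, from console.log to console.debug), so they no longer appear in normal runs. A minimal stand-in sketch of the resulting behavior; DemoLogger is illustrative, not the real ElizaLogger:

    // Stand-in mirroring the gating this commit adds to ElizaLogger.
    class DemoLogger {
        // Same env convention as logger.ts: start with verbose=true to enable debug.
        verbose = process.env.verbose === "true";

        debug(...strings: unknown[]) {
            if (!this.verbose) return; // suppressed by default
            console.debug(...strings);
        }

        log(...strings: unknown[]) {
            console.log(...strings); // log() remains unconditional
        }
    }

    const demo = new DemoLogger();
    demo.debug("Initializing OpenAI model."); // printed only when verbose=true
    demo.log("Always visible.");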

packages/core/src/logger.ts (+6 -0)
@@ -1,4 +1,9 @@
 class ElizaLogger {
+    constructor() {
+        this.verbose = process.env.verbose === "true" || false;
+    }
+
+    verbose = false;
     closeByNewLine = true;
     useIcons = true;
     logsTitle = "LOGS";
@@ -222,6 +227,7 @@ class ElizaLogger {
         }
     }
     debug(...strings) {
+        if (!this.verbose) return;
         const fg = "magenta";
         const bg = "";
         const icon = "\u1367";
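
With this gate, debug output becomes opt-in. The constructor reads process.env.verbose, so setting verbose=true in the environment before launching the process enables it; and since verbose is a plain public field, it can also be flipped at runtime. A hedged sketch, assuming the shared instance is exported as elizaLogger (import path not verified against this commit):

    // Import path is an assumption for illustration.
    import { elizaLogger } from "@ai16z/eliza";

    elizaLogger.verbose = true;       // same effect as launching with verbose=true
    elizaLogger.debug("now printed"); // would be silent with verbose unset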
