Commit 06edacd

fix eslint, switch image handling to false
1 parent 2629806 commit 06edacd

File tree

3 files changed: +60 -52 lines changed


core/eslint.config.mjs

+1 -1

@@ -37,7 +37,7 @@ export default [
 
             // Disable no-undef as TypeScript handles this better
             "no-undef": "off",
-
+            "@typescript-eslint/no-unsafe-function-type": "off",
             // Customize TypeScript rules
             "@typescript-eslint/no-explicit-any": "off",
             "@typescript-eslint/no-unused-vars": [

core/src/clients/telegram/src/messageManager.ts

+1 -1

@@ -108,7 +108,7 @@ export class MessageManager {
             ("document" in message &&
                 message.document?.mime_type?.startsWith("image/"))
         ) {
-            return true;
+            return false;
         }
 
         // Use AI to decide for text or captions
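What this changes in behavior: messages carrying an image (the visible branch checks for an image/* document; the condition cut off above the hunk presumably covers photos) used to return true here (always handled) and now return false (skipped), so only text and captions fall through to the AI-based decision below. A minimal self-contained sketch of that guard shape; the IncomingMessage type and shouldProcess name are stand-ins, not taken from the repo:

// Stand-in for the Telegram message fields this check looks at (assumption,
// not the library's actual type).
interface IncomingMessage {
    photo?: unknown[];
    document?: { mime_type?: string };
    text?: string;
    caption?: string;
}

// Hypothetical wrapper around the changed guard.
function shouldProcess(message: IncomingMessage): boolean | "ask-ai" {
    if (
        "photo" in message ||
        ("document" in message &&
            message.document?.mime_type?.startsWith("image/"))
    ) {
        return false; // after this commit: image messages are not processed here
    }
    // Use AI to decide for text or captions (unchanged path)
    return "ask-ai";
}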

core/src/core/generation.ts

+58 -50

@@ -69,57 +69,63 @@ export async function generateText({
     switch (provider) {
         case ModelProvider.OPENAI:
         case ModelProvider.LLAMACLOUD:
-            console.log("Initializing OpenAI model.");
-            const openai = createOpenAI({ apiKey });
-
-            const { text: openaiResponse } = await aiGenerateText({
-                model: openai.languageModel(model),
-                prompt: context,
-                temperature: temperature,
-                maxTokens: max_response_length,
-                frequencyPenalty: frequency_penalty,
-                presencePenalty: presence_penalty,
-            });
-
-            response = openaiResponse;
-            console.log("Received response from OpenAI model.");
-            break;
+            {
+                console.log("Initializing OpenAI model.");
+                const openai = createOpenAI({ apiKey });
+
+                const { text: openaiResponse } = await aiGenerateText({
+                    model: openai.languageModel(model),
+                    prompt: context,
+                    temperature: temperature,
+                    maxTokens: max_response_length,
+                    frequencyPenalty: frequency_penalty,
+                    presencePenalty: presence_penalty,
+                });
+
+                response = openaiResponse;
+                console.log("Received response from OpenAI model.");
+                break;
+            }

         case ModelProvider.ANTHROPIC:
-            console.log("Initializing Anthropic model.");
-            const anthropicVertex = createAnthropicVertex();
-
-            const { text: anthropicResponse } = await aiGenerateText({
-                model: anthropicVertex(model),
-                prompt: context,
-                temperature: temperature,
-                maxTokens: max_response_length,
-                frequencyPenalty: frequency_penalty,
-                presencePenalty: presence_penalty,
-            });
-
-            response = anthropicResponse;
-            console.log("Received response from Anthropic model.");
-            break;
+            {
+                console.log("Initializing Anthropic model.");
+                const anthropicVertex = createAnthropicVertex();
+
+                const { text: anthropicResponse } = await aiGenerateText({
+                    model: anthropicVertex(model),
+                    prompt: context,
+                    temperature: temperature,
+                    maxTokens: max_response_length,
+                    frequencyPenalty: frequency_penalty,
+                    presencePenalty: presence_penalty,
+                });
+
+                response = anthropicResponse;
+                console.log("Received response from Anthropic model.");
+                break;
+            }

         case ModelProvider.GROK:
-            console.log("Initializing Grok model.");
-            const grok = createGroq({ apiKey });
-
-            const { text: grokResponse } = await aiGenerateText({
-                model: grok.languageModel(model, {
-                    parallelToolCalls: false,
-                }),
-                prompt: context,
-                temperature: temperature,
-                maxTokens: max_response_length,
-                frequencyPenalty: frequency_penalty,
-                presencePenalty: presence_penalty,
-            });
-
-            response = grokResponse;
-            console.log("Received response from Grok model.");
-            break;
+            {
+                console.log("Initializing Grok model.");
+                const grok = createGroq({ apiKey });
+
+                const { text: grokResponse } = await aiGenerateText({
+                    model: grok.languageModel(model, {
+                        parallelToolCalls: false,
+                    }),
+                    prompt: context,
+                    temperature: temperature,
+                    maxTokens: max_response_length,
+                    frequencyPenalty: frequency_penalty,
+                    presencePenalty: presence_penalty,
+                });
+
+                response = grokResponse;
+                console.log("Received response from Grok model.");
+                break;
+            }

         case ModelProvider.LLAMALOCAL:
             console.log("Using local Llama model for text completion.");

@@ -135,9 +141,11 @@ export async function generateText({
             break;

         default:
-            const errorMessage = `Unsupported provider: ${provider}`;
-            console.error(errorMessage);
-            throw new Error(errorMessage);
+            {
+                const errorMessage = `Unsupported provider: ${provider}`;
+                console.error(errorMessage);
+                throw new Error(errorMessage);
+            }
     }

     return response;
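Why the added braces count as an ESLint fix: a const or let written directly inside a case clause is scoped to the entire switch, so ESLint's no-case-declarations rule rejects it (presumably the rule behind the "fix eslint" part of this commit, though the message does not name it). Wrapping each case body in a block gives the declarations their own scope. A standalone sketch of the pattern:

// Without the inner braces, no-case-declarations flags these consts because
// their scope would be the whole switch and the two labels would collide.
function describe(kind: "a" | "b"): string {
    switch (kind) {
        case "a": {
            const label = "first"; // block-scoped to this case
            return label;
        }
        default: {
            const label = "second"; // same name, separate block scope
            return label;
        }
    }
}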
