Commit 46139a1

fix anthropic
1 parent 250b070 commit 46139a1

2 files changed: +69 −71 lines changed


core/src/core/generation.ts

+63 −65
@@ -13,7 +13,8 @@ import models from "./models.ts";
 
 import { generateText as aiGenerateText } from "ai";
 import { createAnthropicVertex } from "anthropic-vertex-ai";
-
+import Anthropic from "@anthropic-ai/sdk";
+import { createAnthropic } from "@ai-sdk/anthropic";
 /**
  * Send a message to the model for a text generateText - receive a string back and parse how you'd like
  * @param opts - The options for the generateText request.
@@ -68,64 +69,62 @@ export async function generateText({
 
     switch (provider) {
         case ModelProvider.OPENAI:
-        case ModelProvider.LLAMACLOUD:
-            {
-                console.log("Initializing OpenAI model.");
-                const openai = createOpenAI({ apiKey });
-
-                const { text: openaiResponse } = await aiGenerateText({
-                    model: openai.languageModel(model),
-                    prompt: context,
-                    temperature: temperature,
-                    maxTokens: max_response_length,
-                    frequencyPenalty: frequency_penalty,
-                    presencePenalty: presence_penalty,
-                });
-
-                response = openaiResponse;
-                console.log("Received response from OpenAI model.");
-                break;
-            }
-
-        case ModelProvider.ANTHROPIC:
-            {
-                console.log("Initializing Anthropic model.");
-                const anthropicVertex = createAnthropicVertex();
-
-                const { text: anthropicResponse } = await aiGenerateText({
-                    model: anthropicVertex(model),
-                    prompt: context,
-                    temperature: temperature,
-                    maxTokens: max_response_length,
-                    frequencyPenalty: frequency_penalty,
-                    presencePenalty: presence_penalty,
-                });
-
-                response = anthropicResponse;
-                console.log("Received response from Anthropic model.");
-                break;
-            }
-
-        case ModelProvider.GROK:
-            {
-                console.log("Initializing Grok model.");
-                const grok = createGroq({ apiKey });
-
-                const { text: grokResponse } = await aiGenerateText({
-                    model: grok.languageModel(model, {
-                        parallelToolCalls: false,
-                    }),
-                    prompt: context,
-                    temperature: temperature,
-                    maxTokens: max_response_length,
-                    frequencyPenalty: frequency_penalty,
-                    presencePenalty: presence_penalty,
-                });
-
-                response = grokResponse;
-                console.log("Received response from Grok model.");
-                break;
-            }
+        case ModelProvider.LLAMACLOUD: {
+            console.log("Initializing OpenAI model.");
+            const openai = createOpenAI({ apiKey });
+
+            const { text: openaiResponse } = await aiGenerateText({
+                model: openai.languageModel(model),
+                prompt: context,
+                temperature: temperature,
+                maxTokens: max_response_length,
+                frequencyPenalty: frequency_penalty,
+                presencePenalty: presence_penalty,
+            });
+
+            response = openaiResponse;
+            console.log("Received response from OpenAI model.");
+            break;
+        }
+
+        case ModelProvider.ANTHROPIC: {
+            console.log("Initializing Anthropic model.");
+
+            const anthropic = createAnthropic({ apiKey });
+
+            const { text: anthropicResponse } = await aiGenerateText({
+                model: anthropic.languageModel(model),
+                prompt: context,
+                temperature: temperature,
+                maxTokens: max_response_length,
+                frequencyPenalty: frequency_penalty,
+                presencePenalty: presence_penalty,
+            });
+
+            response = anthropicResponse;
+            console.log("Received response from Anthropic model.");
+            break;
+        }
+
+        case ModelProvider.GROK: {
+            console.log("Initializing Grok model.");
+            const grok = createGroq({ apiKey });
+
+            const { text: grokResponse } = await aiGenerateText({
+                model: grok.languageModel(model, {
+                    parallelToolCalls: false,
+                }),
+                prompt: context,
+                temperature: temperature,
+                maxTokens: max_response_length,
+                frequencyPenalty: frequency_penalty,
+                presencePenalty: presence_penalty,
+            });
+
+            response = grokResponse;
+            console.log("Received response from Grok model.");
+            break;
+        }
 
         case ModelProvider.LLAMALOCAL:
             console.log("Using local Llama model for text completion.");
@@ -140,12 +139,11 @@ export async function generateText({
             console.log("Received response from local Llama model.");
             break;
 
-        default:
-            {
-                const errorMessage = `Unsupported provider: ${provider}`;
-                console.error(errorMessage);
-                throw new Error(errorMessage);
-            }
+        default: {
+            const errorMessage = `Unsupported provider: ${provider}`;
+            console.error(errorMessage);
+            throw new Error(errorMessage);
+        }
     }
 
     return response;
core/src/core/models.ts

+6 −6
@@ -40,9 +40,9 @@ const models: Models = {
         },
         endpoint: "https://api.anthropic.com/v1",
         model: {
-            [ModelClass.SMALL]: "claude-3-haiku",
-            [ModelClass.MEDIUM]: "claude-3-5-sonnet",
-            [ModelClass.LARGE]: "claude-3-opus",
+            [ModelClass.SMALL]: "claude-3-5-sonnet-20241022",
+            [ModelClass.MEDIUM]: "claude-3-5-sonnet-20241022",
+            [ModelClass.LARGE]: "claude-3-opus-20240229",
         },
     },
     [ModelProvider.CLAUDE_VERTEX]: {
@@ -56,9 +56,9 @@ const models: Models = {
         },
         endpoint: "https://api.anthropic.com/v1", // TODO: check
         model: {
-            [ModelClass.SMALL]: "claude-3-haiku",
-            [ModelClass.MEDIUM]: "claude-3-5-sonnet",
-            [ModelClass.LARGE]: "claude-3-opus",
+            [ModelClass.SMALL]: "claude-3-5-sonnet-20241022",
+            [ModelClass.MEDIUM]: "claude-3-5-sonnet-20241022",
+            [ModelClass.LARGE]: "claude-3-opus-20240229",
         },
     },
     [ModelProvider.GROK]: {
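
On the models.ts side, the Anthropic entries move from undated aliases ("claude-3-haiku", "claude-3-5-sonnet") to dated model IDs, which is the form the Anthropic API accepts; the undated names are presumably what was failing. A small sketch of how a caller resolves these entries, assuming the exported models map keeps the shape shown above (the enum import path is a guess, not taken from the diff):

import models from "./models.ts";
import { ModelProvider, ModelClass } from "./types.ts"; // hypothetical path for the enums

// Look up the concrete, dated model ID for a provider/class pair.
const modelId = models[ModelProvider.ANTHROPIC].model[ModelClass.MEDIUM];
console.log(modelId); // "claude-3-5-sonnet-20241022"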
