@@ -13,7 +13,8 @@ import models from "./models.ts";
 
 import { generateText as aiGenerateText } from "ai";
 import { createAnthropicVertex } from "anthropic-vertex-ai";
-
+import Anthropic from "@anthropic-ai/sdk";
+import { createAnthropic } from "@ai-sdk/anthropic";
 /**
  * Send a message to the model for a text generateText - receive a string back and parse how you'd like
  * @param opts - The options for the generateText request.
@@ -68,64 +69,62 @@ export async function generateText({
 
     switch (provider) {
         case ModelProvider.OPENAI:
-        case ModelProvider.LLAMACLOUD:
-            {
-                console.log("Initializing OpenAI model.");
-                const openai = createOpenAI({ apiKey });
-
-                const { text: openaiResponse } = await aiGenerateText({
-                    model: openai.languageModel(model),
-                    prompt: context,
-                    temperature: temperature,
-                    maxTokens: max_response_length,
-                    frequencyPenalty: frequency_penalty,
-                    presencePenalty: presence_penalty,
-                });
-
-                response = openaiResponse;
-                console.log("Received response from OpenAI model.");
-                break;
-            }
-
-        case ModelProvider.ANTHROPIC:
-            {
-                console.log("Initializing Anthropic model.");
-                const anthropicVertex = createAnthropicVertex();
-
-                const { text: anthropicResponse } = await aiGenerateText({
-                    model: anthropicVertex(model),
-                    prompt: context,
-                    temperature: temperature,
-                    maxTokens: max_response_length,
-                    frequencyPenalty: frequency_penalty,
-                    presencePenalty: presence_penalty,
-                });
-
-                response = anthropicResponse;
-                console.log("Received response from Anthropic model.");
-                break;
-            }
-
-        case ModelProvider.GROK:
-            {
-                console.log("Initializing Grok model.");
-                const grok = createGroq({ apiKey });
-
-                const { text: grokResponse } = await aiGenerateText({
-                    model: grok.languageModel(model, {
-                        parallelToolCalls: false,
-                    }),
-                    prompt: context,
-                    temperature: temperature,
-                    maxTokens: max_response_length,
-                    frequencyPenalty: frequency_penalty,
-                    presencePenalty: presence_penalty,
-                });
-
-                response = grokResponse;
-                console.log("Received response from Grok model.");
-                break;
-            }
+        case ModelProvider.LLAMACLOUD: {
+            console.log("Initializing OpenAI model.");
+            const openai = createOpenAI({ apiKey });
+
+            const { text: openaiResponse } = await aiGenerateText({
+                model: openai.languageModel(model),
+                prompt: context,
+                temperature: temperature,
+                maxTokens: max_response_length,
+                frequencyPenalty: frequency_penalty,
+                presencePenalty: presence_penalty,
+            });
+
+            response = openaiResponse;
+            console.log("Received response from OpenAI model.");
+            break;
+        }
+
+        case ModelProvider.ANTHROPIC: {
+            console.log("Initializing Anthropic model.");
+
+            const anthropic = createAnthropic({ apiKey });
+
+            const { text: anthropicResponse } = await aiGenerateText({
+                model: anthropic.languageModel(model),
+                prompt: context,
+                temperature: temperature,
+                maxTokens: max_response_length,
+                frequencyPenalty: frequency_penalty,
+                presencePenalty: presence_penalty,
+            });
+
+            response = anthropicResponse;
+            console.log("Received response from Anthropic model.");
+            break;
+        }
+
+        case ModelProvider.GROK: {
+            console.log("Initializing Grok model.");
+            const grok = createGroq({ apiKey });
+
+            const { text: grokResponse } = await aiGenerateText({
+                model: grok.languageModel(model, {
+                    parallelToolCalls: false,
+                }),
+                prompt: context,
+                temperature: temperature,
+                maxTokens: max_response_length,
+                frequencyPenalty: frequency_penalty,
+                presencePenalty: presence_penalty,
+            });
+
+            response = grokResponse;
+            console.log("Received response from Grok model.");
+            break;
+        }
 
         case ModelProvider.LLAMALOCAL:
            console.log("Using local Llama model for text completion.");
@@ -140,12 +139,11 @@ export async function generateText({
            console.log("Received response from local Llama model.");
            break;
 
-        default:
-            {
-                const errorMessage = `Unsupported provider: ${provider}`;
-                console.error(errorMessage);
-                throw new Error(errorMessage);
-            }
+        default: {
+            const errorMessage = `Unsupported provider: ${provider}`;
+            console.error(errorMessage);
+            throw new Error(errorMessage);
+        }
     }
 
     return response;
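
---

For reviewers: below is a minimal standalone sketch of the new `@ai-sdk/anthropic` path this diff switches the `ANTHROPIC` branch to, for exercising it outside the switch. The model id and environment variable are hypothetical placeholders, not values from this PR; the call shape (`languageModel`, `maxTokens`) mirrors the AI SDK version the surrounding code already uses.

```ts
// Standalone sketch of the ANTHROPIC branch above.
// The model id and env var below are hypothetical placeholders.
import { generateText as aiGenerateText } from "ai";
import { createAnthropic } from "@ai-sdk/anthropic";

const anthropic = createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY });

const { text } = await aiGenerateText({
    model: anthropic.languageModel("claude-3-5-sonnet-20241022"), // placeholder id
    prompt: "Say hello.",
    temperature: 0.7,
    maxTokens: 256,
});
console.log(text);
```

One observation on the imports: in the portion of the file shown here, neither the `createAnthropicVertex` import nor the new default `Anthropic` import from `@anthropic-ai/sdk` appears to be used after this change; only `createAnthropic` is.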