@@ -69,57 +69,63 @@ export async function generateText({
     switch (provider) {
         case ModelProvider.OPENAI:
         case ModelProvider.LLAMACLOUD:
-            console.log("Initializing OpenAI model.");
-            const openai = createOpenAI({ apiKey });
-
-            const { text: openaiResponse } = await aiGenerateText({
-                model: openai.languageModel(model),
-                prompt: context,
-                temperature: temperature,
-                maxTokens: max_response_length,
-                frequencyPenalty: frequency_penalty,
-                presencePenalty: presence_penalty,
-            });
-
-            response = openaiResponse;
-            console.log("Received response from OpenAI model.");
-            break;
+            {
+                console.log("Initializing OpenAI model.");
+                const openai = createOpenAI({ apiKey });
+
+                const { text: openaiResponse } = await aiGenerateText({
+                    model: openai.languageModel(model),
+                    prompt: context,
+                    temperature: temperature,
+                    maxTokens: max_response_length,
+                    frequencyPenalty: frequency_penalty,
+                    presencePenalty: presence_penalty,
+                });
+
+                response = openaiResponse;
+                console.log("Received response from OpenAI model.");
+                break;
+            }

         case ModelProvider.ANTHROPIC:
-            console.log("Initializing Anthropic model.");
-            const anthropicVertex = createAnthropicVertex();
-
-            const { text: anthropicResponse } = await aiGenerateText({
-                model: anthropicVertex(model),
-                prompt: context,
-                temperature: temperature,
-                maxTokens: max_response_length,
-                frequencyPenalty: frequency_penalty,
-                presencePenalty: presence_penalty,
-            });
-
-            response = anthropicResponse;
-            console.log("Received response from Anthropic model.");
-            break;
+            {
+                console.log("Initializing Anthropic model.");
+                const anthropicVertex = createAnthropicVertex();
+
+                const { text: anthropicResponse } = await aiGenerateText({
+                    model: anthropicVertex(model),
+                    prompt: context,
+                    temperature: temperature,
+                    maxTokens: max_response_length,
+                    frequencyPenalty: frequency_penalty,
+                    presencePenalty: presence_penalty,
+                });
+
+                response = anthropicResponse;
+                console.log("Received response from Anthropic model.");
+                break;
+            }

         case ModelProvider.GROK:
-            console.log("Initializing Grok model.");
-            const grok = createGroq({ apiKey });
-
-            const { text: grokResponse } = await aiGenerateText({
-                model: grok.languageModel(model, {
-                    parallelToolCalls: false,
-                }),
-                prompt: context,
-                temperature: temperature,
-                maxTokens: max_response_length,
-                frequencyPenalty: frequency_penalty,
-                presencePenalty: presence_penalty,
-            });
-
-            response = grokResponse;
-            console.log("Received response from Grok model.");
-            break;
+            {
+                console.log("Initializing Grok model.");
+                const grok = createGroq({ apiKey });
+
+                const { text: grokResponse } = await aiGenerateText({
+                    model: grok.languageModel(model, {
+                        parallelToolCalls: false,
+                    }),
+                    prompt: context,
+                    temperature: temperature,
+                    maxTokens: max_response_length,
+                    frequencyPenalty: frequency_penalty,
+                    presencePenalty: presence_penalty,
+                });
+
+                response = grokResponse;
+                console.log("Received response from Grok model.");
+                break;
+            }

         case ModelProvider.LLAMALOCAL:
             console.log("Using local Llama model for text completion.");
@@ -135,9 +141,11 @@ export async function generateText({
             break;

         default:
-            const errorMessage = `Unsupported provider: ${provider}`;
-            console.error(errorMessage);
-            throw new Error(errorMessage);
+            {
+                const errorMessage = `Unsupported provider: ${provider}`;
+                console.error(errorMessage);
+                throw new Error(errorMessage);
+            }
     }

     return response;
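
For readers skimming the diff: the only structural change is that each `case` body now sits in its own `{ ... }` block, which keeps `const`/`let` declarations scoped to that case. A minimal, hypothetical TypeScript sketch of the scoping issue the braces avoid (the provider strings and variable names below are illustrative, not taken from this PR):

```ts
// Illustrative only: without the braces, both `result` declarations would share
// the switch's scope, colliding with each other and tripping lint rules such as
// ESLint's no-case-declarations.
function pickResponse(provider: string): string {
    let response = "";
    switch (provider) {
        case "openai": {
            const result = "openai response"; // scoped to this case block
            response = result;
            break;
        }
        case "anthropic": {
            const result = "anthropic response"; // same name, separate block, no clash
            response = result;
            break;
        }
        default: {
            throw new Error(`Unsupported provider: ${provider}`);
        }
    }
    return response;
}
```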