@@ -80,47 +80,68 @@ export async function generateText({

     // allow character.json settings => secrets to override models
     // FIXME: add MODEL_MEDIUM support
-    switch (provider) {
+    switch (provider) {
         // if runtime.getSetting("LLAMACLOUD_MODEL_LARGE") is true and modelProvider is LLAMACLOUD, then use the large model
-        case ModelProviderName.LLAMACLOUD: {
-            switch (modelClass) {
-                case ModelClass.LARGE: {
-                    model = runtime.getSetting("LLAMACLOUD_MODEL_LARGE") || model;
-                }
-                break;
-                case ModelClass.SMALL: {
-                    model = runtime.getSetting("LLAMACLOUD_MODEL_SMALL") || model;
+        case ModelProviderName.LLAMACLOUD:
+            {
+                switch (modelClass) {
+                    case ModelClass.LARGE:
+                        {
+                            model =
+                                runtime.getSetting("LLAMACLOUD_MODEL_LARGE") ||
+                                model;
+                        }
+                        break;
+                    case ModelClass.SMALL:
+                        {
+                            model =
+                                runtime.getSetting("LLAMACLOUD_MODEL_SMALL") ||
+                                model;
+                        }
+                        break;
                 }
-                break;
             }
-        }
-        break;
-        case ModelProviderName.TOGETHER: {
-            switch (modelClass) {
-                case ModelClass.LARGE: {
-                    model = runtime.getSetting("TOGETHER_MODEL_LARGE") || model;
-                }
-                break;
-                case ModelClass.SMALL: {
-                    model = runtime.getSetting("TOGETHER_MODEL_SMALL") || model;
+            break;
+        case ModelProviderName.TOGETHER:
+            {
+                switch (modelClass) {
+                    case ModelClass.LARGE:
+                        {
+                            model =
+                                runtime.getSetting("TOGETHER_MODEL_LARGE") ||
+                                model;
+                        }
+                        break;
+                    case ModelClass.SMALL:
+                        {
+                            model =
+                                runtime.getSetting("TOGETHER_MODEL_SMALL") ||
+                                model;
+                        }
+                        break;
                 }
-                break;
             }
-        }
-        break;
-        case ModelProviderName.OPENROUTER: {
-            switch (modelClass) {
-                case ModelClass.LARGE: {
-                    model = runtime.getSetting("LARGE_OPENROUTER_MODEL") || model;
-                }
-                break;
-                case ModelClass.SMALL: {
-                    model = runtime.getSetting("SMALL_OPENROUTER_MODEL") || model;
+            break;
+        case ModelProviderName.OPENROUTER:
+            {
+                switch (modelClass) {
+                    case ModelClass.LARGE:
+                        {
+                            model =
+                                runtime.getSetting("LARGE_OPENROUTER_MODEL") ||
+                                model;
+                        }
+                        break;
+                    case ModelClass.SMALL:
+                        {
+                            model =
+                                runtime.getSetting("SMALL_OPENROUTER_MODEL") ||
+                                model;
+                        }
+                        break;
                 }
-                break;
             }
-        }
-        break;
+            break;
     }

     elizaLogger.info("Selected model:", model);
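Note: the hunk above only reflows the model-override switch; behavior is unchanged. The pattern it preserves, as a minimal sketch (assuming `runtime.getSetting` returns the configured string or a falsy value when unset):

```ts
// Per provider and model class: prefer the configured override,
// otherwise keep the previously selected default model id.
const override = runtime.getSetting("LLAMACLOUD_MODEL_LARGE");
model = override || model;
```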
@@ -157,7 +178,11 @@ export async function generateText({
         case ModelProviderName.HYPERBOLIC:
         case ModelProviderName.TOGETHER: {
             elizaLogger.debug("Initializing OpenAI model.");
-            const openai = createOpenAI({ apiKey, baseURL: endpoint });
+            const openai = createOpenAI({
+                apiKey,
+                baseURL: endpoint,
+                fetch: runtime.fetch,
+            });

             const { text: openaiResponse } = await aiGenerateText({
                 model: openai.languageModel(model),
@@ -178,7 +203,9 @@ export async function generateText({
         }

         case ModelProviderName.GOOGLE: {
-            const google = createGoogleGenerativeAI();
+            const google = createGoogleGenerativeAI({
+                fetch: runtime.fetch,
+            });

             const { text: googleResponse } = await aiGenerateText({
                 model: google(model),
@@ -201,7 +228,10 @@ export async function generateText({
         case ModelProviderName.ANTHROPIC: {
             elizaLogger.debug("Initializing Anthropic model.");

-            const anthropic = createAnthropic({ apiKey });
+            const anthropic = createAnthropic({
+                apiKey,
+                fetch: runtime.fetch,
+            });

             const { text: anthropicResponse } = await aiGenerateText({
                 model: anthropic.languageModel(model),
@@ -224,7 +254,10 @@ export async function generateText({
         case ModelProviderName.CLAUDE_VERTEX: {
             elizaLogger.debug("Initializing Claude Vertex model.");

-            const anthropic = createAnthropic({ apiKey });
+            const anthropic = createAnthropic({
+                apiKey,
+                fetch: runtime.fetch,
+            });

             const { text: anthropicResponse } = await aiGenerateText({
                 model: anthropic.languageModel(model),
@@ -248,7 +281,11 @@ export async function generateText({

         case ModelProviderName.GROK: {
             elizaLogger.debug("Initializing Grok model.");
-            const grok = createOpenAI({ apiKey, baseURL: endpoint });
+            const grok = createOpenAI({
+                apiKey,
+                baseURL: endpoint,
+                fetch: runtime.fetch,
+            });

             const { text: grokResponse } = await aiGenerateText({
                 model: grok.languageModel(model, {
@@ -271,7 +308,7 @@ export async function generateText({
         }

         case ModelProviderName.GROQ: {
-            const groq = createGroq({ apiKey });
+            const groq = createGroq({ apiKey, fetch: runtime.fetch });

             const { text: groqResponse } = await aiGenerateText({
                 model: groq.languageModel(model),
@@ -318,7 +355,11 @@ export async function generateText({
         case ModelProviderName.REDPILL: {
             elizaLogger.debug("Initializing RedPill model.");
             const serverUrl = models[provider].endpoint;
-            const openai = createOpenAI({ apiKey, baseURL: serverUrl });
+            const openai = createOpenAI({
+                apiKey,
+                baseURL: serverUrl,
+                fetch: runtime.fetch,
+            });

             const { text: redpillResponse } = await aiGenerateText({
                 model: openai.languageModel(model),
@@ -341,7 +382,11 @@ export async function generateText({
         case ModelProviderName.OPENROUTER: {
             elizaLogger.debug("Initializing OpenRouter model.");
             const serverUrl = models[provider].endpoint;
-            const openrouter = createOpenAI({ apiKey, baseURL: serverUrl });
+            const openrouter = createOpenAI({
+                apiKey,
+                baseURL: serverUrl,
+                fetch: runtime.fetch,
+            });

             const { text: openrouterResponse } = await aiGenerateText({
                 model: openrouter.languageModel(model),
@@ -367,6 +412,7 @@ export async function generateText({

             const ollamaProvider = createOllama({
                 baseURL: models[provider].endpoint + "/api",
+                fetch: runtime.fetch,
             });
             const ollama = ollamaProvider(model);

@@ -391,6 +437,7 @@ export async function generateText({
             const heurist = createOpenAI({
                 apiKey: apiKey,
                 baseURL: endpoint,
+                fetch: runtime.fetch,
             });

             const { text: heuristResponse } = await aiGenerateText({
@@ -436,7 +483,11 @@ export async function generateText({

             elizaLogger.debug("Using GAIANET model with baseURL:", baseURL);

-            const openai = createOpenAI({ apiKey, baseURL: endpoint });
+            const openai = createOpenAI({
+                apiKey,
+                baseURL: endpoint,
+                fetch: runtime.fetch,
+            });

             const { text: openaiResponse } = await aiGenerateText({
                 model: openai.languageModel(model),
@@ -461,6 +512,7 @@ export async function generateText({
             const galadriel = createOpenAI({
                 apiKey: apiKey,
                 baseURL: endpoint,
+                fetch: runtime.fetch,
             });

             const { text: galadrielResponse } = await aiGenerateText({
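Every provider hunk above threads the same option through: `fetch: runtime.fetch`. The AI SDK provider factories (`createOpenAI`, `createAnthropic`, `createGroq`, `createGoogleGenerativeAI`, `createOllama`) accept a custom `fetch`, so supplying one function on the runtime lets proxying, retries, or instrumentation apply uniformly to all model calls. A minimal sketch, assuming `runtime.fetch` is any WHATWG-fetch-compatible function; the `loggingFetch` wrapper and the `runtime` object shape here are illustrative, not part of the codebase:

```ts
import { createOpenAI } from "@ai-sdk/openai";

// Illustrative wrapper: log each outgoing model request, then delegate
// to the global fetch. Any fetch-compatible function could be used here.
const loggingFetch: typeof fetch = async (input, init) => {
    const url = input instanceof Request ? input.url : String(input);
    console.debug("model request:", url);
    return fetch(input, init);
};

// Hypothetical runtime carrying the shared fetch (assumed shape).
const runtime = { fetch: loggingFetch };

// Same wiring as the diff: the provider routes its HTTP through runtime.fetch.
const openai = createOpenAI({
    apiKey: process.env.OPENAI_API_KEY,
    fetch: runtime.fetch,
});
```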