@@ -78,47 +78,68 @@ export async function generateText({

     // allow character.json settings => secrets to override models
     // FIXME: add MODEL_MEDIUM support
-    switch (provider) {
+    switch (provider) {
         // if runtime.getSetting("LLAMACLOUD_MODEL_LARGE") is true and modelProvider is LLAMACLOUD, then use the large model
-        case ModelProviderName.LLAMACLOUD: {
-            switch (modelClass) {
-                case ModelClass.LARGE: {
-                    model = runtime.getSetting("LLAMACLOUD_MODEL_LARGE") || model;
-                }
-                break;
-                case ModelClass.SMALL: {
-                    model = runtime.getSetting("LLAMACLOUD_MODEL_SMALL") || model;
+        case ModelProviderName.LLAMACLOUD:
+            {
+                switch (modelClass) {
+                    case ModelClass.LARGE:
+                        {
+                            model =
+                                runtime.getSetting("LLAMACLOUD_MODEL_LARGE") ||
+                                model;
+                        }
+                        break;
+                    case ModelClass.SMALL:
+                        {
+                            model =
+                                runtime.getSetting("LLAMACLOUD_MODEL_SMALL") ||
+                                model;
+                        }
+                        break;
                 }
-                break;
             }
-        }
-        break;
-        case ModelProviderName.TOGETHER: {
-            switch (modelClass) {
-                case ModelClass.LARGE: {
-                    model = runtime.getSetting("TOGETHER_MODEL_LARGE") || model;
-                }
-                break;
-                case ModelClass.SMALL: {
-                    model = runtime.getSetting("TOGETHER_MODEL_SMALL") || model;
+            break;
+        case ModelProviderName.TOGETHER:
+            {
+                switch (modelClass) {
+                    case ModelClass.LARGE:
+                        {
+                            model =
+                                runtime.getSetting("TOGETHER_MODEL_LARGE") ||
+                                model;
+                        }
+                        break;
+                    case ModelClass.SMALL:
+                        {
+                            model =
+                                runtime.getSetting("TOGETHER_MODEL_SMALL") ||
+                                model;
+                        }
+                        break;
                 }
-                break;
             }
-        }
-        break;
-        case ModelProviderName.OPENROUTER: {
-            switch (modelClass) {
-                case ModelClass.LARGE: {
-                    model = runtime.getSetting("LARGE_OPENROUTER_MODEL") || model;
-                }
-                break;
-                case ModelClass.SMALL: {
-                    model = runtime.getSetting("SMALL_OPENROUTER_MODEL") || model;
+            break;
+        case ModelProviderName.OPENROUTER:
+            {
+                switch (modelClass) {
+                    case ModelClass.LARGE:
+                        {
+                            model =
+                                runtime.getSetting("LARGE_OPENROUTER_MODEL") ||
+                                model;
+                        }
+                        break;
+                    case ModelClass.SMALL:
+                        {
+                            model =
+                                runtime.getSetting("SMALL_OPENROUTER_MODEL") ||
+                                model;
+                        }
+                        break;
                 }
-                break;
             }
-        }
-        break;
+            break;
     }

     elizaLogger.info("Selected model:", model);
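The hunk above only re-wraps the provider/model-class override logic (long single-line assignments become multi-line), so behavior is unchanged: each branch is still `runtime.getSetting(KEY) || model`. As a hedged sketch of how those overrides get populated — the `settings.secrets` block of a character file is one of the places `runtime.getSetting()` reads before falling back to environment variables; the model ID values below are made-up placeholders:

```typescript
// Sketch (not part of the diff): a character definition whose
// settings.secrets override the LlamaCloud model IDs that the
// switch above reads via runtime.getSetting().
const character = {
    name: "example-agent",
    modelProvider: "llama_cloud",
    settings: {
        secrets: {
            LLAMACLOUD_MODEL_LARGE: "some-large-model-id", // placeholder
            LLAMACLOUD_MODEL_SMALL: "some-small-model-id", // placeholder
        },
    },
};
```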
@@ -155,7 +176,11 @@ export async function generateText({
         case ModelProviderName.HYPERBOLIC:
         case ModelProviderName.TOGETHER: {
             elizaLogger.debug("Initializing OpenAI model.");
-            const openai = createOpenAI({ apiKey, baseURL: endpoint });
+            const openai = createOpenAI({
+                apiKey,
+                baseURL: endpoint,
+                fetch: runtime.fetch,
+            });

             const { text: openaiResponse } = await aiGenerateText({
                 model: openai.languageModel(model),
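This hunk and every one that follows make the same change: the provider factory now receives the runtime's `fetch` implementation instead of implicitly using the global one. The Vercel AI SDK's `create*` factories accept any function with the standard WHATWG `fetch` signature, which is what lets a runtime substitute a proxied or instrumented HTTP client. A minimal sketch, assuming a logging wrapper (illustrative only — not what eliza's `runtime.fetch` necessarily does):

```typescript
import { createOpenAI } from "@ai-sdk/openai";

// Sketch: a custom fetch that logs outbound requests and then delegates
// to the platform fetch. Any function matching the WHATWG fetch signature
// can be supplied, which is exactly how runtime.fetch is used above.
const loggingFetch: typeof fetch = async (input, init) => {
    const url = input instanceof Request ? input.url : input.toString();
    console.log("model request:", url);
    return fetch(input, init);
};

const openai = createOpenAI({
    apiKey: process.env.OPENAI_API_KEY,
    baseURL: "https://api.openai.com/v1", // stand-in for the diff's `endpoint`
    fetch: loggingFetch,
});
```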
@@ -176,7 +201,9 @@ export async function generateText({
         }

         case ModelProviderName.GOOGLE: {
-            const google = createGoogleGenerativeAI();
+            const google = createGoogleGenerativeAI({
+                fetch: runtime.fetch,
+            });

             const { text: googleResponse } = await aiGenerateText({
                 model: google(model),
@@ -199,7 +226,10 @@ export async function generateText({
         case ModelProviderName.ANTHROPIC: {
             elizaLogger.debug("Initializing Anthropic model.");

-            const anthropic = createAnthropic({ apiKey });
+            const anthropic = createAnthropic({
+                apiKey,
+                fetch: runtime.fetch,
+            });

             const { text: anthropicResponse } = await aiGenerateText({
                 model: anthropic.languageModel(model),
@@ -222,7 +252,10 @@ export async function generateText({
         case ModelProviderName.CLAUDE_VERTEX: {
             elizaLogger.debug("Initializing Claude Vertex model.");

-            const anthropic = createAnthropic({ apiKey });
+            const anthropic = createAnthropic({
+                apiKey,
+                fetch: runtime.fetch,
+            });

             const { text: anthropicResponse } = await aiGenerateText({
                 model: anthropic.languageModel(model),
@@ -246,7 +279,11 @@ export async function generateText({

         case ModelProviderName.GROK: {
             elizaLogger.debug("Initializing Grok model.");
-            const grok = createOpenAI({ apiKey, baseURL: endpoint });
+            const grok = createOpenAI({
+                apiKey,
+                baseURL: endpoint,
+                fetch: runtime.fetch,
+            });

             const { text: grokResponse } = await aiGenerateText({
                 model: grok.languageModel(model, {
@@ -269,7 +306,7 @@ export async function generateText({
         }

         case ModelProviderName.GROQ: {
-            const groq = createGroq({ apiKey });
+            const groq = createGroq({ apiKey, fetch: runtime.fetch });

             const { text: groqResponse } = await aiGenerateText({
                 model: groq.languageModel(model),
@@ -316,7 +353,11 @@ export async function generateText({
         case ModelProviderName.REDPILL: {
             elizaLogger.debug("Initializing RedPill model.");
             const serverUrl = models[provider].endpoint;
-            const openai = createOpenAI({ apiKey, baseURL: serverUrl });
+            const openai = createOpenAI({
+                apiKey,
+                baseURL: serverUrl,
+                fetch: runtime.fetch,
+            });

             const { text: redpillResponse } = await aiGenerateText({
                 model: openai.languageModel(model),
@@ -339,7 +380,11 @@ export async function generateText({
         case ModelProviderName.OPENROUTER: {
             elizaLogger.debug("Initializing OpenRouter model.");
             const serverUrl = models[provider].endpoint;
-            const openrouter = createOpenAI({ apiKey, baseURL: serverUrl });
+            const openrouter = createOpenAI({
+                apiKey,
+                baseURL: serverUrl,
+                fetch: runtime.fetch,
+            });

             const { text: openrouterResponse } = await aiGenerateText({
                 model: openrouter.languageModel(model),
@@ -365,6 +410,7 @@ export async function generateText({

             const ollamaProvider = createOllama({
                 baseURL: models[provider].endpoint + "/api",
+                fetch: runtime.fetch,
             });
             const ollama = ollamaProvider(model);

@@ -389,6 +435,7 @@ export async function generateText({
             const heurist = createOpenAI({
                 apiKey: apiKey,
                 baseURL: endpoint,
+                fetch: runtime.fetch,
             });

             const { text: heuristResponse } = await aiGenerateText({
@@ -434,7 +481,11 @@ export async function generateText({

             elizaLogger.debug("Using GAIANET model with baseURL:", baseURL);

-            const openai = createOpenAI({ apiKey, baseURL: endpoint });
+            const openai = createOpenAI({
+                apiKey,
+                baseURL: endpoint,
+                fetch: runtime.fetch,
+            });

             const { text: openaiResponse } = await aiGenerateText({
                 model: openai.languageModel(model),
@@ -459,6 +510,7 @@ export async function generateText({
             const galadriel = createOpenAI({
                 apiKey: apiKey,
                 baseURL: endpoint,
+                fetch: runtime.fetch,
             });

             const { text: galadrielResponse } = await aiGenerateText({
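One practical payoff of threading `runtime.fetch` through all thirteen providers is offline testing: point the runtime's fetch at a stub and `generateText` never touches the network. A rough sketch — the canned body below approximates the OpenAI chat-completions shape and may need extra fields to satisfy a given provider's response parser:

```typescript
// Sketch: a stub fetch returning a canned chat completion. The JSON shape
// approximates OpenAI's chat-completions response; adjust per provider.
const stubFetch: typeof fetch = async () =>
    new Response(
        JSON.stringify({
            id: "chatcmpl-test",
            object: "chat.completion",
            created: 0,
            model: "stub",
            choices: [
                {
                    index: 0,
                    message: { role: "assistant", content: "stubbed reply" },
                    finish_reason: "stop",
                },
            ],
            usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
        }),
        { status: 200, headers: { "Content-Type": "application/json" } }
    );
```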