@@ -134,7 +134,7 @@ export async function generateText({
         case ModelProvider.GROQ: {
             console.log("Initializing Groq model.");
             const groq = createGroq({ apiKey });
-
+
             const { text: groqResponse } = await aiGenerateText({
                 model: groq.languageModel(model),
                 prompt: context,
@@ -143,26 +143,51 @@ export async function generateText({
                 frequencyPenalty: frequency_penalty,
                 presencePenalty: presence_penalty,
             });
-
+
             response = groqResponse;
             console.log("Received response from Groq model.");
             break;
         }

-        case ModelProvider.LLAMALOCAL:
+        case ModelProvider.LLAMALOCAL: {
             prettyConsole.log(
-                "Using local Llama model for text completion."
+                "Using local Llama model for text completion."
             );
             response = await runtime.llamaService.queueTextCompletion(
-                context,
-                temperature,
-                _stop,
-                frequency_penalty,
-                presence_penalty,
-                max_response_length
+                context,
+                temperature,
+                _stop,
+                frequency_penalty,
+                presence_penalty,
+                max_response_length
             );
             prettyConsole.log("Received response from local Llama model.");
             break;
+        }
+
+        case ModelProvider.REDPILL: {
+            prettyConsole.log("Initializing RedPill model.");
+            const serverUrl = models[provider].endpoint;
+            const openai = createOpenAI({ apiKey, baseURL: serverUrl });
+
+            console.log('****** MODEL\n', model)
+            console.log('****** CONTEXT\n', context)
+
+            const { text: openaiResponse } = await aiGenerateText({
+                model: openai.languageModel(model),
+                prompt: context,
+                temperature: temperature,
+                maxTokens: max_response_length,
+                frequencyPenalty: frequency_penalty,
+                presencePenalty: presence_penalty,
+            });
+
+            console.log("****** RESPONSE\n", openaiResponse);
+
+            response = openaiResponse;
+            prettyConsole.log("Received response from OpenAI model.");
+            break;
+        }

         default: {
             const errorMessage = `Unsupported provider: ${provider}`;
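
For reference, the new REDPILL case reuses the OpenAI-compatible client pattern already present in this file: createOpenAI from the Vercel AI SDK, pointed at a custom baseURL. Below is a minimal standalone sketch of that pattern; the endpoint URL and model id are illustrative placeholders (the commit itself resolves the endpoint from models[provider].endpoint at runtime).

import { createOpenAI } from "@ai-sdk/openai";
import { generateText as aiGenerateText } from "ai";

// Sketch: call an OpenAI-compatible endpoint via a custom baseURL,
// mirroring what the REDPILL branch above does.
async function redpillCompletion(
    apiKey: string,
    context: string
): Promise<string> {
    const openai = createOpenAI({
        apiKey,
        // Assumed endpoint for illustration only.
        baseURL: "https://api.red-pill.ai/v1",
    });

    const { text } = await aiGenerateText({
        model: openai.languageModel("gpt-4o"), // placeholder model id
        prompt: context,
        temperature: 0.7,
        maxTokens: 1024,
        frequencyPenalty: 0.0,
        presencePenalty: 0.0,
    });

    return text;
}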