4 files changed, +39 −0 lines changed

@@ -2,6 +2,8 @@
 DISCORD_APPLICATION_ID=
 DISCORD_API_TOKEN=              # Bot token
 OPENAI_API_KEY=sk-*             # OpenAI API key, starting with sk-
+GROQ_API_KEY=gsk_*
+
 ELEVENLABS_XI_API_KEY=          # API key from elevenlabs

 # ELEVENLABS SETTINGS
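
The gsk_* placeholder mirrors the sk-* convention used for the OpenAI key above. As a sketch only (not part of this diff), the new variable would be read from the environment like the other keys; the helper name and the prefix warning below are illustrative assumptions:

    // Illustrative sketch: read the Groq key from the environment like the other API keys.
    // getGroqApiKey and the "gsk_" prefix warning are assumptions, not code from this change.
    export function getGroqApiKey(): string {
        const apiKey = process.env.GROQ_API_KEY;
        if (!apiKey) {
            throw new Error("GROQ_API_KEY is not set");
        }
        if (!apiKey.startsWith("gsk_")) {
            console.warn("GROQ_API_KEY does not start with 'gsk_'; double-check the value.");
        }
        return apiKey;
    }
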
@@ -131,6 +131,24 @@ export async function generateText({
             break;
         }

+        case ModelProvider.GROQ: {
+            console.log("Initializing Groq model.");
+            const groq = createGroq({ apiKey });
+
+            const { text: groqResponse } = await aiGenerateText({
+                model: groq.languageModel(model),
+                prompt: context,
+                temperature: temperature,
+                maxTokens: max_response_length,
+                frequencyPenalty: frequency_penalty,
+                presencePenalty: presence_penalty,
+            });
+
+            response = groqResponse;
+            console.log("Received response from Groq model.");
+            break;
+        }
+
         case ModelProvider.LLAMALOCAL:
             prettyConsole.log(
                 "Using local Llama model for text completion."
@@ -4,6 +4,7 @@ type Models = {
     [ModelProvider.OPENAI]: Model;
     [ModelProvider.ANTHROPIC]: Model;
     [ModelProvider.GROK]: Model;
+    [ModelProvider.GROQ]: Model;
     [ModelProvider.LLAMACLOUD]: Model;
     [ModelProvider.LLAMALOCAL]: Model;
     [ModelProvider.GOOGLE]: Model;
@@ -78,6 +79,23 @@ const models: Models = {
             [ModelClass.EMBEDDING]: "grok-2-beta", // not sure about this one
         },
     },
+    [ModelProvider.GROQ]: {
+        endpoint: "https://api.groq.com/openai/v1",
+        settings: {
+            stop: [],
+            maxInputTokens: 128000,
+            maxOutputTokens: 8000,
+            frequency_penalty: 0.0,
+            presence_penalty: 0.0,
+            temperature: 0.3,
+        },
+        model: {
+            [ModelClass.SMALL]: "llama-3.1-8b-instant",
+            [ModelClass.MEDIUM]: "llama-3.1-70b-versatile",
+            [ModelClass.LARGE]: "llama-3.2-90b-text-preview",
+            [ModelClass.EMBEDDING]: "llama-3.1-8b-instant",
+        },
+    },
     [ModelProvider.LLAMACLOUD]: {
         settings: {
             stop: [],
102
102
OPENAI = "openai" ,
103
103
ANTHROPIC = "anthropic" ,
104
104
GROK = "grok" ,
105
+ GROQ = "groq" ,
105
106
LLAMACLOUD = "llama_cloud" ,
106
107
LLAMALOCAL = "llama_local" ,
107
108
GOOGLE = "google" ,
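
A note on the new value: "groq" sits right next to the visually similar "grok", and these strings typically arrive from configuration files. A hedged sketch of resolving a raw string to the enum; parseModelProvider is a made-up helper, and the enum is redeclared locally only so the snippet is self-contained:

    // Hypothetical helper: map a raw config string onto the enum and fail loudly on typos.
    enum ModelProvider {
        OPENAI = "openai",
        ANTHROPIC = "anthropic",
        GROK = "grok",
        GROQ = "groq",
        LLAMACLOUD = "llama_cloud",
        LLAMALOCAL = "llama_local",
        GOOGLE = "google",
    }

    function parseModelProvider(value: string): ModelProvider {
        const match = Object.values(ModelProvider).find((p) => (p as string) === value.toLowerCase());
        if (!match) {
            throw new Error(`Unknown model provider: ${value}`);
        }
        return match;
    }

    // parseModelProvider("groq") === ModelProvider.GROQ
    // parseModelProvider("grok") === ModelProvider.GROK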