Skip to content

Commit aca0cdd

Browse files
authored
Merge pull request #194 from juke/Groq-API-Integration
Groq api integration
2 parents: 2e440a0 + 96236b2 · commit aca0cdd

File tree

4 files changed

+39
-0
lines changed

4 files changed

+39
-0
lines changed

core/.env.example

+2
Original file line number · Diff line number · Diff line change
@@ -2,6 +2,8 @@
22
DISCORD_APPLICATION_ID=
33
DISCORD_API_TOKEN= # Bot token
44
OPENAI_API_KEY=sk-* # OpenAI API key, starting with sk-
5+
GROQ_API_KEY=gsk_*
6+
57
ELEVENLABS_XI_API_KEY= # API key from elevenlabs
68

79
# ELEVENLABS SETTINGS

core/src/core/generation.ts

+18
Original file line number · Diff line number · Diff line change
@@ -131,6 +131,24 @@ export async function generateText({
131131
break;
132132
}
133133

134+
case ModelProvider.GROQ: {
135+
console.log("Initializing Groq model.");
136+
const groq = createGroq({ apiKey });
137+
138+
const { text: groqResponse } = await aiGenerateText({
139+
model: groq.languageModel(model),
140+
prompt: context,
141+
temperature: temperature,
142+
maxTokens: max_response_length,
143+
frequencyPenalty: frequency_penalty,
144+
presencePenalty: presence_penalty,
145+
});
146+
147+
response = groqResponse;
148+
console.log("Received response from Groq model.");
149+
break;
150+
}
151+
134152
case ModelProvider.LLAMALOCAL:
135153
prettyConsole.log(
136154
"Using local Llama model for text completion."

core/src/core/models.ts

+18
Original file line number · Diff line number · Diff line change
@@ -4,6 +4,7 @@ type Models = {
44
[ModelProvider.OPENAI]: Model;
55
[ModelProvider.ANTHROPIC]: Model;
66
[ModelProvider.GROK]: Model;
7+
[ModelProvider.GROQ]: Model;
78
[ModelProvider.LLAMACLOUD]: Model;
89
[ModelProvider.LLAMALOCAL]: Model;
910
[ModelProvider.GOOGLE]: Model;
@@ -78,6 +79,23 @@ const models: Models = {
7879
[ModelClass.EMBEDDING]: "grok-2-beta", // not sure about this one
7980
},
8081
},
82+
[ModelProvider.GROQ]: {
83+
endpoint: "https://api.groq.com/openai/v1",
84+
settings: {
85+
stop: [],
86+
maxInputTokens: 128000,
87+
maxOutputTokens: 8000,
88+
frequency_penalty: 0.0,
89+
presence_penalty: 0.0,
90+
temperature: 0.3,
91+
},
92+
model: {
93+
[ModelClass.SMALL]: "llama-3.1-8b-instant",
94+
[ModelClass.MEDIUM]: "llama-3.1-70b-versatile",
95+
[ModelClass.LARGE]: "llama-3.2-90b-text-preview",
96+
[ModelClass.EMBEDDING]: "llama-3.1-8b-instant",
97+
},
98+
},
8199
[ModelProvider.LLAMACLOUD]: {
82100
settings: {
83101
stop: [],

core/src/core/types.ts

+1
Original file line number · Diff line number · Diff line change
@@ -102,6 +102,7 @@ export enum ModelProvider {
102102
OPENAI = "openai",
103103
ANTHROPIC = "anthropic",
104104
GROK = "grok",
105+
GROQ = "groq",
105106
LLAMACLOUD = "llama_cloud",
106107
LLAMALOCAL = "llama_local",
107108
GOOGLE = "google",

0 commit comments

Comments (0)