diff --git a/.env.example b/.env.example
index 0d808f655b0..7c93b6966e6 100644
--- a/.env.example
+++ b/.env.example
@@ -87,6 +87,12 @@ SMALL_REDPILL_MODEL=  # Default: gpt-4o-mini
 MEDIUM_REDPILL_MODEL= # Default: gpt-4o
 LARGE_REDPILL_MODEL=  # Default: gpt-4o
 
+# Grok Configuration
+SMALL_GROK_MODEL=       # Default: grok-2-1212
+MEDIUM_GROK_MODEL=      # Default: grok-2-1212
+LARGE_GROK_MODEL=       # Default: grok-2-1212
+EMBEDDING_GROK_MODEL=   # Default: grok-2-1212
+
 # Ollama Configuration
 OLLAMA_SERVER_URL= # Default: localhost:11434
 OLLAMA_MODEL=
diff --git a/packages/core/src/models.ts b/packages/core/src/models.ts
index 06c7e564d32..e57746530cd 100644
--- a/packages/core/src/models.ts
+++ b/packages/core/src/models.ts
@@ -87,10 +87,10 @@ export const models: Models = {
         },
         endpoint: "https://api.x.ai/v1",
         model: {
-            [ModelClass.SMALL]: "grok-beta",
-            [ModelClass.MEDIUM]: "grok-beta",
-            [ModelClass.LARGE]: "grok-beta",
-            [ModelClass.EMBEDDING]: "grok-beta", // not sure about this one
+            [ModelClass.SMALL]: settings.SMALL_GROK_MODEL || "grok-2-1212",
+            [ModelClass.MEDIUM]: settings.MEDIUM_GROK_MODEL || "grok-2-1212",
+            [ModelClass.LARGE]: settings.LARGE_GROK_MODEL || "grok-2-1212",
+            [ModelClass.EMBEDDING]: settings.EMBEDDING_GROK_MODEL || "grok-2-1212", // xAI may not expose a dedicated embedding model; verify this fallback before relying on it
         },
     },
     [ModelProviderName.GROQ]: {
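For reference, the new env vars resolve through the same "setting or default" pattern used elsewhere in models.ts. Below is a minimal standalone TypeScript sketch of that resolution; the settings object and grokModels names are illustrative stand-ins, not part of this patch:

// Sketch: each Grok model class falls back to "grok-2-1212"
// when its env var is unset or empty (names here are illustrative).
const settings: Record<string, string | undefined> = {
    SMALL_GROK_MODEL: process.env.SMALL_GROK_MODEL,
    MEDIUM_GROK_MODEL: process.env.MEDIUM_GROK_MODEL,
    LARGE_GROK_MODEL: process.env.LARGE_GROK_MODEL,
    EMBEDDING_GROK_MODEL: process.env.EMBEDDING_GROK_MODEL,
};

const grokModels = {
    small: settings.SMALL_GROK_MODEL || "grok-2-1212",
    medium: settings.MEDIUM_GROK_MODEL || "grok-2-1212",
    large: settings.LARGE_GROK_MODEL || "grok-2-1212",
    embedding: settings.EMBEDDING_GROK_MODEL || "grok-2-1212",
};

console.log(grokModels.small); // "grok-2-1212" unless SMALL_GROK_MODEL is set

Setting, for example, SMALL_GROK_MODEL=grok-beta in .env would override only the small class, while the other classes keep the grok-2-1212 default.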