@@ -100,32 +100,32 @@ MEDIUM_HYPERBOLIC_MODEL= # Default: meta-llama/Meta-Llama-3.1-70B-Instruc
LARGE_HYPERBOLIC_MODEL = # Default: meta-llama/Meta-Llama-3.1-405-Instruct

# Infera Configuration
- INFERA_API_KEY = # visit api.infera.org/docs to obtain an API key under /signup_user
- INFERA_MODEL = # Default: llama3.2:latest
- INFERA_SERVER_URL = # Default: https://api.infera.org/
- SMALL_INFERA_MODEL = #Recommended: llama3.2:latest
- MEDIUM_INFERA_MODEL = #Recommended: mistral-nemo:latest
- LARGE_INFERA_MODEL = #Recommended: mistral-small:latest
-
- # Venice Configuration
- VENICE_API_KEY = # generate from venice settings
- SMALL_VENICE_MODEL = # Default: llama-3.3-70b
- MEDIUM_VENICE_MODEL = # Default: llama-3.3-70b
- LARGE_VENICE_MODEL = # Default: llama-3.1-405b
- IMAGE_VENICE_MODEL = # Default: fluently-xl
-
- # Nineteen.ai Configuration
- NINETEEN_AI_API_KEY = # Get a free api key from https://nineteen.ai/app/api
- SMALL_NINETEEN_AI_MODEL = # Default: unsloth/Llama-3.2-3B-Instruct
- MEDIUM_NINETEEN_AI_MODEL = # Default: unsloth/Meta-Llama-3.1-8B-Instruct
- LARGE_NINETEEN_AI_MODEL = # Default: hugging-quants/Meta-Llama-3.1-70B-Instruct-AWQ-INT4
- IMAGE_NINETEEN_AI_MODE = # Default: dataautogpt3/ProteusV0.4-Lightning
-
- # Akash Chat API Configuration docs: https://chatapi.akash.network/documentation
- AKASH_CHAT_API_KEY = # Get from https://chatapi.akash.network/
- SMALL_AKASH_CHAT_API_MODEL = # Default: Meta-Llama-3-2-3B-Instruct
- MEDIUM_AKASH_CHAT_API_MODEL = # Default: Meta-Llama-3-3-70B-Instruct
- LARGE_AKASH_CHAT_API_MODEL = # Default: Meta-Llama-3-1-405B-Instruct-FP8
+ INFERA_API_KEY = # visit api.infera.org/docs to obtain an API key under /signup_user
+ INFERA_MODEL = # Default: llama3.2:latest
+ INFERA_SERVER_URL = # Default: https://api.infera.org/
+ SMALL_INFERA_MODEL = #Recommended: llama3.2:latest
+ MEDIUM_INFERA_MODEL = #Recommended: mistral-nemo:latest
+ LARGE_INFERA_MODEL = #Recommended: mistral-small:latest
+
+ # Venice Configuration
+ VENICE_API_KEY = # generate from venice settings
+ SMALL_VENICE_MODEL = # Default: llama-3.3-70b
+ MEDIUM_VENICE_MODEL = # Default: llama-3.3-70b
+ LARGE_VENICE_MODEL = # Default: llama-3.1-405b
+ IMAGE_VENICE_MODEL = # Default: fluently-xl
+
+ # Nineteen.ai Configuration
+ NINETEEN_AI_API_KEY = # Get a free api key from https://nineteen.ai/app/api
+ SMALL_NINETEEN_AI_MODEL = # Default: unsloth/Llama-3.2-3B-Instruct
+ MEDIUM_NINETEEN_AI_MODEL = # Default: unsloth/Meta-Llama-3.1-8B-Instruct
+ LARGE_NINETEEN_AI_MODEL = # Default: hugging-quants/Meta-Llama-3.1-70B-Instruct-AWQ-INT4
+ IMAGE_NINETEEN_AI_MODE = # Default: dataautogpt3/ProteusV0.4-Lightning
+
+ # Akash Chat API Configuration docs: https://chatapi.akash.network/documentation
+ AKASH_CHAT_API_KEY = # Get from https://chatapi.akash.network/
+ SMALL_AKASH_CHAT_API_MODEL = # Default: Meta-Llama-3-2-3B-Instruct
+ MEDIUM_AKASH_CHAT_API_MODEL = # Default: Meta-Llama-3-3-70B-Instruct
+ LARGE_AKASH_CHAT_API_MODEL = # Default: Meta-Llama-3-1-405B-Instruct-FP8

# Livepeer configuration
LIVEPEER_GATEWAY_URL = # Free inference gateways and docs: https://livepeer-eliza.com/
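
The hunk above removes and re-adds the Infera, Venice, Nineteen.ai, and Akash Chat API settings with the same variable names and documented defaults, so the provider configuration itself does not change. As a reading aid, here is a minimal TypeScript sketch of how a size-tiered variable such as SMALL_VENICE_MODEL might be resolved with a fallback to its documented default; the resolveModel helper is hypothetical and not code from the repository.

// Hypothetical sketch: resolve a size-tiered model name from the
// environment variables shown in the hunk above, falling back to the
// defaults documented in the comments. Not code from the repository.
type ModelSize = "SMALL" | "MEDIUM" | "LARGE";

function resolveModel(
    provider: "INFERA" | "VENICE" | "NINETEEN_AI" | "AKASH_CHAT_API",
    size: ModelSize,
    fallback: string,
): string {
    // Builds names like SMALL_VENICE_MODEL or LARGE_AKASH_CHAT_API_MODEL.
    const value = process.env[`${size}_${provider}_MODEL`];
    return value && value.trim().length > 0 ? value : fallback;
}

// Defaults copied from the "# Default:" comments in the diff.
const veniceSmall = resolveModel("VENICE", "SMALL", "llama-3.3-70b");
const akashLarge = resolveModel("AKASH_CHAT_API", "LARGE", "Meta-Llama-3-1-405B-Instruct-FP8");
console.log(veniceSmall, akashLarge);
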
@@ -184,6 +184,12 @@ MEDIUM_GOOGLE_MODEL= # Default: gemini-1.5-flash-latest
LARGE_GOOGLE_MODEL = # Default: gemini-1.5-pro-latest
EMBEDDING_GOOGLE_MODEL = # Default: text-embedding-004

+ # Mistral Configuration
+ MISTRAL_MODEL =
+ SMALL_MISTRAL_MODEL = # Default: mistral-small-latest
+ MEDIUM_MISTRAL_MODEL = # Default: mistral-large-latest
+ LARGE_MISTRAL_MODEL = # Default: mistral-large-latest
+
# Groq Configuration
GROQ_API_KEY = # Starts with gsk_
SMALL_GROQ_MODEL = # Default: llama-3.1-8b-instant
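
The new Mistral block follows the same SMALL/MEDIUM/LARGE pattern as the other providers. A minimal sketch, assuming the values are read straight from process.env with the commented defaults as fallbacks; the MistralConfig shape and loadMistralConfig helper are illustrative only and not taken from the PR.

// Hypothetical sketch: load the Mistral settings added in this hunk.
interface MistralConfig {
    model?: string; // MISTRAL_MODEL has no documented default in the hunk
    small: string;
    medium: string;
    large: string;
}

function loadMistralConfig(): MistralConfig {
    // Fallbacks mirror the "# Default:" comments above.
    return {
        model: process.env.MISTRAL_MODEL,
        small: process.env.SMALL_MISTRAL_MODEL ?? "mistral-small-latest",
        medium: process.env.MEDIUM_MISTRAL_MODEL ?? "mistral-large-latest",
        large: process.env.LARGE_MISTRAL_MODEL ?? "mistral-large-latest",
    };
}

console.log(loadMistralConfig());
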
@@ -232,6 +238,13 @@ MEDIUM_VOLENGINE_MODEL= # Default: doubao-pro-128k
LARGE_VOLENGINE_MODEL = # Default: doubao-pro-256k
VOLENGINE_EMBEDDING_MODEL = # Default: doubao-embedding

+ # DeepSeek Configuration
+ DEEPSEEK_API_URL = # Default: https://api.deepseek.com
+ SMALL_DEEPSEEK_MODEL = # Default: deepseek-chat
+ MEDIUM_DEEPSEEK_MODEL = # Default: deepseek-chat
+ LARGE_DEEPSEEK_MODEL = # Default: deepseek-chat
+
+
# fal.ai Configuration
FAL_API_KEY =
FAL_AI_LORA_PATH =
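
The DeepSeek block adds an API URL plus the usual three model tiers. Below is a hedged TypeScript sketch of how these variables might drive a request against DeepSeek's OpenAI-compatible /chat/completions endpoint; DEEPSEEK_API_KEY and the deepseekChat helper are assumptions that do not appear in this hunk.

// Hypothetical sketch: call DeepSeek's OpenAI-compatible chat endpoint
// using the variables above. DEEPSEEK_API_KEY is an assumed variable.
async function deepseekChat(prompt: string): Promise<string> {
    const baseUrl = process.env.DEEPSEEK_API_URL ?? "https://api.deepseek.com";
    const model = process.env.SMALL_DEEPSEEK_MODEL ?? "deepseek-chat";

    const res = await fetch(`${baseUrl}/chat/completions`, {
        method: "POST",
        headers: {
            "Content-Type": "application/json",
            Authorization: `Bearer ${process.env.DEEPSEEK_API_KEY ?? ""}`,
        },
        body: JSON.stringify({
            model,
            messages: [{ role: "user", content: prompt }],
        }),
    });
    if (!res.ok) throw new Error(`DeepSeek request failed: ${res.status}`);
    const data = await res.json();
    return data.choices[0].message.content;
}
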
@@ -538,4 +551,8 @@ AKASH_MANIFEST_MODE=auto
# Default: Will use the SDL directory
AKASH_MANIFEST_PATH =
# Values: "strict" | "lenient" | "none" - Default: "strict"
- AKASH_MANIFEST_VALIDATION_LEVEL = strict
+ AKASH_MANIFEST_VALIDATION_LEVEL = strict
+
+ # Quai Network Ecosystem
+ QUAI_PRIVATE_KEY =
+ QUAI_RPC_URL = https://rpc.quai.network
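
The last hunk touches AKASH_MANIFEST_VALIDATION_LEVEL (its value is unchanged) and introduces the Quai Network variables. A small TypeScript sketch of how these might be validated at startup; the readAkashValidationLevel and readQuaiConfig helpers are hypothetical.

// Hypothetical sketch: validate the Akash manifest level and the new Quai
// settings. Allowed values mirror the "# Values:" comment in the hunk.
const VALIDATION_LEVELS = ["strict", "lenient", "none"] as const;
type ValidationLevel = (typeof VALIDATION_LEVELS)[number];

function readAkashValidationLevel(): ValidationLevel {
    const raw = process.env.AKASH_MANIFEST_VALIDATION_LEVEL ?? "strict";
    if (!(VALIDATION_LEVELS as readonly string[]).includes(raw)) {
        throw new Error(`Invalid AKASH_MANIFEST_VALIDATION_LEVEL: ${raw}`);
    }
    return raw as ValidationLevel;
}

function readQuaiConfig(): { privateKey: string; rpcUrl: string } {
    const privateKey = process.env.QUAI_PRIVATE_KEY;
    if (!privateKey) throw new Error("QUAI_PRIVATE_KEY is required for the Quai integration");
    // Fallback RPC URL comes from the default shown in the hunk above.
    return { privateKey, rpcUrl: process.env.QUAI_RPC_URL ?? "https://rpc.quai.network" };
}
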