Skip to content

Commit 565f4e7

Browse files
authored
Merge pull request #2067 from daizhengxue/main
feat: Add DeepSeek AI provider support to Eliza
2 parents 7e455cf + 93f0ae9 commit 565f4e7

File tree

6 files changed

+2455
-2267
lines changed

6 files changed

+2455
-2267
lines changed

.env.example

+34-27
Original file line numberDiff line numberDiff line change
@@ -100,32 +100,32 @@ MEDIUM_HYPERBOLIC_MODEL= # Default: meta-llama/Meta-Llama-3.1-70B-Instruc
100100
LARGE_HYPERBOLIC_MODEL= # Default: meta-llama/Meta-Llama-3.1-405B-Instruct
101101

102102
# Infera Configuration
103-
INFERA_API_KEY= # visit api.infera.org/docs to obtain an API key under /signup_user
104-
INFERA_MODEL= # Default: llama3.2:latest
105-
INFERA_SERVER_URL= # Default: https://api.infera.org/
106-
SMALL_INFERA_MODEL= #Recommended: llama3.2:latest
107-
MEDIUM_INFERA_MODEL= #Recommended: mistral-nemo:latest
108-
LARGE_INFERA_MODEL= #Recommended: mistral-small:latest
109-
110-
# Venice Configuration
111-
VENICE_API_KEY= # generate from venice settings
112-
SMALL_VENICE_MODEL= # Default: llama-3.3-70b
113-
MEDIUM_VENICE_MODEL= # Default: llama-3.3-70b
114-
LARGE_VENICE_MODEL= # Default: llama-3.1-405b
115-
IMAGE_VENICE_MODEL= # Default: fluently-xl
116-
117-
# Nineteen.ai Configuration
118-
NINETEEN_AI_API_KEY= # Get a free api key from https://nineteen.ai/app/api
119-
SMALL_NINETEEN_AI_MODEL= # Default: unsloth/Llama-3.2-3B-Instruct
120-
MEDIUM_NINETEEN_AI_MODEL= # Default: unsloth/Meta-Llama-3.1-8B-Instruct
121-
LARGE_NINETEEN_AI_MODEL= # Default: hugging-quants/Meta-Llama-3.1-70B-Instruct-AWQ-INT4
122-
IMAGE_NINETEEN_AI_MODE= # Default: dataautogpt3/ProteusV0.4-Lightning
123-
124-
# Akash Chat API Configuration docs: https://chatapi.akash.network/documentation
125-
AKASH_CHAT_API_KEY= # Get from https://chatapi.akash.network/
126-
SMALL_AKASH_CHAT_API_MODEL= # Default: Meta-Llama-3-2-3B-Instruct
127-
MEDIUM_AKASH_CHAT_API_MODEL= # Default: Meta-Llama-3-3-70B-Instruct
128-
LARGE_AKASH_CHAT_API_MODEL= # Default: Meta-Llama-3-1-405B-Instruct-FP8
103+
INFERA_API_KEY= # visit api.infera.org/docs to obtain an API key under /signup_user
104+
INFERA_MODEL= # Default: llama3.2:latest
105+
INFERA_SERVER_URL= # Default: https://api.infera.org/
106+
SMALL_INFERA_MODEL= #Recommended: llama3.2:latest
107+
MEDIUM_INFERA_MODEL= #Recommended: mistral-nemo:latest
108+
LARGE_INFERA_MODEL= #Recommended: mistral-small:latest
109+
110+
# Venice Configuration
111+
VENICE_API_KEY= # generate from venice settings
112+
SMALL_VENICE_MODEL= # Default: llama-3.3-70b
113+
MEDIUM_VENICE_MODEL= # Default: llama-3.3-70b
114+
LARGE_VENICE_MODEL= # Default: llama-3.1-405b
115+
IMAGE_VENICE_MODEL= # Default: fluently-xl
116+
117+
# Nineteen.ai Configuration
118+
NINETEEN_AI_API_KEY= # Get a free api key from https://nineteen.ai/app/api
119+
SMALL_NINETEEN_AI_MODEL= # Default: unsloth/Llama-3.2-3B-Instruct
120+
MEDIUM_NINETEEN_AI_MODEL= # Default: unsloth/Meta-Llama-3.1-8B-Instruct
121+
LARGE_NINETEEN_AI_MODEL= # Default: hugging-quants/Meta-Llama-3.1-70B-Instruct-AWQ-INT4
122+
IMAGE_NINETEEN_AI_MODE= # Default: dataautogpt3/ProteusV0.4-Lightning
123+
124+
# Akash Chat API Configuration docs: https://chatapi.akash.network/documentation
125+
AKASH_CHAT_API_KEY= # Get from https://chatapi.akash.network/
126+
SMALL_AKASH_CHAT_API_MODEL= # Default: Meta-Llama-3-2-3B-Instruct
127+
MEDIUM_AKASH_CHAT_API_MODEL= # Default: Meta-Llama-3-3-70B-Instruct
128+
LARGE_AKASH_CHAT_API_MODEL= # Default: Meta-Llama-3-1-405B-Instruct-FP8
129129

130130
# Livepeer configuration
131131
LIVEPEER_GATEWAY_URL= # Free inference gateways and docs: https://livepeer-eliza.com/
@@ -238,6 +238,13 @@ MEDIUM_VOLENGINE_MODEL= # Default: doubao-pro-128k
238238
LARGE_VOLENGINE_MODEL= # Default: doubao-pro-256k
239239
VOLENGINE_EMBEDDING_MODEL= # Default: doubao-embedding
240240

241+
# DeepSeek Configuration
242+
DEEPSEEK_API_URL= # Default: https://api.deepseek.com
243+
SMALL_DEEPSEEK_MODEL= # Default: deepseek-chat
244+
MEDIUM_DEEPSEEK_MODEL= # Default: deepseek-chat
245+
LARGE_DEEPSEEK_MODEL= # Default: deepseek-chat
246+
247+
241248
# fal.ai Configuration
242249
FAL_API_KEY=
243250
FAL_AI_LORA_PATH=
@@ -545,4 +552,4 @@ AKASH_MANIFEST_VALIDATION_LEVEL=strict
545552

546553
# Quai Network Ecosystem
547554
QUAI_PRIVATE_KEY=
548-
QUAI_RPC_URL=https://rpc.quai.network
555+
QUAI_RPC_URL=https://rpc.quai.network

agent/src/index.ts

-3
Original file line numberDiff line numberDiff line change
@@ -777,9 +777,6 @@ export async function createAgent(
777777
getSecret(character, "QUAI_PRIVATE_KEY")
778778
? quaiPlugin
779779
: null,
780-
getSecret(character, "QUAI_PRIVATE_KEY")
781-
? quaiPlugin
782-
: null,
783780
].filter(Boolean),
784781
providers: [],
785782
actions: [],

packages/core/src/generation.ts

+59
Original file line numberDiff line numberDiff line change
@@ -966,6 +966,37 @@ export async function generateText({
966966
break;
967967
}
968968

969+
case ModelProviderName.DEEPSEEK: {
970+
elizaLogger.debug("Initializing Deepseek model.");
971+
const serverUrl = models[provider].endpoint;
972+
const deepseek = createOpenAI({
973+
apiKey,
974+
baseURL: serverUrl,
975+
fetch: runtime.fetch,
976+
});
977+
978+
const { text: deepseekResponse } = await aiGenerateText({
979+
model: deepseek.languageModel(model),
980+
prompt: context,
981+
temperature: temperature,
982+
system:
983+
runtime.character.system ??
984+
settings.SYSTEM_PROMPT ??
985+
undefined,
986+
tools: tools,
987+
onStepFinish: onStepFinish,
988+
maxSteps: maxSteps,
989+
maxTokens: max_response_length,
990+
frequencyPenalty: frequency_penalty,
991+
presencePenalty: presence_penalty,
992+
experimental_telemetry: experimental_telemetry,
993+
});
994+
995+
response = deepseekResponse;
996+
elizaLogger.debug("Received response from Deepseek model.");
997+
break;
998+
}
999+
9691000
default: {
9701001
const errorMessage = `Unsupported provider: ${provider}`;
9711002
elizaLogger.error(errorMessage);
@@ -1893,6 +1924,8 @@ export async function handleProvider(
18931924
return await handleOpenRouter(options);
18941925
case ModelProviderName.OLLAMA:
18951926
return await handleOllama(options);
1927+
case ModelProviderName.DEEPSEEK:
1928+
return await handleDeepSeek(options);
18961929
default: {
18971930
const errorMessage = `Unsupported provider: ${provider}`;
18981931
elizaLogger.error(errorMessage);
@@ -2152,6 +2185,32 @@ async function handleOllama({
21522185
});
21532186
}
21542187

2188+
/**
2189+
* Handles object generation for DeepSeek models.
2190+
*
2191+
* @param {ProviderOptions} options - Options specific to DeepSeek.
2192+
* @returns {Promise<GenerateObjectResult<unknown>>} - A promise that resolves to generated objects.
2193+
*/
2194+
async function handleDeepSeek({
2195+
model,
2196+
apiKey,
2197+
schema,
2198+
schemaName,
2199+
schemaDescription,
2200+
mode,
2201+
modelOptions,
2202+
}: ProviderOptions): Promise<GenerateObjectResult<unknown>> {
2203+
const openai = createOpenAI({ apiKey, baseURL: models.deepseek.endpoint });
2204+
return await aiGenerateObject({
2205+
model: openai.languageModel(model),
2206+
schema,
2207+
schemaName,
2208+
schemaDescription,
2209+
mode,
2210+
...modelOptions,
2211+
});
2212+
}
2213+
21552214
// Add type definition for Together AI response
21562215
interface TogetherAIImageResponse {
21572216
data: Array<{

packages/core/src/models.ts

+33-1
Original file line numberDiff line numberDiff line change
@@ -415,7 +415,7 @@ export const models: Models = {
415415
frequency_penalty: 0.4,
416416
presence_penalty: 0.4,
417417
temperature: 0.7,
418-
}
418+
},
419419
},
420420
},
421421
[ModelProviderName.REDPILL]: {
@@ -966,6 +966,38 @@ export const models: Models = {
966966
},
967967
},
968968
},
969+
[ModelProviderName.DEEPSEEK]: {
970+
endpoint: settings.DEEPSEEK_API_URL || "https://api.deepseek.com",
971+
model: {
972+
[ModelClass.SMALL]: {
973+
name: settings.SMALL_DEEPSEEK_MODEL || "deepseek-chat",
974+
stop: [],
975+
maxInputTokens: 128000,
976+
maxOutputTokens: 8192,
977+
frequency_penalty: 0.0,
978+
presence_penalty: 0.0,
979+
temperature: 0.7,
980+
},
981+
[ModelClass.MEDIUM]: {
982+
name: settings.MEDIUM_DEEPSEEK_MODEL || "deepseek-chat",
983+
stop: [],
984+
maxInputTokens: 128000,
985+
maxOutputTokens: 8192,
986+
frequency_penalty: 0.0,
987+
presence_penalty: 0.0,
988+
temperature: 0.7,
989+
},
990+
[ModelClass.LARGE]: {
991+
name: settings.LARGE_DEEPSEEK_MODEL || "deepseek-chat",
992+
stop: [],
993+
maxInputTokens: 128000,
994+
maxOutputTokens: 8192,
995+
frequency_penalty: 0.0,
996+
presence_penalty: 0.0,
997+
temperature: 0.7,
998+
},
999+
},
1000+
},
9691001
};
9701002

9711003
export function getModelSettings(

packages/core/src/types.ts

+3-2
Original file line numberDiff line numberDiff line change
@@ -227,6 +227,7 @@ export type Models = {
227227
[ModelProviderName.NINETEEN_AI]: Model;
228228
[ModelProviderName.AKASH_CHAT_API]: Model;
229229
[ModelProviderName.LIVEPEER]: Model;
230+
[ModelProviderName.DEEPSEEK]: Model;
230231
[ModelProviderName.INFERA]: Model;
231232
};
232233

@@ -260,8 +261,8 @@ export enum ModelProviderName {
260261
NINETEEN_AI = "nineteen_ai",
261262
AKASH_CHAT_API = "akash_chat_api",
262263
LIVEPEER = "livepeer",
263-
LETZAI = "letzai",
264-
INFERA = "infera",
264+
DEEPSEEK="deepseek",
265+
INFERA="infera"
265266
}
266267

267268
/**

0 commit comments

Comments
 (0)