
Commit 21b1e45

Merge branch 'deepseek-provider'
2 parents ea9d1c0 + d7f2c18

File tree: .env.example, agent/src/index.ts, packages/core/src/generation.ts, packages/core/src/models.ts, packages/core/src/types.ts, pnpm-lock.yaml

6 files changed, +103 -73 lines changed


.env.example (+10)
@@ -16,6 +16,7 @@ LARGE_OPENAI_MODEL= # Default: gpt-4o
 EMBEDDING_OPENAI_MODEL= # Default: text-embedding-3-small
 IMAGE_OPENAI_MODEL= # Default: dall-e-3

+
 # Eternal AI's Decentralized Inference API
 ETERNALAI_URL=
 ETERNALAI_MODEL= # Default: "neuralmagic/Meta-Llama-3.1-405B-Instruct-quantized.w4a16"

@@ -37,6 +38,8 @@ SMALL_HYPERBOLIC_MODEL= # Default: meta-llama/Llama-3.2-3B-Instruct
 MEDIUM_HYPERBOLIC_MODEL= # Default: meta-llama/Meta-Llama-3.1-70B-Instruct
 LARGE_HYPERBOLIC_MODEL= # Default: meta-llama/Meta-Llama-3.1-405-Instruct

+DEEPSEEK_API_KEY= # DeepSeek API key
+
 # Livepeer configuration
 LIVEPEER_GATEWAY_URL= # Free inference gateways and docs: https://livepeer-eliza.com/
 LIVEPEER_IMAGE_MODEL= # Default: ByteDance/SDXL-Lightning

@@ -175,6 +178,13 @@ MEDIUM_VOLENGINE_MODEL= # Default: doubao-pro-128k
 LARGE_VOLENGINE_MODEL= # Default: doubao-pro-256k
 VOLENGINE_EMBEDDING_MODEL= # Default: doubao-embedding

+# DeepSeek Configuration
+DEEPSEEK_API_URL= # Default: https://api.deepseek.com
+SMALL_DEEPSEEK_MODEL= # Default: deepseek-chat
+MEDIUM_DEEPSEEK_MODEL= # Default: deepseek-chat
+LARGE_DEEPSEEK_MODEL= # Default: deepseek-chat
+
+
 # EVM
 EVM_PRIVATE_KEY=
 EVM_PROVIDER_URL=
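
Of the new variables, only DEEPSEEK_API_KEY has no default; the URL and model names fall back to the values noted in the comments above. A hypothetical minimal configuration for an agent that selects the DeepSeek provider (the key is a placeholder value, not from this commit):

    DEEPSEEK_API_KEY=sk-xxxxxxxx        # required when the DeepSeek provider is used
    # DEEPSEEK_API_URL and *_DEEPSEEK_MODEL may be left unset to use the defaults above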

agent/src/index.ts (+5)
@@ -349,6 +349,11 @@ export function getTokenForProvider(
                 character.settings?.secrets?.GOOGLE_GENERATIVE_AI_API_KEY ||
                 settings.GOOGLE_GENERATIVE_AI_API_KEY
             );
+        case ModelProviderName.DEEPSEEK:
+            return (
+                character.settings?.secrets?.DEEPSEEK_API_KEY ||
+                settings.DEEPSEEK_API_KEY
+            );
         default:
             const errorMessage = `Failed to get token - unsupported model provider: ${provider}`;
             elizaLogger.error(errorMessage);
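
The new case mirrors the other providers: a character-level secret takes precedence, then the global setting. A minimal sketch of a call site (illustrative; not part of this commit):

    // Resolve the DeepSeek key for a character; may be undefined if neither source is set.
    const token = getTokenForProvider(ModelProviderName.DEEPSEEK, character);
    if (!token) {
        elizaLogger.warn("No DEEPSEEK_API_KEY found in character secrets or settings.");
    }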

packages/core/src/generation.ts (+59)
@@ -795,6 +795,37 @@ export async function generateText({
             break;
         }

+        case ModelProviderName.DEEPSEEK: {
+            elizaLogger.debug("Initializing Deepseek model.");
+            const serverUrl = models[provider].endpoint;
+            const deepseek = createOpenAI({
+                apiKey,
+                baseURL: serverUrl,
+                fetch: runtime.fetch,
+            });
+
+            const { text: deepseekResponse } = await aiGenerateText({
+                model: deepseek.languageModel(model),
+                prompt: context,
+                temperature: temperature,
+                system:
+                    runtime.character.system ??
+                    settings.SYSTEM_PROMPT ??
+                    undefined,
+                tools: tools,
+                onStepFinish: onStepFinish,
+                maxSteps: maxSteps,
+                maxTokens: max_response_length,
+                frequencyPenalty: frequency_penalty,
+                presencePenalty: presence_penalty,
+                experimental_telemetry: experimental_telemetry,
+            });
+
+            response = deepseekResponse;
+            elizaLogger.debug("Received response from Deepseek model.");
+            break;
+        }
+
         default: {
             const errorMessage = `Unsupported provider: ${provider}`;
             elizaLogger.error(errorMessage);
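
The new case reuses the OpenAI-compatible client because DeepSeek's API follows the OpenAI chat-completions format: createOpenAI is simply pointed at the DeepSeek endpoint, and aiGenerateText is the Vercel AI SDK's generateText aliased on import. A self-contained sketch of the same pattern, with an illustrative model name and prompt (not part of this commit):

    import { createOpenAI } from "@ai-sdk/openai";
    import { generateText } from "ai";

    // Point the OpenAI-compatible client at the DeepSeek endpoint.
    const deepseek = createOpenAI({
        apiKey: process.env.DEEPSEEK_API_KEY,
        baseURL: process.env.DEEPSEEK_API_URL ?? "https://api.deepseek.com",
    });

    const { text } = await generateText({
        model: deepseek.languageModel("deepseek-chat"),
        prompt: "Say hello in one short sentence.",
        temperature: 0.7,
        maxTokens: 256,
    });
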
@@ -1664,6 +1695,8 @@ export async function handleProvider(
             return await handleOpenRouter(options);
         case ModelProviderName.OLLAMA:
             return await handleOllama(options);
+        case ModelProviderName.DEEPSEEK:
+            return await handleDeepSeek(options);
         default: {
             const errorMessage = `Unsupported provider: ${provider}`;
             elizaLogger.error(errorMessage);
@@ -1886,6 +1919,32 @@ async function handleOllama({
     });
 }

+/**
+ * Handles object generation for DeepSeek models.
+ *
+ * @param {ProviderOptions} options - Options specific to DeepSeek.
+ * @returns {Promise<GenerateObjectResult<unknown>>} - A promise that resolves to generated objects.
+ */
+async function handleDeepSeek({
+    model,
+    apiKey,
+    schema,
+    schemaName,
+    schemaDescription,
+    mode,
+    modelOptions,
+}: ProviderOptions): Promise<GenerateObjectResult<unknown>> {
+    const openai = createOpenAI({ apiKey, baseURL: models.deepseek.endpoint });
+    return await aiGenerateObject({
+        model: openai.languageModel(model),
+        schema,
+        schemaName,
+        schemaDescription,
+        mode,
+        ...modelOptions,
+    });
+}
+
 // Add type definition for Together AI response
 interface TogetherAIImageResponse {
     data: Array<{
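
handleDeepSeek applies the same approach to structured output: an OpenAI-compatible client pointed at models.deepseek.endpoint, handed to the SDK's generateObject (imported as aiGenerateObject). A self-contained sketch with an illustrative zod schema and prompt; in eliza these arrive through ProviderOptions rather than being hard-coded:

    import { z } from "zod";
    import { createOpenAI } from "@ai-sdk/openai";
    import { generateObject } from "ai";

    const deepseek = createOpenAI({
        apiKey: process.env.DEEPSEEK_API_KEY,
        baseURL: "https://api.deepseek.com",
    });

    // Illustrative schema and prompt; eliza supplies schema, schemaName,
    // schemaDescription, and mode via ProviderOptions.
    const { object } = await generateObject({
        model: deepseek.languageModel("deepseek-chat"),
        schema: z.object({ mood: z.string(), reply: z.string() }),
        schemaName: "Reply",
        schemaDescription: "A short structured reply.",
        mode: "json",
        prompt: "Reply to a greeting and describe the mood.",
    });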

packages/core/src/models.ts (+16)
@@ -514,6 +514,22 @@ export const models: Models = {
             [ModelClass.IMAGE]: settings.LIVEPEER_IMAGE_MODEL || "ByteDance/SDXL-Lightning",
         },
     },
+    [ModelProviderName.DEEPSEEK]: {
+        endpoint: settings.DEEPSEEK_API_URL || "https://api.deepseek.com",
+        settings: {
+            stop: [],
+            maxInputTokens: 128000,
+            maxOutputTokens: 8192,
+            frequency_penalty: 0.0,
+            presence_penalty: 0.0,
+            temperature: 0.7,
+        },
+        model: {
+            [ModelClass.SMALL]: settings.SMALL_DEEPSEEK_MODEL || "deepseek-chat",
+            [ModelClass.MEDIUM]: settings.MEDIUM_DEEPSEEK_MODEL || "deepseek-chat",
+            [ModelClass.LARGE]: settings.LARGE_DEEPSEEK_MODEL || "deepseek-chat",
+        },
+    },
 };

 export function getModel(provider: ModelProviderName, type: ModelClass) {
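
With none of the DEEPSEEK_* variables set, the new registry entry resolves every model class to deepseek-chat and the endpoint to the public API URL. A hypothetical lookup (getModel is the helper whose signature appears in the context above):

    // Values expected from the registry entry added in this hunk, assuming no env overrides.
    const endpoint = models[ModelProviderName.DEEPSEEK].endpoint;         // "https://api.deepseek.com"
    const small = getModel(ModelProviderName.DEEPSEEK, ModelClass.SMALL); // expected: "deepseek-chat"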

packages/core/src/types.ts (+2)
@@ -213,6 +213,7 @@ export type Models = {
     [ModelProviderName.VENICE]: Model;
     [ModelProviderName.AKASH_CHAT_API]: Model;
     [ModelProviderName.LIVEPEER]: Model;
+    [ModelProviderName.DEEPSEEK]: Model;
 };

 /**
@@ -243,6 +244,7 @@ export enum ModelProviderName {
     VENICE = "venice",
     AKASH_CHAT_API = "akash_chat_api",
     LIVEPEER = "livepeer",
+    DEEPSEEK = "deepseek",
 }

 /**
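
With the enum value in place, a character can opt into the provider by name. A hypothetical snippet; the Character type, its modelProvider field, and the package name are assumed from the surrounding eliza codebase rather than shown in this diff:

    import { ModelProviderName, type Character } from "@elizaos/core"; // package name assumed

    // Only the provider-related fields are shown; a real character defines many more.
    const character: Partial<Character> = {
        name: "deepseek-agent",
        modelProvider: ModelProviderName.DEEPSEEK, // "deepseek" in a character JSON file
    };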

pnpm-lock.yaml (+11 -73)

Generated file; diff not rendered by default.
