Commit 4d1e66c

Merge pull request #335 from tsubasakong/main
feat: Add Heurist API Integration as New Model Provider
2 parents c4e8601 + 2e3d507 commit 4d1e66c

File tree

6 files changed: +90 -25474 lines changed

.env.example

+3
@@ -55,6 +55,9 @@ LARGE_OLLAMA_MODEL= #default hermes3:70b
 # For asking Claude stuff
 ANTHROPIC_API_KEY=
 
+# Heurist API
+HEURIST_API_KEY=
+
 WALLET_PRIVATE_KEY=EXAMPLE_WALLET_PRIVATE_KEY
 WALLET_PUBLIC_KEY=EXAMPLE_WALLET_PUBLIC_KEY
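
Beyond the environment variable, the key can also be supplied per character; the token lookup added in packages/agent/src/index.ts below checks the character's secrets first. A hypothetical character snippet (field values illustrative):

    // Hypothetical character definition; only these fields matter for Heurist.
    const character = {
        modelProvider: "heurist",            // the ModelProviderName.HEURIST value
        settings: {
            secrets: {
                HEURIST_API_KEY: "your-key", // preferred over the env var
            },
        },
    };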

packages/agent/src/index.ts

+5
@@ -150,6 +150,11 @@ export function getTokenForProvider(
                 character.settings?.secrets?.GROK_API_KEY ||
                 settings.GROK_API_KEY
             );
+        case ModelProviderName.HEURIST:
+            return (
+                character.settings?.secrets?.HEURIST_API_KEY ||
+                settings.HEURIST_API_KEY
+            );
     }
 }
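
A minimal sketch of the resolution order this adds, assuming a character object like the one above (import paths and package name are assumptions):

    import { getTokenForProvider } from "./index";     // path illustrative
    import { ModelProviderName } from "@ai16z/eliza";  // package name assumed

    // The secret from the character file wins; settings.HEURIST_API_KEY
    // (the .env value) is the fallback.
    const token = getTokenForProvider(ModelProviderName.HEURIST, character);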

packages/core/src/generation.ts

+58 -7
@@ -312,6 +312,25 @@ export async function generateText({
             console.debug("Received response from Ollama model.");
             break;
 
+        case ModelProviderName.HEURIST: {
+            elizaLogger.debug("Initializing Heurist model.");
+            const heurist = createOpenAI({ apiKey: apiKey, baseURL: endpoint });
+
+            const { text: heuristResponse } = await aiGenerateText({
+                model: heurist.languageModel(model),
+                prompt: context,
+                system: runtime.character.system ?? settings.SYSTEM_PROMPT ?? undefined,
+                temperature: temperature,
+                maxTokens: max_response_length,
+                frequencyPenalty: frequency_penalty,
+                presencePenalty: presence_penalty,
+            });
+
+            response = heuristResponse;
+            elizaLogger.debug("Received response from Heurist model.");
+            break;
+        }
+
         default: {
             const errorMessage = `Unsupported provider: ${provider}`;
             elizaLogger.error(errorMessage);
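
Because the gateway is OpenAI-compatible, the same pattern works outside the runtime; a minimal standalone sketch using the createOpenAI and aiGenerateText imports that generation.ts already has (endpoint and model name taken from the models.ts entry below, prompt illustrative):

    import { createOpenAI } from "@ai-sdk/openai";
    import { generateText as aiGenerateText } from "ai";

    // Point the OpenAI-compatible client at Heurist's LLM gateway.
    const heurist = createOpenAI({
        apiKey: process.env.HEURIST_API_KEY,
        baseURL: "https://llm-gateway.heurist.xyz",
    });

    const { text } = await aiGenerateText({
        model: heurist.languageModel("meta-llama/llama-3-70b-instruct"),
        prompt: "Say hello in one sentence.",
        temperature: 0.7,
        maxTokens: 256,
    });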
@@ -681,6 +700,12 @@ export const generateImage = async (
         width: number;
         height: number;
         count?: number;
+        negativePrompt?: string;
+        numIterations?: number;
+        guidanceScale?: number;
+        seed?: number;
+        modelId?: string;
+        jobId?: string;
     },
     runtime: IAgentRuntime
 ): Promise<{
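
A call exercising the new options might look like this sketch (prompt and values illustrative; runtime is an IAgentRuntime configured for Heurist):

    const result = await generateImage(
        {
            prompt: "a pixel-art frog in a wizard hat",
            width: 512,
            height: 512,
            negativePrompt: "blurry, low quality",
            numIterations: 25,  // sampling steps; the branch defaults to 20
            guidanceScale: 7.5,
            seed: 42,           // -1 (the default) requests a random seed
            modelId: "PepeXL",
        },
        runtime
    );
    // On success the Heurist branch returns { success: true, data: [url] }.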
@@ -696,14 +721,40 @@ export const generateImage = async (
 
     const model = getModel(runtime.character.modelProvider, ModelClass.IMAGE);
     const modelSettings = models[runtime.character.modelProvider].imageSettings;
-    // some fallbacks for backwards compat, should remove in the future
-    const apiKey =
-        runtime.token ??
-        runtime.getSetting("TOGETHER_API_KEY") ??
-        runtime.getSetting("OPENAI_API_KEY");
-
+    const apiKey = runtime.token ?? runtime.getSetting("HEURIST_API_KEY") ?? runtime.getSetting("TOGETHER_API_KEY") ?? runtime.getSetting("OPENAI_API_KEY");
     try {
-        if (runtime.character.modelProvider === ModelProviderName.LLAMACLOUD) {
+        if (runtime.character.modelProvider === ModelProviderName.HEURIST) {
+            const response = await fetch('http://sequencer.heurist.xyz/submit_job', {
+                method: 'POST',
+                headers: {
+                    'Authorization': `Bearer ${apiKey}`,
+                    'Content-Type': 'application/json',
+                },
+                body: JSON.stringify({
+                    job_id: data.jobId || crypto.randomUUID(),
+                    model_input: {
+                        SD: {
+                            prompt: data.prompt,
+                            neg_prompt: data.negativePrompt,
+                            num_iterations: data.numIterations || 20,
+                            width: data.width || 512,
+                            height: data.height || 512,
+                            guidance_scale: data.guidanceScale,
+                            seed: data.seed || -1,
+                        }
+                    },
+                    model_id: data.modelId || 'PepeXL', // Default to PepeXL if not specified
+                })
+            });
+
+            if (!response.ok) {
+                throw new Error(`Heurist image generation failed: ${response.statusText}`);
+            }
+
+            const result = await response.json();
+            return { success: true, data: [result.url] };
+        }
+        else if (runtime.character.modelProvider === ModelProviderName.LLAMACLOUD) {
             const together = new Together({ apiKey: apiKey as string });
             const response = await together.images.create({
                 model: "black-forest-labs/FLUX.1-schnell",

packages/core/src/models.ts

+22
@@ -217,8 +217,30 @@ const models: Models = {
                 settings.OLLAMA_EMBEDDING_MODEL || "mxbai-embed-large",
         },
     },
+    [ModelProviderName.HEURIST]: {
+        settings: {
+            stop: [],
+            maxInputTokens: 128000,
+            maxOutputTokens: 8192,
+            repetition_penalty: 0.0,
+            temperature: 0.7,
+        },
+        imageSettings: {
+            steps: 20,
+        },
+        endpoint: "https://llm-gateway.heurist.xyz",
+        model: {
+            [ModelClass.SMALL]: "meta-llama/llama-3-70b-instruct",
+            [ModelClass.MEDIUM]: "meta-llama/llama-3-70b-instruct",
+            [ModelClass.LARGE]: "meta-llama/llama-3.1-405b-instruct",
+            [ModelClass.EMBEDDING]: "", // Add later
+            [ModelClass.IMAGE]: "PepeXL",
+        },
+    }
 };
 
+
+
 export function getModel(provider: ModelProviderName, type: ModelClass) {
     return models[provider].model[type];
 }
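
With the entry in place, the existing lookup helper resolves Heurist models like any other provider; a quick sketch (import paths illustrative):

    import { getModel } from "./models";
    import { ModelClass, ModelProviderName } from "./types";

    const small = getModel(ModelProviderName.HEURIST, ModelClass.SMALL);
    // → "meta-llama/llama-3-70b-instruct"
    const large = getModel(ModelProviderName.HEURIST, ModelClass.LARGE);
    // → "meta-llama/llama-3.1-405b-instruct"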

packages/core/src/types.ts

+2
@@ -114,6 +114,7 @@ export type Models = {
     [ModelProviderName.REDPILL]: Model;
     [ModelProviderName.OPENROUTER]: Model;
     [ModelProviderName.OLLAMA]: Model;
+    [ModelProviderName.HEURIST]: Model;
 };
 
 export enum ModelProviderName {
@@ -128,6 +129,7 @@ export enum ModelProviderName {
     REDPILL = "redpill",
     OPENROUTER = "openrouter",
     OLLAMA = "ollama",
+    HEURIST = "heurist",
 }
 
 /**
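
Two things follow from this pair of changes: every object typed as Models must now carry a HEURIST entry (which is what forces the models.ts addition above), and the string "heurist" becomes a valid modelProvider value in character files. A tiny sketch of the enum's runtime value (import path illustrative):

    import { ModelProviderName } from "./types";

    // String enums carry their value at runtime; this is the string a
    // character file's modelProvider field must use.
    console.log(ModelProviderName.HEURIST); // "heurist"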
