
Commit d55a3c7

feat: Integrate Livepeer LLM provider (elizaOS#2154)

Authored by UD1sto and Titan-Node
* add livepeer on index.ts as llm provider
* updated livepeer models
* add livepeer as llm provider
* add retry logic on livepeer img gen
* add handleLivepeer
* update test
* add livepeer model keys on .example.env
* Merge pull request #2 from Titan-Node/livepeer-doc-updates: updated docs for Livepeer LLM integration
* add endpoint on livepeer on models.ts
* edit livepeer model config at model.ts
* Add Livepeer to image gen plugin environments. Fixes this error:

  ```
  Error handling message: Error: Image generation configuration validation failed: : At least one of ANTHROPIC_API_KEY, NINETEEN_AI_API_KEY, TOGETHER_API_KEY, HEURIST_API_KEY, FAL_API_KEY, OPENAI_API_KEY or VENICE_API_KEY is required
      at validateImageGenConfig (file:///root/eliza-test/eliza-livepeer-integration/packages/plugin-image-generation/dist/index.js:38:19)
  ```

* add comments on livepeer model sizes
* remove retry logic from livepeer generate text and img
* Fixed .env naming convention and fixed mismatch bug within code
* add bearer on livepeer calls
* change in parsing to accommodate the new livepeer update
* add nineteen api key on the message

---------

Co-authored-by: Titan Node <hello@titan-node.com>
1 parent f70c1cd · commit d55a3c7

9 files changed: +218 -30 lines changed


.env.example (+6 -2)

```diff
@@ -141,8 +141,12 @@ MEDIUM_AKASH_CHAT_API_MODEL= # Default: Meta-Llama-3-3-70B-Instruct
 LARGE_AKASH_CHAT_API_MODEL= # Default: Meta-Llama-3-1-405B-Instruct-FP8
 
 # Livepeer configuration
-LIVEPEER_GATEWAY_URL= # Free inference gateways and docs: https://livepeer-eliza.com/
-LIVEPEER_IMAGE_MODEL= # Default: ByteDance/SDXL-Lightning
+
+LIVEPEER_GATEWAY_URL=https://dream-gateway.livepeer.cloud # Free inference gateways and docs: https://livepeer-eliza.com/
+IMAGE_LIVEPEER_MODEL= # Default: ByteDance/SDXL-Lightning
+SMALL_LIVEPEER_MODEL= # Default: meta-llama/Meta-Llama-3.1-8B-Instruct
+MEDIUM_LIVEPEER_MODEL= # Default: meta-llama/Meta-Llama-3.1-8B-Instruct
+LARGE_LIVEPEER_MODEL= # Default: meta-llama/Meta-Llama-3.1-8B-Instruct
 
 # Speech Synthesis
 ELEVENLABS_XI_API_KEY= # API key from elevenlabs
```
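For quick reference, a minimal sketch of how these new variables resolve against their documented defaults. This is illustrative only: eliza reads them through its settings module rather than `process.env` directly.

```typescript
// Sketch only: mirrors the defaults documented in the diff above.
const livepeerConfig = {
    gatewayUrl:
        process.env.LIVEPEER_GATEWAY_URL ?? "https://dream-gateway.livepeer.cloud",
    imageModel: process.env.IMAGE_LIVEPEER_MODEL ?? "ByteDance/SDXL-Lightning",
    smallModel:
        process.env.SMALL_LIVEPEER_MODEL ?? "meta-llama/Meta-Llama-3.1-8B-Instruct",
    mediumModel:
        process.env.MEDIUM_LIVEPEER_MODEL ?? "meta-llama/Meta-Llama-3.1-8B-Instruct",
    largeModel:
        process.env.LARGE_LIVEPEER_MODEL ?? "meta-llama/Meta-Llama-3.1-8B-Instruct",
};
```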

agent/src/index.ts (+5)

```diff
@@ -512,6 +512,11 @@ export function getTokenForProvider(
                 character.settings?.secrets?.DEEPSEEK_API_KEY ||
                 settings.DEEPSEEK_API_KEY
             );
+        case ModelProviderName.LIVEPEER:
+            return (
+                character.settings?.secrets?.LIVEPEER_GATEWAY_URL ||
+                settings.LIVEPEER_GATEWAY_URL
+            );
         default:
             const errorMessage = `Failed to get token - unsupported model provider: ${provider}`;
             elizaLogger.error(errorMessage);
```
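A usage sketch: for Livepeer the "token" returned here is the gateway URL itself, with a character-level secret taking precedence over the global setting. `getTokenForProvider` and `ModelProviderName` are the real identifiers from the diff; the `character` object is hypothetical.

```typescript
// Hypothetical call site for the new case above.
const token = getTokenForProvider(ModelProviderName.LIVEPEER, character);
// -> character.settings?.secrets?.LIVEPEER_GATEWAY_URL if set,
//    otherwise settings.LIVEPEER_GATEWAY_URL
//    (e.g. "https://dream-gateway.livepeer.cloud")
```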

docs/docs/advanced/fine-tuning.md (+40 -18)

````diff
@@ -26,6 +26,7 @@ enum ModelProviderName {
     REDPILL,
     OPENROUTER,
     HEURIST,
+    LIVEPEER,
 }
 ```
@@ -272,24 +273,45 @@ const llamaLocalSettings = {
 
 ```typescript
 const heuristSettings = {
-  settings: {
-    stop: [],
-    maxInputTokens: 32768,
-    maxOutputTokens: 8192,
-    repetition_penalty: 0.0,
-    temperature: 0.7,
-  },
-  imageSettings: {
-    steps: 20,
-  },
-  endpoint: "https://llm-gateway.heurist.xyz",
-  model: {
-    [ModelClass.SMALL]: "hermes-3-llama3.1-8b",
-    [ModelClass.MEDIUM]: "mistralai/mixtral-8x7b-instruct",
-    [ModelClass.LARGE]: "nvidia/llama-3.1-nemotron-70b-instruct",
-    [ModelClass.EMBEDDING]: "", // Add later
-    [ModelClass.IMAGE]: "FLUX.1-dev",
-  },
+    settings: {
+        stop: [],
+        maxInputTokens: 32768,
+        maxOutputTokens: 8192,
+        repetition_penalty: 0.0,
+        temperature: 0.7,
+    },
+    imageSettings: {
+        steps: 20,
+    },
+    endpoint: "https://llm-gateway.heurist.xyz",
+    model: {
+        [ModelClass.SMALL]: "hermes-3-llama3.1-8b",
+        [ModelClass.MEDIUM]: "mistralai/mixtral-8x7b-instruct",
+        [ModelClass.LARGE]: "nvidia/llama-3.1-nemotron-70b-instruct",
+        [ModelClass.EMBEDDING]: "", // Add later
+        [ModelClass.IMAGE]: "FLUX.1-dev",
+    },
+};
+```
+
+### Livepeer Provider
+
+```typescript
+const livepeerSettings = {
+    settings: {
+        stop: [],
+        maxInputTokens: 128000,
+        maxOutputTokens: 8192,
+        repetition_penalty: 0.4,
+        temperature: 0.7,
+    },
+    endpoint: "https://dream-gateway.livepeer.cloud",
+    model: {
+        [ModelClass.SMALL]: "meta-llama/Meta-Llama-3.1-8B-Instruct",
+        [ModelClass.MEDIUM]: "meta-llama/Meta-Llama-3.1-8B-Instruct",
+        [ModelClass.LARGE]: "meta-llama/Llama-3.3-70B-Instruct",
+        [ModelClass.IMAGE]: "ByteDance/SDXL-Lightning",
+    },
 };
 ```
````
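Given the `livepeerSettings` shape documented above, per-class model selection is a plain map lookup; a brief sketch:

```typescript
// Sketch using the livepeerSettings object from the docs diff above.
const largeModel = livepeerSettings.model[ModelClass.LARGE];
// "meta-llama/Llama-3.3-70B-Instruct"
const imageModel = livepeerSettings.model[ModelClass.IMAGE];
// "ByteDance/SDXL-Lightning"
```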

docs/docs/core/characterfile.md (+1 -1)

```diff
@@ -92,7 +92,7 @@ The character's display name for identification and in conversations.
 
 #### `modelProvider` (required)
 
-Specifies the AI model provider. Supported options from [ModelProviderName](/api/enumerations/modelprovidername) include `anthropic`, `llama_local`, `openai`, and others.
+Specifies the AI model provider. Supported options from [ModelProviderName](/api/enumerations/modelprovidername) include `anthropic`, `llama_local`, `openai`, `livepeer`, and others.
 
 #### `clients` (required)
 
```
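For illustration, a hypothetical character definition that opts into the new provider. Only `modelProvider` and the secrets lookup are taken from this commit; the other values are placeholders.

```typescript
// Hypothetical character excerpt; the secret mirrors the
// getTokenForProvider lookup added in agent/src/index.ts.
const character = {
    name: "LivepeerAgent",
    modelProvider: "livepeer",
    clients: [],
    settings: {
        secrets: {
            LIVEPEER_GATEWAY_URL: "https://dream-gateway.livepeer.cloud",
        },
    },
};
```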

docs/docs/quickstart.md (+7)

```diff
@@ -92,10 +92,17 @@ Eliza supports multiple AI models:
 - **Heurist**: Set `modelProvider: "heurist"` in your character file. Most models are uncensored.
   - LLM: Select available LLMs [here](https://docs.heurist.ai/dev-guide/supported-models#large-language-models-llms) and configure `SMALL_HEURIST_MODEL`,`MEDIUM_HEURIST_MODEL`,`LARGE_HEURIST_MODEL`
   - Image Generation: Select available Stable Diffusion or Flux models [here](https://docs.heurist.ai/dev-guide/supported-models#image-generation-models) and configure `HEURIST_IMAGE_MODEL` (default is FLUX.1-dev)
+<<<<<<< HEAD
 - **Llama**: Set `OLLAMA_MODEL` to your chosen model
 - **Grok**: Set `GROK_API_KEY` to your Grok API key and set `modelProvider: "grok"` in your character file
 - **OpenAI**: Set `OPENAI_API_KEY` to your OpenAI API key and set `modelProvider: "openai"` in your character file
 - **Livepeer**: Set `LIVEPEER_IMAGE_MODEL` to your chosen Livepeer image model, available models [here](https://livepeer-eliza.com/)
+=======
+- **Llama**: Set `XAI_MODEL=meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo`
+- **Grok**: Set `XAI_MODEL=grok-beta`
+- **OpenAI**: Set `XAI_MODEL=gpt-4o-mini` or `gpt-4o`
+- **Livepeer**: Set `SMALL_LIVEPEER_MODEL`,`MEDIUM_LIVEPEER_MODEL`,`LARGE_LIVEPEER_MODEL` and `IMAGE_LIVEPEER_MODEL` to your desired models listed [here](https://livepeer-eliza.com/).
+>>>>>>> 95f56e6b4 (Merge pull request #2 from Titan-Node/livepeer-doc-updates)
 
 You set which model to use inside the character JSON file
 
```

packages/core/__tests__/models.test.ts (+35)

```diff
@@ -18,6 +18,8 @@ vi.mock("../settings", () => {
         LLAMACLOUD_MODEL_LARGE: "mock-llama-large",
         TOGETHER_MODEL_SMALL: "mock-together-small",
         TOGETHER_MODEL_LARGE: "mock-together-large",
+        LIVEPEER_GATEWAY_URL: "http://gateway.test-gateway",
+        IMAGE_LIVEPEER_MODEL: "ByteDance/SDXL-Lightning",
     },
     loadEnv: vi.fn(),
 };
@@ -125,6 +127,26 @@ describe("Model Provider Configuration", () => {
             );
         });
     });
+    describe("Livepeer Provider", () => {
+        test("should have correct endpoint configuration", () => {
+            expect(models[ModelProviderName.LIVEPEER].endpoint).toBe("http://gateway.test-gateway");
+        });
+
+        test("should have correct model mappings", () => {
+            const livepeerModels = models[ModelProviderName.LIVEPEER].model;
+            expect(livepeerModels[ModelClass.SMALL]).toBe("meta-llama/Meta-Llama-3.1-8B-Instruct");
+            expect(livepeerModels[ModelClass.MEDIUM]).toBe("meta-llama/Meta-Llama-3.1-8B-Instruct");
+            expect(livepeerModels[ModelClass.LARGE]).toBe("meta-llama/Meta-Llama-3.1-8B-Instruct");
+            expect(livepeerModels[ModelClass.IMAGE]).toBe("ByteDance/SDXL-Lightning");
+        });
+
+        test("should have correct settings configuration", () => {
+            const settings = models[ModelProviderName.LIVEPEER].settings;
+            expect(settings.maxInputTokens).toBe(128000);
+            expect(settings.maxOutputTokens).toBe(8192);
+            expect(settings.temperature).toBe(0);
+        });
+    });
 });
 
 describe("Model Retrieval Functions", () => {
@@ -224,3 +246,16 @@ describe("Environment Variable Integration", () => {
         );
     });
 });
+
+describe("Generation with Livepeer", () => {
+    test("should have correct image generation settings", () => {
+        const livepeerConfig = models[ModelProviderName.LIVEPEER];
+        expect(livepeerConfig.model[ModelClass.IMAGE]).toBe("ByteDance/SDXL-Lightning");
+        expect(livepeerConfig.settings.temperature).toBe(0);
+    });
+
+    test("should use default image model", () => {
+        delete process.env.IMAGE_LIVEPEER_MODEL;
+        expect(models[ModelProviderName.LIVEPEER].model[ModelClass.IMAGE]).toBe("ByteDance/SDXL-Lightning");
+    });
+});
```

packages/core/src/generation.ts (+85 -2)

```diff
@@ -1188,6 +1188,55 @@ export async function generateText({
                 break;
             }
 
+            case ModelProviderName.LIVEPEER: {
+                elizaLogger.debug("Initializing Livepeer model.");
+
+                if (!endpoint) {
+                    throw new Error("Livepeer Gateway URL is not defined");
+                }
+
+                const requestBody = {
+                    model: model,
+                    messages: [
+                        {
+                            role: "system",
+                            content: runtime.character.system ?? settings.SYSTEM_PROMPT ?? "You are a helpful assistant"
+                        },
+                        {
+                            role: "user",
+                            content: context
+                        }
+                    ],
+                    max_tokens: max_response_length,
+                    stream: false
+                };
+
+                const fetchResponse = await runtime.fetch(endpoint+'/llm', {
+                    method: "POST",
+                    headers: {
+                        "accept": "text/event-stream",
+                        "Content-Type": "application/json",
+                        "Authorization": "Bearer eliza-app-llm"
+                    },
+                    body: JSON.stringify(requestBody)
+                });
+
+                if (!fetchResponse.ok) {
+                    const errorText = await fetchResponse.text();
+                    throw new Error(`Livepeer request failed (${fetchResponse.status}): ${errorText}`);
+                }
+
+                const json = await fetchResponse.json();
+
+                if (!json?.choices?.[0]?.message?.content) {
+                    throw new Error("Invalid response format from Livepeer");
+                }
+
+                response = json.choices[0].message.content.replace(/<\|start_header_id\|>assistant<\|end_header_id\|>\n\n/, '');
+                elizaLogger.debug("Successfully received response from Livepeer model");
+                break;
+            }
+
             default: {
                 const errorMessage = `Unsupported provider: ${provider}`;
                 elizaLogger.error(errorMessage);
@@ -1721,7 +1770,6 @@ export const generateImage = async (
                     }
                 },
             });
-
             // Convert the returned image URLs to base64 to match existing functionality
             const base64Promises = result.data.images.map(async (image) => {
                 const response = await fetch(image.url);
@@ -1822,15 +1870,18 @@
             if (!baseUrl.protocol.startsWith("http")) {
                 throw new Error("Invalid Livepeer Gateway URL protocol");
             }
+
             const response = await fetch(
                 `${baseUrl.toString()}text-to-image`,
                 {
                     method: "POST",
                     headers: {
                         "Content-Type": "application/json",
+                        Authorization: "Bearer eliza-app-img",
                     },
                     body: JSON.stringify({
-                        model_id: model,
+                        model_id:
+                            data.modelId || "ByteDance/SDXL-Lightning",
                         prompt: data.prompt,
                         width: data.width || 1024,
                         height: data.height || 1024,
@@ -2108,6 +2159,8 @@
             return await handleOllama(options);
         case ModelProviderName.DEEPSEEK:
             return await handleDeepSeek(options);
+        case ModelProviderName.LIVEPEER:
+            return await handleLivepeer(options);
         default: {
             const errorMessage = `Unsupported provider: ${provider}`;
             elizaLogger.error(errorMessage);
@@ -2395,6 +2448,36 @@ async function handleDeepSeek({
     });
 }
 
+async function handleLivepeer({
+    model,
+    apiKey,
+    schema,
+    schemaName,
+    schemaDescription,
+    mode,
+    modelOptions,
+}: ProviderOptions): Promise<GenerateObjectResult<unknown>> {
+    console.log("Livepeer provider api key:", apiKey);
+    if (!apiKey) {
+        throw new Error("Livepeer provider requires LIVEPEER_GATEWAY_URL to be configured");
+    }
+
+    const livepeerClient = createOpenAI({
+        apiKey,
+        baseURL: apiKey // Use the apiKey as the baseURL since it contains the gateway URL
+    });
+
+    return await aiGenerateObject({
+        model: livepeerClient.languageModel(model),
+        schema,
+        schemaName,
+        schemaDescription,
+        mode,
+        ...modelOptions,
+    });
+}
+
+
 // Add type definition for Together AI response
 interface TogetherAIImageResponse {
     data: Array<{
```
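Outside eliza, the text-generation branch above boils down to a single POST against the gateway's `/llm` route. A standalone sketch follows; the gateway URL, model, and prompt are illustrative, while the headers and body shape come straight from the diff.

```typescript
// Standalone sketch of the request the LIVEPEER branch of generateText sends.
async function queryLivepeerLlm(prompt: string): Promise<string> {
    const res = await fetch("https://dream-gateway.livepeer.cloud/llm", {
        method: "POST",
        headers: {
            accept: "text/event-stream",
            "Content-Type": "application/json",
            Authorization: "Bearer eliza-app-llm",
        },
        body: JSON.stringify({
            model: "meta-llama/Meta-Llama-3.1-8B-Instruct",
            messages: [
                { role: "system", content: "You are a helpful assistant" },
                { role: "user", content: prompt },
            ],
            max_tokens: 512,
            stream: false,
        }),
    });
    if (!res.ok) {
        throw new Error(`Livepeer request failed (${res.status}): ${await res.text()}`);
    }
    const json = await res.json();
    // Strip the Llama header token the gateway may prefix, as the diff does.
    return json.choices[0].message.content.replace(
        /<\|start_header_id\|>assistant<\|end_header_id\|>\n\n/,
        ""
    );
}
```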

packages/core/src/models.ts (+32 -5)

```diff
@@ -932,11 +932,38 @@ export const models: Models = {
         },
     },
     [ModelProviderName.LIVEPEER]: {
-        // livepeer endpoint is handled from the sdk
+        endpoint: settings.LIVEPEER_GATEWAY_URL,
         model: {
+            [ModelClass.SMALL]: {
+                name:
+                    settings.SMALL_LIVEPEER_MODEL ||
+                    "meta-llama/Meta-Llama-3.1-8B-Instruct",
+                stop: [],
+                maxInputTokens: 8000,
+                maxOutputTokens: 8192,
+                temperature: 0,
+            },
+            [ModelClass.MEDIUM]: {
+                name:
+                    settings.MEDIUM_LIVEPEER_MODEL ||
+                    "meta-llama/Meta-Llama-3.1-8B-Instruct",
+                stop: [],
+                maxInputTokens: 8000,
+                maxOutputTokens: 8192,
+                temperature: 0,
+            },
+            [ModelClass.LARGE]: {
+                name:
+                    settings.LARGE_LIVEPEER_MODEL ||
+                    "meta-llama/Meta-Llama-3.1-8B-Instruct",
+                stop: [],
+                maxInputTokens: 8000,
+                maxOutputTokens: 8192,
+                temperature: 0,
+            },
             [ModelClass.IMAGE]: {
                 name:
-                    settings.LIVEPEER_IMAGE_MODEL || "ByteDance/SDXL-Lightning",
+                    settings.IMAGE_LIVEPEER_MODEL || "ByteDance/SDXL-Lightning",
             },
         },
     },
@@ -948,21 +975,21 @@ export const models: Models = {
             stop: [],
             maxInputTokens: 128000,
             maxOutputTokens: 8192,
-            temperature: 0.6,
+            temperature: 0,
         },
         [ModelClass.MEDIUM]: {
             name: settings.MEDIUM_INFERA_MODEL || "mistral-nemo:latest",
             stop: [],
             maxInputTokens: 128000,
             maxOutputTokens: 8192,
-            temperature: 0.6,
+            temperature: 0,
         },
         [ModelClass.LARGE]: {
             name: settings.LARGE_INFERA_MODEL || "mistral-small:latest",
             stop: [],
             maxInputTokens: 128000,
             maxOutputTokens: 8192,
-            temperature: 0.6,
+            temperature: 0,
         },
     },
 },
```
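With the registry above, resolving the configured Livepeer models is a lookup keyed by provider and model class; a short sketch (identifiers as in the diff):

```typescript
// Sketch against the models map above.
const livepeer = models[ModelProviderName.LIVEPEER];
const small = livepeer.model[ModelClass.SMALL];
console.log(livepeer.endpoint); // settings.LIVEPEER_GATEWAY_URL
console.log(small.name); // "meta-llama/Meta-Llama-3.1-8B-Instruct" unless SMALL_LIVEPEER_MODEL is set
console.log(small.maxInputTokens, small.maxOutputTokens); // 8000 8192
```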
