Commit f97f2d8

Merge branch 'develop' into develop
2 parents 965ad06 + c4d173d commit f97f2d8

File tree

22 files changed: +760 -99 lines changed

.env.example

+11 -2

@@ -141,8 +141,12 @@ MEDIUM_AKASH_CHAT_API_MODEL= # Default: Meta-Llama-3-3-70B-Instruct
 LARGE_AKASH_CHAT_API_MODEL= # Default: Meta-Llama-3-1-405B-Instruct-FP8
 
 # Livepeer configuration
-LIVEPEER_GATEWAY_URL= # Free inference gateways and docs: https://livepeer-eliza.com/
-LIVEPEER_IMAGE_MODEL= # Default: ByteDance/SDXL-Lightning
+
+LIVEPEER_GATEWAY_URL=https://dream-gateway.livepeer.cloud # Free inference gateways and docs: https://livepeer-eliza.com/
+IMAGE_LIVEPEER_MODEL= # Default: ByteDance/SDXL-Lightning
+SMALL_LIVEPEER_MODEL= # Default: meta-llama/Meta-Llama-3.1-8B-Instruct
+MEDIUM_LIVEPEER_MODEL= # Default: meta-llama/Meta-Llama-3.1-8B-Instruct
+LARGE_LIVEPEER_MODEL= # Default: meta-llama/Meta-Llama-3.1-8B-Instruct
 
 # Speech Synthesis
 ELEVENLABS_XI_API_KEY= # API key from elevenlabs

@@ -350,6 +354,11 @@ COINBASE_GENERATED_WALLET_ID= # Not your address but the wallet ID from ge
 COINBASE_GENERATED_WALLET_HEX_SEED= # Not your address but the wallet hex seed from generating a wallet through the plugin and calling export
 COINBASE_NOTIFICATION_URI= # For webhook plugin the uri you want to send the webhook to for dummy ones use https://webhook.site
 
+# Coinbase AgentKit
+COINBASE_AGENT_KIT_NETWORK= # defaults to 'base-sepolia'
+CDP_API_KEY_NAME=
+CDP_API_KEY_PRIVATE_KEY=
+
 # Coinbase Charity Configuration
 IS_CHARITABLE=false # Set to true to enable charity donations
 CHARITY_ADDRESS_BASE=0x1234567890123456789012345678901234567890
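
Editor's note: a minimal sketch (not code from this commit; object shape is illustrative) of how the new Livepeer variables resolve at runtime, with each blank value falling back to the default documented in its comment:

```typescript
// Illustrative only: resolve the Livepeer settings introduced above, falling
// back to the defaults documented in .env.example when a variable is unset.
const livepeerEnv = {
    gatewayUrl:
        process.env.LIVEPEER_GATEWAY_URL || "https://dream-gateway.livepeer.cloud",
    imageModel: process.env.IMAGE_LIVEPEER_MODEL || "ByteDance/SDXL-Lightning",
    smallModel:
        process.env.SMALL_LIVEPEER_MODEL || "meta-llama/Meta-Llama-3.1-8B-Instruct",
    mediumModel:
        process.env.MEDIUM_LIVEPEER_MODEL || "meta-llama/Meta-Llama-3.1-8B-Instruct",
    largeModel:
        process.env.LARGE_LIVEPEER_MODEL || "meta-llama/Meta-Llama-3.1-8B-Instruct",
};
```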

.github/workflows/smoke-tests.yml

+12 -5

@@ -10,17 +10,24 @@ on:
 jobs:
     smoke-tests:
         runs-on: ubuntu-latest
+        container:
+            image: node:23-bullseye
         steps:
             - uses: actions/checkout@v4
 
-            - uses: pnpm/action-setup@v3
+            - name: Cache pnpm
+              uses: actions/cache@v4
               with:
-                  version: 9.15.0
+                  path: |
+                      ~/.pnpm-store
+                      **/node_modules
+                  key: ${{ runner.os }}-pnpm-${{ hashFiles('**/pnpm-lock.yaml') }}
+                  restore-keys: ${{ runner.os }}-pnpm-
 
-            - uses: actions/setup-node@v4
+            - name: Setup pnpm
+              uses: pnpm/action-setup@v3
               with:
-                  node-version: "23.3.0"
-                  cache: "pnpm"
+                  version: 9.15.0
 
             - name: Run smoke tests
               run: pnpm run smokeTests

.gitignore

+2

@@ -52,6 +52,8 @@ tsup.config.bundled_*.mjs
 .turbo
 .cursorrules
 .pnpm-store
+instructions.md
+wallet_data.txt
 
 coverage
 .eslintcache

agent/package.json

+1

@@ -35,6 +35,7 @@
         "@elizaos/core": "workspace:*",
         "@elizaos/plugin-0g": "workspace:*",
         "@elizaos/plugin-abstract": "workspace:*",
+        "@elizaos/plugin-agentkit": "workspace:*",
         "@elizaos/plugin-aptos": "workspace:*",
         "@elizaos/plugin-birdeye": "workspace:*",
         "@elizaos/plugin-coingecko": "workspace:*",

agent/src/index.ts

+5

@@ -512,6 +512,11 @@ export function getTokenForProvider(
                 character.settings?.secrets?.DEEPSEEK_API_KEY ||
                 settings.DEEPSEEK_API_KEY
             );
+        case ModelProviderName.LIVEPEER:
+            return (
+                character.settings?.secrets?.LIVEPEER_GATEWAY_URL ||
+                settings.LIVEPEER_GATEWAY_URL
+            );
         default:
             const errorMessage = `Failed to get token - unsupported model provider: ${provider}`;
             elizaLogger.error(errorMessage);
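
Editor's note: a standalone sketch of the fallback the new case implements. For Livepeer the resolved "token" is simply the gateway URL, with a per-character secret taking precedence over the global setting; the types and function name below are illustrative, not the repo's.

```typescript
// Illustrative restatement of the LIVEPEER branch added to getTokenForProvider.
type CharacterLike = {
    settings?: { secrets?: Record<string, string | undefined> };
};

function resolveLivepeerToken(
    character: CharacterLike,
    settings: Record<string, string | undefined>
): string | undefined {
    return (
        character.settings?.secrets?.LIVEPEER_GATEWAY_URL ||
        settings.LIVEPEER_GATEWAY_URL
    );
}

// Falls back to the global setting when the character defines no secret.
resolveLivepeerToken({}, { LIVEPEER_GATEWAY_URL: "https://dream-gateway.livepeer.cloud" });
```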

characters/tate.character.json

-63
This file was deleted.

client/src/lib/info.json

+1 -1

@@ -1 +1 @@
-{"version": "0.1.8+build.1"}
+{"version": "0.1.9-alpha.1"}

docs/docs/advanced/fine-tuning.md

+40 -18

@@ -26,6 +26,7 @@ enum ModelProviderName {
     REDPILL,
     OPENROUTER,
     HEURIST,
+    LIVEPEER,
 }
 ```
 

@@ -272,24 +273,45 @@
 
 ```typescript
 const heuristSettings = {
-  settings: {
-    stop: [],
-    maxInputTokens: 32768,
-    maxOutputTokens: 8192,
-    repetition_penalty: 0.0,
-    temperature: 0.7,
-  },
-  imageSettings: {
-    steps: 20,
-  },
-  endpoint: "https://llm-gateway.heurist.xyz",
-  model: {
-    [ModelClass.SMALL]: "hermes-3-llama3.1-8b",
-    [ModelClass.MEDIUM]: "mistralai/mixtral-8x7b-instruct",
-    [ModelClass.LARGE]: "nvidia/llama-3.1-nemotron-70b-instruct",
-    [ModelClass.EMBEDDING]: "", // Add later
-    [ModelClass.IMAGE]: "FLUX.1-dev",
-  },
+    settings: {
+        stop: [],
+        maxInputTokens: 32768,
+        maxOutputTokens: 8192,
+        repetition_penalty: 0.0,
+        temperature: 0.7,
+    },
+    imageSettings: {
+        steps: 20,
+    },
+    endpoint: "https://llm-gateway.heurist.xyz",
+    model: {
+        [ModelClass.SMALL]: "hermes-3-llama3.1-8b",
+        [ModelClass.MEDIUM]: "mistralai/mixtral-8x7b-instruct",
+        [ModelClass.LARGE]: "nvidia/llama-3.1-nemotron-70b-instruct",
+        [ModelClass.EMBEDDING]: "", // Add later
+        [ModelClass.IMAGE]: "FLUX.1-dev",
+    },
+};
+```
+
+### Livepeer Provider
+
+```typescript
+const livepeerSettings = {
+    settings: {
+        stop: [],
+        maxInputTokens: 128000,
+        maxOutputTokens: 8192,
+        repetition_penalty: 0.4,
+        temperature: 0.7,
+    },
+    endpoint: "https://dream-gateway.livepeer.cloud",
+    model: {
+        [ModelClass.SMALL]: "meta-llama/Meta-Llama-3.1-8B-Instruct",
+        [ModelClass.MEDIUM]: "meta-llama/Meta-Llama-3.1-8B-Instruct",
+        [ModelClass.LARGE]: "meta-llama/Llama-3.3-70B-Instruct",
+        [ModelClass.IMAGE]: "ByteDance/SDXL-Lightning",
+    },
 };
 ```
 
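
Editor's note: to show how a provider entry like `livepeerSettings` is consumed, here is a small sketch of the model lookup by `ModelClass`; the enum values and lookup shape are simplified assumptions, not the package's exact types.

```typescript
// Simplified sketch: pick the concrete model name for a given model class
// from a provider settings object shaped like livepeerSettings above.
enum ModelClass {
    SMALL = "small",
    MEDIUM = "medium",
    LARGE = "large",
    IMAGE = "image",
}

const livepeerModels: Record<ModelClass, string> = {
    [ModelClass.SMALL]: "meta-llama/Meta-Llama-3.1-8B-Instruct",
    [ModelClass.MEDIUM]: "meta-llama/Meta-Llama-3.1-8B-Instruct",
    [ModelClass.LARGE]: "meta-llama/Llama-3.3-70B-Instruct",
    [ModelClass.IMAGE]: "ByteDance/SDXL-Lightning",
};

// e.g. image generation requests resolve to ByteDance/SDXL-Lightning
console.log(livepeerModels[ModelClass.IMAGE]);
```
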
docs/docs/core/characterfile.md

+1 -1

@@ -92,7 +92,7 @@ The character's display name for identification and in conversations.
 
 #### `modelProvider` (required)
 
-Specifies the AI model provider. Supported options from [ModelProviderName](/api/enumerations/modelprovidername) include `anthropic`, `llama_local`, `openai`, and others.
+Specifies the AI model provider. Supported options from [ModelProviderName](/api/enumerations/modelprovidername) include `anthropic`, `llama_local`, `openai`, `livepeer`, and others.
 
 #### `clients` (required)
 

docs/docs/quickstart.md

+7

@@ -92,10 +92,17 @@ Eliza supports multiple AI models:
 - **Heurist**: Set `modelProvider: "heurist"` in your character file. Most models are uncensored.
     - LLM: Select available LLMs [here](https://docs.heurist.ai/dev-guide/supported-models#large-language-models-llms) and configure `SMALL_HEURIST_MODEL`,`MEDIUM_HEURIST_MODEL`,`LARGE_HEURIST_MODEL`
     - Image Generation: Select available Stable Diffusion or Flux models [here](https://docs.heurist.ai/dev-guide/supported-models#image-generation-models) and configure `HEURIST_IMAGE_MODEL` (default is FLUX.1-dev)
+<<<<<<< HEAD
 - **Llama**: Set `OLLAMA_MODEL` to your chosen model
 - **Grok**: Set `GROK_API_KEY` to your Grok API key and set `modelProvider: "grok"` in your character file
 - **OpenAI**: Set `OPENAI_API_KEY` to your OpenAI API key and set `modelProvider: "openai"` in your character file
 - **Livepeer**: Set `LIVEPEER_IMAGE_MODEL` to your chosen Livepeer image model, available models [here](https://livepeer-eliza.com/)
+=======
+- **Llama**: Set `XAI_MODEL=meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo`
+- **Grok**: Set `XAI_MODEL=grok-beta`
+- **OpenAI**: Set `XAI_MODEL=gpt-4o-mini` or `gpt-4o`
+- **Livepeer**: Set `SMALL_LIVEPEER_MODEL`,`MEDIUM_LIVEPEER_MODEL`,`LARGE_LIVEPEER_MODEL` and `IMAGE_LIVEPEER_MODEL` to your desired models listed [here](https://livepeer-eliza.com/).
+>>>>>>> 95f56e6b4 (Merge pull request #2 from Titan-Node/livepeer-doc-updates)
 
 You set which model to use inside the character JSON file
 
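Editor's note: a hedged character-file excerpt for the Livepeer provider, written as a TypeScript object for brevity (the real file is JSON). Only `modelProvider` and the `LIVEPEER_GATEWAY_URL` secret key are taken from this commit; the other fields are illustrative.

```typescript
// Illustrative character configuration selecting the Livepeer provider.
// Concrete model names come from SMALL/MEDIUM/LARGE/IMAGE_LIVEPEER_MODEL.
const character = {
    name: "MyAgent", // hypothetical
    modelProvider: "livepeer",
    clients: [],
    settings: {
        secrets: {
            // Per-character override; otherwise the global LIVEPEER_GATEWAY_URL is used.
            LIVEPEER_GATEWAY_URL: "https://dream-gateway.livepeer.cloud",
        },
    },
};
```
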
packages/core/__tests__/models.test.ts

+35

@@ -18,6 +18,8 @@ vi.mock("../settings", () => {
             LLAMACLOUD_MODEL_LARGE: "mock-llama-large",
             TOGETHER_MODEL_SMALL: "mock-together-small",
             TOGETHER_MODEL_LARGE: "mock-together-large",
+            LIVEPEER_GATEWAY_URL: "http://gateway.test-gateway",
+            IMAGE_LIVEPEER_MODEL: "ByteDance/SDXL-Lightning",
         },
         loadEnv: vi.fn(),
     };

@@ -125,6 +127,26 @@
             );
         });
     });
+    describe("Livepeer Provider", () => {
+        test("should have correct endpoint configuration", () => {
+            expect(models[ModelProviderName.LIVEPEER].endpoint).toBe("http://gateway.test-gateway");
+        });
+
+        test("should have correct model mappings", () => {
+            const livepeerModels = models[ModelProviderName.LIVEPEER].model;
+            expect(livepeerModels[ModelClass.SMALL]).toBe("meta-llama/Meta-Llama-3.1-8B-Instruct");
+            expect(livepeerModels[ModelClass.MEDIUM]).toBe("meta-llama/Meta-Llama-3.1-8B-Instruct");
+            expect(livepeerModels[ModelClass.LARGE]).toBe("meta-llama/Meta-Llama-3.1-8B-Instruct");
+            expect(livepeerModels[ModelClass.IMAGE]).toBe("ByteDance/SDXL-Lightning");
+        });
+
+        test("should have correct settings configuration", () => {
+            const settings = models[ModelProviderName.LIVEPEER].settings;
+            expect(settings.maxInputTokens).toBe(128000);
+            expect(settings.maxOutputTokens).toBe(8192);
+            expect(settings.temperature).toBe(0);
+        });
+    });
 });
 
 describe("Model Retrieval Functions", () => {

@@ -224,3 +246,16 @@
             );
         });
 });
+
+describe("Generation with Livepeer", () => {
+    test("should have correct image generation settings", () => {
+        const livepeerConfig = models[ModelProviderName.LIVEPEER];
+        expect(livepeerConfig.model[ModelClass.IMAGE]).toBe("ByteDance/SDXL-Lightning");
+        expect(livepeerConfig.settings.temperature).toBe(0);
+    });
+
+    test("should use default image model", () => {
+        delete process.env.IMAGE_LIVEPEER_MODEL;
+        expect(models[ModelProviderName.LIVEPEER].model[ModelClass.IMAGE]).toBe("ByteDance/SDXL-Lightning");
+    });
+});
