Skip to content

Commit c1624b8

Browse files
committed
Merge branch 'develop' into pr-842
2 parents 0ee9e07 + bc5e50e commit c1624b8

File tree

16 files changed

+359
-32
lines changed

16 files changed

+359
-32
lines changed

.env.example

+5
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ DISCORD_VOICE_CHANNEL_ID= # The ID of the voice channel the bot should joi
99

1010
# AI Model API Keys
1111
OPENAI_API_KEY= # OpenAI API key, starting with sk-
12+
OPENAI_API_URL= # OpenAI API Endpoint (optional), Default: https://api.openai.com/v1
1213
SMALL_OPENAI_MODEL= # Default: gpt-4o-mini
1314
MEDIUM_OPENAI_MODEL= # Default: gpt-4o
1415
LARGE_OPENAI_MODEL= # Default: gpt-4o
@@ -35,6 +36,10 @@ SMALL_HYPERBOLIC_MODEL= # Default: meta-llama/Llama-3.2-3B-Instruct
3536
MEDIUM_HYPERBOLIC_MODEL= # Default: meta-llama/Meta-Llama-3.1-70B-Instruct
3637
LARGE_HYPERBOLIC_MODEL= # Default: meta-llama/Meta-Llama-3.1-405B-Instruct
3738

39+
# Livepeer configuration
40+
LIVEPEER_GATEWAY_URL= # Free inference gateways and docs: https://livepeer-eliza.com/
41+
LIVEPEER_IMAGE_MODEL= # Default: ByteDance/SDXL-Lightning
42+
3843
# Speech Synthesis
3944
ELEVENLABS_XI_API_KEY= # API key from elevenlabs
4045

agent/src/index.ts

+2-1
Original file line numberDiff line numberDiff line change
@@ -557,7 +557,8 @@ export async function createAgent(
557557
getSecret(character, "FAL_API_KEY") ||
558558
getSecret(character, "OPENAI_API_KEY") ||
559559
getSecret(character, "VENICE_API_KEY") ||
560-
getSecret(character, "HEURIST_API_KEY")
560+
getSecret(character, "HEURIST_API_KEY") ||
561+
getSecret(character, "LIVEPEER_GATEWAY_URL")
561562
? imageGenerationPlugin
562563
: null,
563564
getSecret(character, "FAL_API_KEY") ? ThreeDGenerationPlugin : null,

docs/api/enumerations/ModelProviderName.md

+10
Original file line numberDiff line numberDiff line change
@@ -233,3 +233,13 @@ Available model providers
233233
#### Defined in
234234

235235
[packages/core/src/types.ts:240](https://github.com/elizaOS/eliza/blob/main/packages/core/src/types.ts#L240)
236+
237+
***
238+
239+
### LIVEPEER
240+
241+
> **LIVEPEER**: `"livepeer"`
242+
243+
#### Defined in
244+
245+
[packages/core/src/types.ts:241](https://github.com/elizaOS/eliza/blob/main/packages/core/src/types.ts#L241)

docs/api/type-aliases/Models.md

+4
Original file line numberDiff line numberDiff line change
@@ -100,6 +100,10 @@ Model configurations by provider
100100

101101
> **akash\_chat\_api**: [`Model`](Model.md)
102102
103+
### livepeer
104+
105+
> **livepeer**: [`Model`](Model.md)
106+
103107
## Defined in
104108

105109
[packages/core/src/types.ts:188](https://github.com/elizaOS/eliza/blob/main/packages/core/src/types.ts#L188)

docs/docs/api/enumerations/ModelProviderName.md

+10
Original file line numberDiff line numberDiff line change
@@ -119,3 +119,13 @@
119119
#### Defined in
120120

121121
[packages/core/src/types.ts:132](https://github.com/elizaos/eliza/blob/4d1e66cbf7deea87a8a67525670a963cd00108bc/packages/core/src/types.ts#L132)
122+
123+
---
124+
125+
### LIVEPEER
126+
127+
> **LIVEPEER**: `"livepeer"`
128+
129+
#### Defined in
130+
131+
[packages/core/src/types.ts:133](https://github.com/elizaos/eliza/blob/4d1e66cbf7deea87a8a67525670a963cd00108bc/packages/core/src/types.ts#L133)

docs/docs/api/type-aliases/Models.md

+4
Original file line numberDiff line numberDiff line change
@@ -52,6 +52,10 @@
5252

5353
> **heurist**: [`Model`](Model.md)
5454
55+
### livepeer
56+
57+
> **livepeer**: [`Model`](Model.md)
58+
5559
## Defined in
5660

5761
[packages/core/src/types.ts:105](https://github.com/elizaos/eliza/blob/7fcf54e7fb2ba027d110afcc319c0b01b3f181dc/packages/core/src/types.ts#L105)

docs/docs/guides/configuration.md

+3
Original file line numberDiff line numberDiff line change
@@ -72,6 +72,9 @@ TOGETHER_API_KEY=
7272
# Heurist Settings
7373
HEURIST_API_KEY=
7474

75+
# Livepeer Settings
76+
LIVEPEER_GATEWAY_URL=
77+
7578
# Local Model Settings
7679
XAI_MODEL=meta-llama/Llama-3.1-7b-instruct
7780
```

docs/docs/quickstart.md

+4-2
Original file line numberDiff line numberDiff line change
@@ -82,6 +82,7 @@ pnpm build
8282
OPENAI_API_KEY= # OpenAI API key
8383
GROK_API_KEY= # Grok API key
8484
ELEVENLABS_XI_API_KEY= # API key from elevenlabs (for voice)
85+
LIVEPEER_GATEWAY_URL= # Livepeer gateway URL
8586
```
8687
8788
## Choose Your Model
@@ -94,6 +95,7 @@ Eliza supports multiple AI models:
9495
- **Llama**: Set `XAI_MODEL=meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo`
9596
- **Grok**: Set `XAI_MODEL=grok-beta`
9697
- **OpenAI**: Set `XAI_MODEL=gpt-4o-mini` or `gpt-4o`
98+
- **Livepeer**: Set `LIVEPEER_IMAGE_MODEL` to your chosen Livepeer image model, available models [here](https://livepeer-eliza.com/)
9799
98100
You set which model to use inside the character JSON file
99101
@@ -216,8 +218,8 @@ pnpm start --characters="characters/trump.character.json,characters/tate.charact
216218
- Ensure Node.js 23.3.0 is installed
217219
- Use `node -v` to check version
218220
- Consider using [nvm](https://github.com/nvm-sh/nvm) to manage Node versions
219-
220-
NOTE: pnpm may be bundled with a different node version, ignoring nvm. If this is the case, you can use
221+
222+
NOTE: pnpm may be bundled with a different node version, ignoring nvm. If this is the case, you can use
221223
```bash
222224
pnpm env use --global 23.3.0
223225
```

packages/client-direct/src/index.ts

+157
Original file line numberDiff line numberDiff line change
@@ -445,6 +445,163 @@ export class DirectClient {
445445
}
446446
}
447447
);
448+
449+
this.app.post("/:agentId/speak", async (req, res) => {
450+
const agentId = req.params.agentId;
451+
const roomId = stringToUuid(req.body.roomId ?? "default-room-" + agentId);
452+
const userId = stringToUuid(req.body.userId ?? "user");
453+
const text = req.body.text;
454+
455+
if (!text) {
456+
res.status(400).send("No text provided");
457+
return;
458+
}
459+
460+
let runtime = this.agents.get(agentId);
461+
462+
// if runtime is null, look for runtime with the same name
463+
if (!runtime) {
464+
runtime = Array.from(this.agents.values()).find(
465+
(a) => a.character.name.toLowerCase() === agentId.toLowerCase()
466+
);
467+
}
468+
469+
if (!runtime) {
470+
res.status(404).send("Agent not found");
471+
return;
472+
}
473+
474+
try {
475+
// Process message through agent (same as /message endpoint)
476+
await runtime.ensureConnection(
477+
userId,
478+
roomId,
479+
req.body.userName,
480+
req.body.name,
481+
"direct"
482+
);
483+
484+
const messageId = stringToUuid(Date.now().toString());
485+
486+
const content: Content = {
487+
text,
488+
attachments: [],
489+
source: "direct",
490+
inReplyTo: undefined,
491+
};
492+
493+
const userMessage = {
494+
content,
495+
userId,
496+
roomId,
497+
agentId: runtime.agentId,
498+
};
499+
500+
const memory: Memory = {
501+
id: messageId,
502+
agentId: runtime.agentId,
503+
userId,
504+
roomId,
505+
content,
506+
createdAt: Date.now(),
507+
};
508+
509+
await runtime.messageManager.createMemory(memory);
510+
511+
const state = await runtime.composeState(userMessage, {
512+
agentName: runtime.character.name,
513+
});
514+
515+
const context = composeContext({
516+
state,
517+
template: messageHandlerTemplate,
518+
});
519+
520+
const response = await generateMessageResponse({
521+
runtime: runtime,
522+
context,
523+
modelClass: ModelClass.LARGE,
524+
});
525+
526+
// save response to memory
527+
const responseMessage = {
528+
...userMessage,
529+
userId: runtime.agentId,
530+
content: response,
531+
};
532+
533+
await runtime.messageManager.createMemory(responseMessage);
534+
535+
if (!response) {
536+
res.status(500).send("No response from generateMessageResponse");
537+
return;
538+
}
539+
540+
let message = null as Content | null;
541+
542+
await runtime.evaluate(memory, state);
543+
544+
const _result = await runtime.processActions(
545+
memory,
546+
[responseMessage],
547+
state,
548+
async (newMessages) => {
549+
message = newMessages;
550+
return [memory];
551+
}
552+
);
553+
554+
// Get the text to convert to speech
555+
const textToSpeak = response.text;
556+
557+
// Convert to speech using ElevenLabs
558+
const elevenLabsApiUrl = `https://api.elevenlabs.io/v1/text-to-speech/${process.env.ELEVENLABS_VOICE_ID}`;
559+
const apiKey = process.env.ELEVENLABS_XI_API_KEY;
560+
561+
if (!apiKey) {
562+
throw new Error("ELEVENLABS_XI_API_KEY not configured");
563+
}
564+
565+
const speechResponse = await fetch(elevenLabsApiUrl, {
566+
method: "POST",
567+
headers: {
568+
"Content-Type": "application/json",
569+
"xi-api-key": apiKey,
570+
},
571+
body: JSON.stringify({
572+
text: textToSpeak,
573+
model_id: process.env.ELEVENLABS_MODEL_ID || "eleven_multilingual_v2",
574+
voice_settings: {
575+
stability: parseFloat(process.env.ELEVENLABS_VOICE_STABILITY || "0.5"),
576+
similarity_boost: parseFloat(process.env.ELEVENLABS_VOICE_SIMILARITY_BOOST || "0.9"),
577+
style: parseFloat(process.env.ELEVENLABS_VOICE_STYLE || "0.66"),
578+
use_speaker_boost: process.env.ELEVENLABS_VOICE_USE_SPEAKER_BOOST === "true",
579+
},
580+
}),
581+
});
582+
583+
if (!speechResponse.ok) {
584+
throw new Error(`ElevenLabs API error: ${speechResponse.statusText}`);
585+
}
586+
587+
const audioBuffer = await speechResponse.arrayBuffer();
588+
589+
// Set appropriate headers for audio streaming
590+
res.set({
591+
'Content-Type': 'audio/mpeg',
592+
'Transfer-Encoding': 'chunked'
593+
});
594+
595+
res.send(Buffer.from(audioBuffer));
596+
597+
} catch (error) {
598+
console.error("Error processing message or generating speech:", error);
599+
res.status(500).json({
600+
error: "Error processing message or generating speech",
601+
details: error.message
602+
});
603+
}
604+
});
448605
}
449606

450607
// agent/src/index.ts:startAgent calls this

packages/client-github/src/index.ts

+2-5
Original file line numberDiff line numberDiff line change
@@ -82,11 +82,8 @@ export class GitHubClient {
8282
`Successfully cloned repository from ${repositoryUrl}`
8383
);
8484
return;
85-
} catch (error) {
86-
elizaLogger.error(
87-
`Failed to clone repository from ${repositoryUrl}. Retrying...`,
88-
error
89-
);
85+
} catch {
86+
elizaLogger.error(`Failed to clone repository from ${repositoryUrl}. Retrying...`);
9087
retries++;
9188
if (retries === maxRetries) {
9289
throw new Error(

packages/client-twitter/src/utils.ts

+14-14
Original file line numberDiff line numberDiff line change
@@ -171,10 +171,10 @@ export async function sendTweet(
171171
twitterUsername: string,
172172
inReplyTo: string
173173
): Promise<Memory[]> {
174-
const tweetChunks = splitTweetContent(
175-
content.text,
176-
client.twitterConfig.MAX_TWEET_LENGTH
177-
);
174+
const maxTweetLength = client.twitterConfig.MAX_TWEET_LENGTH;
175+
const isLongTweet = maxTweetLength > 280;
176+
177+
const tweetChunks = splitTweetContent(content.text, maxTweetLength);
178178
const sentTweets: Tweet[] = [];
179179
let previousTweetId = inReplyTo;
180180

@@ -212,20 +212,20 @@ export async function sendTweet(
212212
})
213213
);
214214
}
215-
const result = await client.requestQueue.add(
216-
async () =>
217-
await client.twitterClient.sendTweet(
218-
chunk.trim(),
219-
previousTweetId,
220-
mediaData
221-
)
215+
const result = await client.requestQueue.add(async () =>
216+
isLongTweet
217+
? client.twitterClient.sendLongTweet(chunk.trim(), previousTweetId, mediaData)
218+
: client.twitterClient.sendTweet(chunk.trim(), previousTweetId, mediaData)
222219
);
220+
223221
const body = await result.json();
222+
const tweetResult = isLongTweet
223+
? body.data.notetweet_create.tweet_results.result
224+
: body.data.create_tweet.tweet_results.result;
224225

225226
// if we have a response
226-
if (body?.data?.create_tweet?.tweet_results?.result) {
227+
if (tweetResult) {
227228
// Parse the response
228-
const tweetResult = body.data.create_tweet.tweet_results.result;
229229
const finalTweet: Tweet = {
230230
id: tweetResult.rest_id,
231231
text: tweetResult.legacy.full_text,
@@ -245,7 +245,7 @@ export async function sendTweet(
245245
sentTweets.push(finalTweet);
246246
previousTweetId = finalTweet.id;
247247
} else {
248-
console.error("Error sending chunk", chunk, "response:", body);
248+
elizaLogger.error("Error sending tweet chunk:", { chunk, response: body });
249249
}
250250

251251
// Wait a bit between tweets to avoid rate limiting issues

packages/core/src/embedding.ts

+1-1
Original file line numberDiff line numberDiff line change
@@ -189,7 +189,7 @@ export async function embed(runtime: IAgentRuntime, input: string) {
189189
if (config.provider === EmbeddingProvider.OpenAI) {
190190
return await getRemoteEmbedding(input, {
191191
model: config.model,
192-
endpoint: "https://api.openai.com/v1",
192+
endpoint: settings.OPENAI_API_URL || "https://api.openai.com/v1",
193193
apiKey: settings.OPENAI_API_KEY,
194194
dimensions: config.dimensions,
195195
});

0 commit comments

Comments
 (0)