Skip to content

Commit 6a0cb86

Browse files
committed
add system prompt to vercel completions
1 parent d6e84eb commit 6a0cb86

File tree

1 file changed

+6
-0
lines changed

1 file changed

+6
-0
lines changed

core/src/core/generation.ts

+6
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ import { generateText as aiGenerateText } from "ai";
1515

1616
import { createAnthropic } from "@ai-sdk/anthropic";
1717
import { prettyConsole } from "../index.ts";
18+
import settings from "./settings.ts";
1819

1920
/**
2021
* Send a message to the model to generate text - receive a string back and parse how you'd like
@@ -79,6 +80,7 @@ export async function generateText({
7980
const { text: openaiResponse } = await aiGenerateText({
8081
model: openai.languageModel(model),
8182
prompt: context,
83+
system: runtime.character.system ?? settings.SYSTEM_PROMPT ?? undefined,
8284
temperature: temperature,
8385
maxTokens: max_response_length,
8486
frequencyPenalty: frequency_penalty,
@@ -100,6 +102,7 @@ export async function generateText({
100102
const { text: anthropicResponse } = await aiGenerateText({
101103
model: anthropic.languageModel(model),
102104
prompt: context,
105+
system: runtime.character.system ?? settings.SYSTEM_PROMPT ?? undefined,
103106
temperature: temperature,
104107
maxTokens: max_response_length,
105108
frequencyPenalty: frequency_penalty,
@@ -121,6 +124,7 @@ export async function generateText({
121124
parallelToolCalls: false,
122125
}),
123126
prompt: context,
127+
system: runtime.character.system ?? settings.SYSTEM_PROMPT ?? undefined,
124128
temperature: temperature,
125129
maxTokens: max_response_length,
126130
frequencyPenalty: frequency_penalty,
@@ -140,6 +144,7 @@ export async function generateText({
140144
model: groq.languageModel(model),
141145
prompt: context,
142146
temperature: temperature,
147+
system: runtime.character.system ?? settings.SYSTEM_PROMPT ?? undefined,
143148
maxTokens: max_response_length,
144149
frequencyPenalty: frequency_penalty,
145150
presencePenalty: presence_penalty,
@@ -178,6 +183,7 @@ export async function generateText({
178183
model: openai.languageModel(model),
179184
prompt: context,
180185
temperature: temperature,
186+
system: runtime.character.system ?? settings.SYSTEM_PROMPT ?? undefined,
181187
maxTokens: max_response_length,
182188
frequencyPenalty: frequency_penalty,
183189
presencePenalty: presence_penalty,

0 commit comments

Comments (0)