Skip to content

Commit 0894671

Browse files
authored
Merge pull request #310 from alanneary17/llama-temps
Increased llama and llama-based model temperatures
2 parents e235713 + 6152402 commit 0894671

File tree

1 file changed

+6
-6
lines changed

1 file changed

+6
-6
lines changed

packages/core/src/models.ts

+6-6
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ const models: Models = {
5959
maxOutputTokens: 8192,
6060
frequency_penalty: 0.0,
6161
presence_penalty: 0.0,
62-
temperature: 0.3,
62+
temperature: 0.7,
6363
},
6464
endpoint: "https://api.x.ai/v1",
6565
model: {
@@ -77,7 +77,7 @@ const models: Models = {
7777
maxOutputTokens: 8000,
7878
frequency_penalty: 0.0,
7979
presence_penalty: 0.0,
80-
temperature: 0.3,
80+
temperature: 0.7,
8181
},
8282
model: {
8383
[ModelClass.SMALL]: "llama-3.1-8b-instant",
@@ -92,7 +92,7 @@ const models: Models = {
9292
maxInputTokens: 128000,
9393
maxOutputTokens: 8192,
9494
repetition_penalty: 0.0,
95-
temperature: 0.3,
95+
temperature: 0.7,
9696
},
9797
imageSettings: {
9898
steps: 4,
@@ -113,7 +113,7 @@ const models: Models = {
113113
maxInputTokens: 32768,
114114
maxOutputTokens: 8192,
115115
repetition_penalty: 0.0,
116-
temperature: 0.3,
116+
temperature: 0.7,
117117
},
118118
model: {
119119
[ModelClass.SMALL]:
@@ -134,7 +134,7 @@ const models: Models = {
134134
maxOutputTokens: 8192,
135135
frequency_penalty: 0.0,
136136
presence_penalty: 0.0,
137-
temperature: 0.3,
137+
temperature: 0.7,
138138
},
139139
model: {
140140
[ModelClass.SMALL]: "gemini-1.5-flash-latest",
@@ -197,7 +197,7 @@ const models: Models = {
197197
maxOutputTokens: 8192,
198198
frequency_penalty: 0.0,
199199
presence_penalty: 0.0,
200-
temperature: 0.3,
200+
temperature: 0.7,
201201
},
202202
endpoint: settings.OLLAMA_SERVER_URL || "http://localhost:11434",
203203
model: {

0 commit comments

Comments (0)