1 file changed: +6 −6 lines changed

@@ -59,7 +59,7 @@ const models: Models = {
         maxOutputTokens: 8192,
         frequency_penalty: 0.0,
         presence_penalty: 0.0,
-        temperature: 0.3,
+        temperature: 0.7,
     },
     endpoint: "https://api.x.ai/v1",
     model: {
@@ -77,7 +77,7 @@ const models: Models = {
         maxOutputTokens: 8000,
         frequency_penalty: 0.0,
         presence_penalty: 0.0,
-        temperature: 0.3,
+        temperature: 0.7,
     },
     model: {
         [ModelClass.SMALL]: "llama-3.1-8b-instant",
@@ -92,7 +92,7 @@ const models: Models = {
         maxInputTokens: 128000,
         maxOutputTokens: 8192,
         repetition_penalty: 0.0,
-        temperature: 0.3,
+        temperature: 0.7,
     },
     imageSettings: {
         steps: 4,
@@ -113,7 +113,7 @@ const models: Models = {
         maxInputTokens: 32768,
         maxOutputTokens: 8192,
         repetition_penalty: 0.0,
-        temperature: 0.3,
+        temperature: 0.7,
     },
     model: {
         [ModelClass.SMALL]:
@@ -134,7 +134,7 @@ const models: Models = {
         maxOutputTokens: 8192,
         frequency_penalty: 0.0,
         presence_penalty: 0.0,
-        temperature: 0.3,
+        temperature: 0.7,
     },
     model: {
         [ModelClass.SMALL]: "gemini-1.5-flash-latest",
@@ -197,7 +197,7 @@ const models: Models = {
         maxOutputTokens: 8192,
         frequency_penalty: 0.0,
         presence_penalty: 0.0,
-        temperature: 0.3,
+        temperature: 0.7,
     },
     endpoint: settings.OLLAMA_SERVER_URL || "http://localhost:11434",
     model: {
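
Context on the change: temperature scales the sampling distribution, so raising it from 0.3 to 0.7 trades near-deterministic output for more varied completions across every provider touched here. As a minimal sketch of where such a settings object typically ends up, the snippet below forwards one to an OpenAI-compatible chat completions endpoint (e.g. https://api.x.ai/v1); the ModelSettings interface and generate() helper are illustrative assumptions, not code from this repository.

// Illustrative sketch only: ModelSettings and generate() are assumptions,
// not this repository's actual code. It shows how a per-model settings
// object like the ones in this diff can be passed through to an
// OpenAI-compatible /chat/completions request.
interface ModelSettings {
    maxOutputTokens: number;
    frequency_penalty: number;
    presence_penalty: number;
    temperature: number; // 0.3 -> 0.7 in this diff: higher means more varied sampling
}

async function generate(
    endpoint: string,
    model: string,
    prompt: string,
    settings: ModelSettings,
    apiKey: string
): Promise<string> {
    const res = await fetch(`${endpoint}/chat/completions`, {
        method: "POST",
        headers: {
            "Content-Type": "application/json",
            Authorization: `Bearer ${apiKey}`,
        },
        body: JSON.stringify({
            model,
            messages: [{ role: "user", content: prompt }],
            max_tokens: settings.maxOutputTokens,
            frequency_penalty: settings.frequency_penalty,
            presence_penalty: settings.presence_penalty,
            temperature: settings.temperature,
        }),
    });
    if (!res.ok) throw new Error(`completion failed: ${res.status}`);
    const data = await res.json();
    return data.choices[0].message.content;
}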