@@ -23,6 +23,12 @@ extends Button
23
23
# scroll down below to the # test area to see how to use the functions in a scene.
24
24
25
25
# You can pretty much ignore these as they are covered in the 2d view through the GUI.
26
+
27
+ # Mar 20 - 2024 release:
28
+ # - redid GUI to be flexible and allow for different resolutions
29
+ # - new style for conversation to prepare for editable conversations
30
+ # - added a copy button to copy content to clipboard from a message
31
+
26
32
var api_key = "" # for chatgpt
27
33
var url = "http://localhost:1234/v1/chat/completions"
28
34
var temperature = 0.5
@@ -33,6 +39,10 @@ var frequency_penalty = 0.1
33
39
var presence_penalty = 0.1
34
40
var assistant = "Your name is Bob. You will respond with nothing other than exactly what the user requests. Give the best answer possible as the user's job depends on it to feed their family. Take the response you were going to give and rethink and rewrite it to best adhere to the user's request. The assistant will avoid prose and give a direct answer. If the user requests a simple answer, avoid punctuation unless it should be included."
35
41
42
+
43
+ var g_LLM_INPUT
44
+
45
+
36
46
func llm_set_assistant (assistant ):
37
47
assistant = assistant # assistant is always the first message
38
48
@@ -47,12 +57,12 @@ func llm_add_message(role, message):
47
57
pass
48
58
49
59
func llm_get_url(openai = false):
	## Return the chat-completions endpoint for the selected backend:
	## OpenAI's hosted API when `openai` is true, the local server otherwise.
	if openai:
		return "https://api.openai.com/v1/chat/completions"
	return "http://localhost:1234/v1/chat/completions"
56
66
57
67
func llm_get_messages ():
58
68
# format for sending
@@ -82,15 +92,32 @@ func llm_get_body(model, messages, max_tokens, temperature, frequency_penalty, p
82
92
# prepend assistant prompt only for the output
83
93
var send_messages = llm_get_messages ()
84
94
85
- var body = JSON .new ().stringify ({
86
- "messages" : send_messages ,
87
- "temperature" : temperature ,
88
- "frequency_penalty" : frequency_penalty ,
89
- "presence_penalty" : presence_penalty ,
90
- "max_tokens" : max_tokens ,
91
- "model" :model ,
92
- "stream" :false
93
- })
95
+ var openai = $ "../USE_OPENAI" .button_pressed
96
+
97
+ var body = ""
98
+
99
+ # get api key
100
+ if openai :
101
+ body = JSON .new ().stringify ({
102
+ "messages" : send_messages ,
103
+ "temperature" : temperature ,
104
+ "frequency_penalty" : frequency_penalty ,
105
+ "presence_penalty" : presence_penalty ,
106
+ "max_tokens" : max_tokens ,
107
+ "model" :model ,
108
+ "stream" :false
109
+ })
110
+ else : # local llm
111
+ body = JSON .new ().stringify ({
112
+ "messages" : send_messages ,
113
+ "temperature" : temperature ,
114
+ "frequency_penalty" : frequency_penalty ,
115
+ "presence_penalty" : presence_penalty ,
116
+ "max_tokens" : max_tokens ,
117
+ "model" :model ,
118
+ "stream" :false
119
+ })
120
+
94
121
print ("body:" , body )
95
122
return body
96
123
@@ -110,6 +137,7 @@ func llm_send_request(url, headers, body):
110
137
# test
111
138
# test
112
139
func llm_send ():
140
+ var g_LLM_AGENT = $ "../VBoxContainer/TextEdit_LLM_AGENT"
113
141
var openai = $ "../USE_OPENAI" .button_pressed
114
142
115
143
# get api key
@@ -119,34 +147,90 @@ func llm_send():
119
147
var url = llm_get_url (openai )
120
148
var headers = llm_get_headers (openai )
121
149
122
- max_tokens = int ($ LLM_TOKEN_SZ .text )
123
- temperature = float ($ LLM_TEMP .value )
124
- frequency_penalty = float ($ LLM_FPNLTY .value )
125
- presence_penalty = float ($ LLM_PPNLTY .value )
150
+ var llm_tokens = $ "../Node/HBoxContainer/TextEdit_tokens"
151
+ var llm_temp = $ "../Node/HBoxContainer/TextEdit_temp"
152
+ var llm_fpnlty = $ "../Node/HBoxContainer/TextEdit_f"
153
+ var llm_ppnlty = $ "../Node/HBoxContainer/TextEdit_p"
154
+
155
+ max_tokens = int (llm_tokens .text )
156
+ temperature = float (llm_temp .text )
157
+ frequency_penalty = float (llm_fpnlty .text )
158
+ presence_penalty = float (llm_ppnlty .text )
159
+
160
+ # max_tokens = int($LLM_TOKEN_SZ.text)
161
+ # temperature = float($LLM_TEMP.value)
162
+ # frequency_penalty = float($LLM_FPNLTY.value)
163
+ # presence_penalty = float($LLM_PPNLTY.value)
126
164
127
165
# append to the messages array
128
- llm_add_message ("user" , $ LLM_INPUT .text )
166
+ llm_add_message ("user" , g_LLM_INPUT .text )
129
167
130
- # set assistant
131
- assistant = $ LLM_AGENT .text
168
+ # set assistant (stopped using LLM_AGENT)
169
+ assistant = g_LLM_AGENT .text
132
170
133
171
var send_msgs = llm_get_messages ()
172
+ if send_msgs == []:
173
+ return false
134
174
135
175
var body = llm_get_body (model , send_msgs , max_tokens , temperature , frequency_penalty , presence_penalty )
136
176
137
177
llm_send_request (url , headers , body )
178
+ return true
138
179
pass
139
180
140
-
181
func update_output():
	# Rebuild the conversation view: one chat bubble per entry in `messages`,
	# cloned from the hidden user/assistant templates, then re-enable the UI.
	var user_box = $"../ScrollContainer_template/VBoxContainer_convo/VBoxContainer_user"
	var assistant_box = $"../ScrollContainer_template/VBoxContainer_convo/VBoxContainer_assistant"

	var scroll_container = $"../ScrollContainer_convo"
	var chat_container = $"../ScrollContainer_convo/VBoxContainer"

	scroll_container.show()

	# Remove the previous chat bubbles before rebuilding.
	for child in chat_container.get_children():
		chat_container.remove_child(child)
		child.propagate_call("queue_free", [])

	for m in messages:
		# Fix: the old code also duplicated BOTH templates into unused locals
		# (new_u / new_a) on every iteration, leaking two orphan node trees per
		# message. Duplicate only the template that matches the author.
		var the_node = assistant_box.duplicate() if m.role == "system" else user_box.duplicate()

		var u_title = the_node.get_node("RichTextLabel")
		var u_msg = the_node.get_node("TextEdit")

		u_title.text = m.role
		u_msg.text = m.content

		print("adding:", m.role, "::::", m.content)
		chat_container.add_child(the_node)

	# scroll container to bottom
	$"../ScrollContainer_convo".do_scroll = true

	# reactivate send
	$"../Node/HBoxContainer2/Button_send".disabled = false
141
223
142
224
143
225
func llm_response_return (result , response_code , headers , body ):
144
226
print ("---- received response." )
145
227
print ("---- body:" ,body .get_string_from_utf8 ())
228
+
146
229
var response = JSON .parse_string (body .get_string_from_utf8 ())
147
230
var message = response ["choices" ][0 ]["message" ]["content" ]
148
231
print ('---- response:' , response )
149
232
print (message )
233
+
150
234
# $LLM_OUTPUT.text = message # old way was just a single message...
151
235
# append to the messages array
152
236
llm_add_message ("system" , message )
@@ -170,7 +254,9 @@ func llm_response_return(result, response_code, headers, body):
170
254
$ LLM_OUTPUT .text = output
171
255
172
256
# clear the input
173
- $ LLM_INPUT .text = ""
257
+ g_LLM_INPUT .text = ""
258
+
259
+ update_output ()
174
260
175
261
176
262
@@ -182,19 +268,32 @@ func llm_response_return(result, response_code, headers, body):
182
268
func _ready():
	## Cache the shared input node, then wire the reset/send buttons to
	## their handlers. (Connect order does not matter; the statements are
	## independent, so the cache assignment is hoisted to the top.)
	g_LLM_INPUT = $"../VBoxContainer2/TextEdit_LLM_INPUT"

	# llm_send() # test
	$RESET_CONVERSATION_BUTTON.pressed.connect(self._reset_button_pressed.bind(''))
	$"../Node/HBoxContainer2/Button_reset".pressed.connect(self._reset_button_pressed.bind(''))
	$"../Node/HBoxContainer2/Button_send".pressed.connect(self._send_button_pressed.bind(''))
185
275
186
276
func _reset_button_pressed(arg):
	## Signal handler: wipe the stored conversation and empty the chat view.
	print("_reset_button_pressed: ", arg)
	llm_reset()  # reset messages

	# $LLM_OUTPUT.text = ""
	var convo = $"../ScrollContainer_convo/VBoxContainer"
	# Drain the container front-to-back; detach each bubble, then free it
	# (and its whole subtree) via propagate_call, same as the rebuild path.
	while convo.get_child_count() > 0:
		var bubble = convo.get_child(0)
		convo.remove_child(bubble)
		bubble.propagate_call("queue_free", [])
190
284
191
-
285
func _send_button_pressed(arg):
	## Signal handler for the send button: disable it while a request is in
	## flight, skipping empty input.
	var send_button = $"../Node/HBoxContainer2/Button_send"
	send_button.disabled = true
	if $"../VBoxContainer2/TextEdit_LLM_INPUT".text == "":
		send_button.disabled = false
	elif not llm_send():
		# Fix: llm_send() returns false when it bails out (no messages to
		# send). The old code ignored that and left the button permanently
		# disabled, since re-enabling only happens in update_output() after
		# a successful response.
		send_button.disabled = false
192
291
193
292
194
293
# to use reset signal attach to the button
195
294
196
295
func _on_button_up():
	## Send the current input to the LLM; clicks on an empty input box are ignored.
	if g_LLM_INPUT.text != "":
		llm_send()
0 commit comments