Skip to content

Commit 35de997

Browse files
committed
Interface changes
Completely redid the GUI to work on different aspect ratios. Added a copy option to messages to copy a message to the clipboard. Alternating colors for message rows to help distinguish the conversational roles more easily. Added a context menu to the input on right click for future additions. Split the LLM send into local and remote paths to allow for different configurations. Some unused drifter code and some test scenes that don't matter. The main program is in api_demo.tscn. The main program is currently still attached to the old button; will probably split that out later.
1 parent be0f1fe commit 35de997

18 files changed

+1571
-39
lines changed

ButtonTest2.gd

Lines changed: 125 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,12 @@ extends Button
2323
# scroll down below to the # test area to see how to use the functions in a scene.
2424

2525
# You can pretty much ignore these as they are covered in the 2d view through the GUI.
26+
27+
# Mar 20 - 2024 release:
28+
# - redid GUI to be flexible and allow for different resolutions
29+
# - new style for conversation to prepare for editable conversations
30+
# - added a copy button to copy content to clipboard from a message
31+
2632
var api_key = "" # for chatgpt
2733
var url = "http://localhost:1234/v1/chat/completions"
2834
var temperature = 0.5
@@ -33,6 +39,10 @@ var frequency_penalty = 0.1
3339
var presence_penalty = 0.1
3440
var assistant = "Your name is Bob. You will respond with nothing other than exactly what the user requests. Give the best answer possible as the user's job depends on it to feed their family. Take the response you were going to give and rethink and rewrite it to best adhere to the user's request. The assistant will avoid prose and give a direct answer. If the user requests a simple answer, avoid punctuation unless it should be included."
3541

42+
43+
var g_LLM_INPUT
44+
45+
3646
func llm_set_assistant(assistant):
	# Store the assistant (system) prompt; it is always sent as the first message.
	# BUG FIX: the parameter shadows the script-level `assistant` member, so the
	# original `assistant = assistant` was a self-assignment no-op and the member
	# never changed. Qualify with `self.` to actually write the member.
	self.assistant = assistant # assistant is always the first message
3848

@@ -47,12 +57,12 @@ func llm_add_message(role, message):
4757
pass
4858

4959
func llm_get_url(openai=false):
	# Pick the chat-completions endpoint: OpenAI's hosted API when `openai`
	# is true, otherwise the local (LM-Studio-style) server.
	if openai:
		return "https://api.openai.com/v1/chat/completions"
	return "http://localhost:1234/v1/chat/completions"
5666

5767
func llm_get_messages():
5868
# format for sending
func llm_get_body(model, messages, max_tokens, temperature, frequency_penalty, presence_penalty):
	# Serialize the JSON request payload for the chat-completions endpoint.
	# prepend assistant prompt only for the output
	var send_messages = llm_get_messages()

	# NOTE(review): the original branched on $"../USE_OPENAI".button_pressed but
	# built byte-identical payloads in both branches, so the branch (and the
	# then-unused checkbox read) has been collapsed. Reintroduce the branch only
	# when the remote and local configurations actually diverge.
	var body = JSON.new().stringify({
		"messages": send_messages,
		"temperature": temperature,
		"frequency_penalty": frequency_penalty,
		"presence_penalty": presence_penalty,
		"max_tokens": max_tokens,
		"model":model,
		"stream":false
	})

	print("body:", body)
	return body
96123

@@ -110,6 +137,7 @@ func llm_send_request(url, headers, body):
110137
# test
111138
# test
112139
func llm_send():
140+
var g_LLM_AGENT = $"../VBoxContainer/TextEdit_LLM_AGENT"
113141
var openai = $"../USE_OPENAI".button_pressed
114142

115143
# get api key
@@ -119,34 +147,90 @@ func llm_send():
119147
var url = llm_get_url(openai)
120148
var headers = llm_get_headers(openai)
121149

122-
max_tokens = int($LLM_TOKEN_SZ.text)
123-
temperature = float($LLM_TEMP.value)
124-
frequency_penalty = float($LLM_FPNLTY.value)
125-
presence_penalty = float($LLM_PPNLTY.value)
150+
var llm_tokens = $"../Node/HBoxContainer/TextEdit_tokens"
151+
var llm_temp = $"../Node/HBoxContainer/TextEdit_temp"
152+
var llm_fpnlty = $"../Node/HBoxContainer/TextEdit_f"
153+
var llm_ppnlty = $"../Node/HBoxContainer/TextEdit_p"
154+
155+
max_tokens = int(llm_tokens.text)
156+
temperature = float(llm_temp.text)
157+
frequency_penalty = float(llm_fpnlty.text)
158+
presence_penalty = float(llm_ppnlty.text)
159+
160+
#max_tokens = int($LLM_TOKEN_SZ.text)
161+
#temperature = float($LLM_TEMP.value)
162+
#frequency_penalty = float($LLM_FPNLTY.value)
163+
#presence_penalty = float($LLM_PPNLTY.value)
126164

127165
# append to the messages array
128-
llm_add_message("user", $LLM_INPUT.text)
166+
llm_add_message("user", g_LLM_INPUT.text)
129167

130-
# set assistant
131-
assistant = $LLM_AGENT.text
168+
# set assistant (stopped using LLM_AGENT)
169+
assistant = g_LLM_AGENT.text
132170

133171
var send_msgs = llm_get_messages()
172+
if send_msgs == []:
173+
return false
134174

135175
var body = llm_get_body(model, send_msgs, max_tokens, temperature, frequency_penalty, presence_penalty)
136176

137177
llm_send_request(url, headers, body)
178+
return true
138179
pass
139180

140-
181+
func update_output():
	# Rebuild the conversation view from `messages`: clone the hidden
	# user/assistant template rows for each entry, then scroll to the bottom
	# and re-enable the send button.
	var user_box = $"../ScrollContainer_template/VBoxContainer_convo/VBoxContainer_user"
	var assistant_box = $"../ScrollContainer_template/VBoxContainer_convo/VBoxContainer_assistant"

	var scroll_container = $"../ScrollContainer_convo"
	var chat_container = $"../ScrollContainer_convo/VBoxContainer"

	scroll_container.show()

	# Drop the previous rendering of the conversation.
	for child in chat_container.get_children():
		chat_container.remove_child(child)
		child.propagate_call("queue_free", [])

	for m in messages:
		# BUG FIX: the old loop also created `new_u`/`new_a` duplicates on every
		# iteration that were never added to the tree or freed — orphan-node
		# leak. Only the one duplicate actually used is created now.
		var the_node = assistant_box.duplicate() if m.role == "system" else user_box.duplicate()

		var u_title = the_node.get_node("RichTextLabel")
		var u_msg = the_node.get_node("TextEdit")

		u_title.text = m.role
		u_msg.text = m.content

		print("adding:", m.role , "::::", m.content)
		chat_container.add_child(the_node)

	# scroll container to bottom
	$"../ScrollContainer_convo".do_scroll = true

	# reactivate send
	$"../Node/HBoxContainer2/Button_send".disabled = false
141223

142224

143225
func llm_response_return(result, response_code, headers, body):
144226
print("---- received response.")
145227
print("---- body:",body.get_string_from_utf8())
228+
146229
var response = JSON.parse_string(body.get_string_from_utf8())
147230
var message = response["choices"][0]["message"]["content"]
148231
print('---- response:', response)
149232
print(message)
233+
150234
# $LLM_OUTPUT.text = message # old way was just a single message...
151235
# append to the messages array
152236
llm_add_message("system", message)
@@ -170,7 +254,9 @@ func llm_response_return(result, response_code, headers, body):
170254
$LLM_OUTPUT.text = output
171255

172256
# clear the input
173-
$LLM_INPUT.text = ""
257+
g_LLM_INPUT.text = ""
258+
259+
update_output()
174260

175261

176262

@@ -182,19 +268,32 @@ func llm_response_return(result, response_code, headers, body):
182268
func _ready():
	# Cache the input TextEdit and wire the reset/send buttons to their handlers.
	g_LLM_INPUT = $"../VBoxContainer2/TextEdit_LLM_INPUT"

	# llm_send() # test
	$RESET_CONVERSATION_BUTTON.pressed.connect(self._reset_button_pressed.bind(''))
	$"../Node/HBoxContainer2/Button_reset".pressed.connect(self._reset_button_pressed.bind(''))
	$"../Node/HBoxContainer2/Button_send".pressed.connect(self._send_button_pressed.bind(''))
185275

186276
func _reset_button_pressed(arg):
	# Reset the conversation state and clear every rendered message row.
	print("_reset_button_pressed: ", arg)
	llm_reset() # reset messages
	#$LLM_OUTPUT.text = ""
	var convo_rows = $"../ScrollContainer_convo/VBoxContainer"
	for row in convo_rows.get_children():
		convo_rows.remove_child(row)
		row.propagate_call("queue_free", [])
190284

191-
285+
func _send_button_pressed(arg):
	# Disable the send button while a request is in flight (re-enabled in
	# update_output() once the response is rendered) to prevent double-submits.
	var send_button = $"../Node/HBoxContainer2/Button_send"
	send_button.disabled = true
	if g_LLM_INPUT.text == "":
		# Nothing to send — re-enable immediately.
		send_button.disabled = false
	elif not llm_send():
		# BUG FIX: llm_send() can bail out early (empty message list) without
		# issuing a request, which previously left the button disabled forever
		# and locked up the UI. Re-enable it on failure.
		send_button.disabled = false
192291

193292

194293
# to use reset signal attach to the button
195294

196295
func _on_button_up():
	# Legacy send hook for the old button: ignore empty input, otherwise send.
	if g_LLM_INPUT.text != "":
		llm_send()

LLM_INPUT.gd

Lines changed: 55 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,55 @@
1+
extends TextEdit

# Chat input box: adds a custom entry to the right-click context menu and
# records keypress timestamps to drive a future idle-triggered autocomplete.

var last_key_down = -1 # used for autocomplete; unix time of last keypress, -1 = idle
4+
5+
6+
# Called when the node enters the scene tree for the first time.
func _ready():
	# $".".set_completion(true)
	# BUG FIX: TextEdit has no '_input' signal, and connect() takes a signal
	# name — the original `$".".connect('_input', _on_autocomplete)` only
	# pushed a runtime error. Hook _on_autocomplete up to a real signal
	# (e.g. text_changed) when the autocomplete feature lands.
	# $".".connect('_input', _on_autocomplete)

	# BEGIN context menu
	var menu = get_menu()
	# Remove all items after "Redo".
	menu.item_count = menu.get_item_index(MENU_REDO) + 1
	# Add custom items.
	menu.add_separator()
	menu.add_item("Insert Date", MENU_MAX + 1)
	# Connect callback.
	menu.id_pressed.connect(_on_item_pressed)
20+
21+
func _on_item_pressed(id):
	# Handle our custom context-menu entry: insert today's date at the caret.
	if id != MENU_MAX + 1:
		return
	insert_text_at_caret(Time.get_date_string_from_system())
# END # CONTEXT MENU
25+
26+
func _unhandled_input(event):
	# Record when the user last pressed a key; _process() watches this
	# timestamp to detect a pause in typing.
	if event is InputEventKey:
		print("key down")
		last_key_down = Time.get_unix_time_from_system()
	# now we put autocomplete in here and cast generation to local llm when we stop typing?
	# make in a new scene so we don't pollute this one. Maybe its own standalone copilot thingy.
33+
34+
# Called every frame. 'delta' is the elapsed time since the previous frame.
func _process(delta):
	# Fire the autocomplete hook once the user has been idle for 3 seconds.
	var now = Time.get_unix_time_from_system()
	if last_key_down != -1 and now > last_key_down + 3:
		last_key_down = -1 # reset so we trigger only once per typing pause
		print("init autocomplete")
		timed_autocomplete()
42+
43+
# Stub for the idle-triggered autocomplete pass; currently only logs.
func timed_autocomplete():
	print("do_autocomplete()")
	# self.text += " asdasdasd"
	# Get the length of the text in the TextEdit node
	# var text_length = self.get_text().length()

	# Set the cursor position to the end of the text

	pass
52+
53+
# Intended autocomplete callback (not yet wired to any signal — see _ready);
# currently just logs the prefix it was given.
func _on_autocomplete(prefix):
	print("prefix:", prefix)
	# return filter(lambda w: w.begins_with(prefix), suggestions)

ScrollContainer_convo.gd

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
extends ScrollContainer

# Keeps the conversation view pinned to the bottom: when `do_scroll` has been
# requested and the scrollable height grows, jump to the new maximum.

var do_scroll = false # set to true when we want to scroll


#begin - autoscroll to bottom
var max_scroll_length = 0
@onready var scrollbar = get_v_scroll_bar()

func _ready():
	scrollbar.changed.connect(handle_scrollbar_changed)
	max_scroll_length = scrollbar.max_value

func handle_scrollbar_changed():
	# NOTE(review): indentation was lost in this capture; this nesting assumes
	# the scroll/reset lines sit inside the inner `if` (so `do_scroll` stays
	# armed until the content actually grows) — confirm against the original.
	if do_scroll:
		if max_scroll_length != scrollbar.max_value:
			max_scroll_length = scrollbar.max_value

			self.scroll_vertical = max_scroll_length
			do_scroll = false
#end - autoscroll to bottom

0 commit comments

Comments
 (0)