@@ -14,11 +14,41 @@ void LlamaContext::_bind_methods() {
14
14
ClassDB::bind_method (D_METHOD (" set_model" , " model" ), &LlamaContext::set_model);
15
15
ClassDB::bind_method (D_METHOD (" get_model" ), &LlamaContext::get_model);
16
16
ClassDB::add_property (" LlamaContext" , PropertyInfo (Variant::OBJECT, " model" , PROPERTY_HINT_RESOURCE_TYPE, " LlamaModel" ), " set_model" , " get_model" );
17
+
18
+ ClassDB::bind_method (D_METHOD (" get_seed" ), &LlamaContext::get_seed);
19
+ ClassDB::bind_method (D_METHOD (" set_seed" , " seed" ), &LlamaContext::set_seed);
20
+ ClassDB::add_property (" LlamaContext" , PropertyInfo (Variant::INT, " seed" ), " set_seed" , " get_seed" );
21
+
22
+ ClassDB::bind_method (D_METHOD (" get_n_ctx" ), &LlamaContext::get_n_ctx);
23
+ ClassDB::bind_method (D_METHOD (" set_n_ctx" , " n_ctx" ), &LlamaContext::set_n_ctx);
24
+ ClassDB::add_property (" LlamaContext" , PropertyInfo (Variant::INT, " n_ctx" ), " set_n_ctx" , " get_n_ctx" );
25
+
26
+ ClassDB::bind_method (D_METHOD (" get_n_threads" ), &LlamaContext::get_n_threads);
27
+ ClassDB::bind_method (D_METHOD (" set_n_threads" , " n_threads" ), &LlamaContext::set_n_threads);
28
+ ClassDB::add_property (" LlamaContext" , PropertyInfo (Variant::INT, " n_threads" ), " set_n_threads" , " get_n_threads" );
29
+
30
+ ClassDB::bind_method (D_METHOD (" get_n_threads_batch" ), &LlamaContext::get_n_threads_batch);
31
+ ClassDB::bind_method (D_METHOD (" set_n_threads_batch" , " n_threads_batch" ), &LlamaContext::set_n_threads_batch);
32
+ ClassDB::add_property (" LlamaContext" , PropertyInfo (Variant::INT, " n_threads_batch" ), " set_n_threads_batch" , " get_n_threads_batch" );
33
+
17
34
ClassDB::bind_method (D_METHOD (" request_completion" , " prompt" ), &LlamaContext::request_completion);
18
35
ClassDB::bind_method (D_METHOD (" _fulfill_completion" , " prompt" ), &LlamaContext::_fulfill_completion);
36
+
19
37
ADD_SIGNAL (MethodInfo (" completion_generated" , PropertyInfo (Variant::STRING, " completion" ), PropertyInfo (Variant::BOOL, " is_final" )));
20
38
}
21
39
40
+ LlamaContext::LlamaContext () {
41
+ batch = llama_batch_init (4096 , 0 , 1 );
42
+
43
+ ctx_params = llama_context_default_params ();
44
+ ctx_params.seed = -1 ;
45
+ ctx_params.n_ctx = 4096 ;
46
+
47
+ int32_t n_threads = OS::get_singleton ()->get_processor_count ();
48
+ ctx_params.n_threads = n_threads;
49
+ ctx_params.n_threads_batch = n_threads;
50
+ }
51
+
22
52
void LlamaContext::_ready () {
23
53
// TODO: remove this and use runtime classes once godot 4.3 lands, see https://github.com/godotengine/godot/pull/82554
24
54
if (Engine::get_singleton ()->is_editor_hint ()) {
@@ -30,12 +60,6 @@ void LlamaContext::_ready() {
30
60
return ;
31
61
}
32
62
33
- ctx_params.seed = -1 ;
34
- ctx_params.n_ctx = 4096 ;
35
- int32_t n_threads = OS::get_singleton ()->get_processor_count ();
36
- ctx_params.n_threads = n_threads;
37
- ctx_params.n_threads_batch = n_threads;
38
-
39
63
ctx = llama_new_context_with_model (model->model , ctx_params);
40
64
if (ctx == NULL ) {
41
65
UtilityFunctions::printerr (vformat (" %s: Failed to initialize llama context, null ctx" , __func__));
@@ -44,6 +68,14 @@ void LlamaContext::_ready() {
44
68
UtilityFunctions::print (vformat (" %s: Context initialized" , __func__));
45
69
}
46
70
71
+ PackedStringArray LlamaContext::_get_configuration_warnings () const {
72
+ PackedStringArray warnings;
73
+ if (model == NULL ) {
74
+ warnings.push_back (" Model resource property not defined" );
75
+ }
76
+ return warnings;
77
+ }
78
+
47
79
Variant LlamaContext::request_completion (const String &prompt) {
48
80
UtilityFunctions::print (vformat (" %s: Requesting completion for prompt: %s" , __func__, prompt));
49
81
if (task_id) {
@@ -134,11 +166,38 @@ void LlamaContext::_fulfill_completion(const String &prompt) {
134
166
// Assigns the LlamaModel resource this context runs inference against.
// NOTE(review): _ready() creates the llama context from this model; it does
// not appear that reassigning afterwards rebuilds the context — verify with
// the rest of the file before relying on runtime swaps.
void LlamaContext::set_model(const Ref<LlamaModel> p_model) {
	model = p_model;
}
137
-
138
169
// Returns the currently assigned LlamaModel resource (may be a null Ref).
Ref<LlamaModel> LlamaContext::get_model() {
	return model;
}
141
172
173
// Sampling seed forwarded to llama_context_params; -1 means "random seed"
// (set in the constructor). NOTE(review): setting this only affects contexts
// created afterwards — the live context built in _ready() is not re-seeded.
// The params field is presumably unsigned in llama.h, so the int round-trip
// of -1 deserves a confirmation against the vendored header.
int LlamaContext::get_seed() {
	return ctx_params.seed;
}
void LlamaContext::set_seed(int seed) {
	ctx_params.seed = seed;
}
179
+
180
// Context window size (tokens) forwarded to llama_context_params.
// NOTE(review): the token batch is allocated in the constructor with the
// default size (4096); setting a larger n_ctx here does not appear to grow
// the batch, and the change only applies to contexts created afterwards —
// confirm before exposing larger windows.
int LlamaContext::get_n_ctx() {
	return ctx_params.n_ctx;
}
void LlamaContext::set_n_ctx(int n_ctx) {
	ctx_params.n_ctx = n_ctx;
}
186
+
187
// Thread count for prompt evaluation (defaults to the processor count, set
// in the constructor). NOTE(review): only read when the llama context is
// created in _ready(); changing it afterwards does not retune a live context.
int LlamaContext::get_n_threads() {
	return ctx_params.n_threads;
}
void LlamaContext::set_n_threads(int n_threads) {
	ctx_params.n_threads = n_threads;
}
193
+
194
// Thread count for batch (multi-token) processing (defaults to the processor
// count, set in the constructor). NOTE(review): only read at context creation
// in _ready(); later changes do not affect the live context.
int LlamaContext::get_n_threads_batch() {
	return ctx_params.n_threads_batch;
}
void LlamaContext::set_n_threads_batch(int n_threads_batch) {
	ctx_params.n_threads_batch = n_threads_batch;
}
200
+
142
201
LlamaContext::~LlamaContext () {
143
202
if (ctx) {
144
203
llama_free (ctx);
0 commit comments