
Commit 02ec625

Removed usages of the old generate() API (#117)
## Change

Removed usages of the old generate() API

## General checklist

- [X] There are no breaking changes
- [ ] I have added unit and/or integration tests for my change
- [ ] The tests cover both positive and negative cases
- [X] I have manually run all the unit and integration tests in the module I have added/changed, and they are all green
- [ ] I have added/updated the [documentation](https://github.com/langchain4j/langchain4j/tree/main/docs/docs)
- [ ] I have added an example in the [examples repo](https://github.com/langchain4j/langchain4j-examples) (only for "big" features)
1 parent 3019110 commit 02ec625
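
The diffs below all apply the same migration pattern. As a rough sketch of the blocking case (the class and method names below are illustrative, not part of the change; chatLanguageModel stands for any ChatLanguageModel instance, such as the auto-configured bean), the call moves from generate() to chat():

    import dev.langchain4j.model.chat.ChatLanguageModel;

    class ChatMigrationSketch {

        // chatLanguageModel is any ChatLanguageModel, e.g. the bean obtained from the Spring context.
        static String ask(ChatLanguageModel chatLanguageModel) {
            // Old API (removed by this change):
            // String answer = chatLanguageModel.generate("What is the capital of Germany?");

            // New API used throughout the updated tests:
            return chatLanguageModel.chat("What is the capital of Germany?");
        }
    }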

8 files changed: +101 -83 lines


langchain4j-anthropic-spring-boot-starter/src/test/java/dev/langchain4j/anthropic/spring/AutoConfigIT.java

+10 -11

@@ -1,12 +1,11 @@
 package dev.langchain4j.anthropic.spring;
 
-import dev.langchain4j.data.message.AiMessage;
-import dev.langchain4j.model.StreamingResponseHandler;
 import dev.langchain4j.model.anthropic.AnthropicChatModel;
 import dev.langchain4j.model.anthropic.AnthropicStreamingChatModel;
 import dev.langchain4j.model.chat.ChatLanguageModel;
 import dev.langchain4j.model.chat.StreamingChatLanguageModel;
-import dev.langchain4j.model.output.Response;
+import dev.langchain4j.model.chat.response.ChatResponse;
+import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.condition.EnabledIfEnvironmentVariable;
@@ -42,7 +41,7 @@ void should_provide_chat_model() {
 
         ChatLanguageModel chatLanguageModel = context.getBean(ChatLanguageModel.class);
         assertThat(chatLanguageModel).isInstanceOf(AnthropicChatModel.class);
-        assertThat(chatLanguageModel.generate("What is the capital of Germany?")).contains("Berlin");
+        assertThat(chatLanguageModel.chat("What is the capital of Germany?")).contains("Berlin");
 
         assertThat(context.getBean(AnthropicChatModel.class)).isSameAs(chatLanguageModel);
     });
@@ -59,24 +58,24 @@ void should_provide_streaming_chat_model() {
 
         StreamingChatLanguageModel streamingChatLanguageModel = context.getBean(StreamingChatLanguageModel.class);
         assertThat(streamingChatLanguageModel).isInstanceOf(AnthropicStreamingChatModel.class);
-        CompletableFuture<Response<AiMessage>> future = new CompletableFuture<>();
-        streamingChatLanguageModel.generate("What is the capital of Germany?", new StreamingResponseHandler<AiMessage>() {
+        CompletableFuture<ChatResponse> future = new CompletableFuture<>();
+        streamingChatLanguageModel.chat("What is the capital of Germany?", new StreamingChatResponseHandler() {
 
             @Override
-            public void onNext(String token) {
+            public void onPartialResponse(String partialResponse) {
             }
 
             @Override
-            public void onComplete(Response<AiMessage> response) {
-                future.complete(response);
+            public void onCompleteResponse(ChatResponse completeResponse) {
+                future.complete(completeResponse);
             }
 
             @Override
             public void onError(Throwable error) {
             }
         });
-        Response<AiMessage> response = future.get(60, SECONDS);
-        assertThat(response.content().text()).contains("Berlin");
+        ChatResponse response = future.get(60, SECONDS);
+        assertThat(response.aiMessage().text()).contains("Berlin");
 
         assertThat(context.getBean(AnthropicStreamingChatModel.class)).isSameAs(streamingChatLanguageModel);
     });
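
The streaming side of the migration follows the shape of the test above. As a minimal sketch of how the new handler is typically wired up (class and method names other than the callbacks are illustrative; model stands for any StreamingChatLanguageModel, such as the auto-configured bean):

    import dev.langchain4j.model.chat.StreamingChatLanguageModel;
    import dev.langchain4j.model.chat.response.ChatResponse;
    import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;

    import java.util.concurrent.CompletableFuture;

    class StreamingMigrationSketch {

        // Streams a reply and completes the future with the full text once the response finishes.
        static CompletableFuture<String> askStreaming(StreamingChatLanguageModel model, String question) {
            CompletableFuture<String> future = new CompletableFuture<>();
            model.chat(question, new StreamingChatResponseHandler() {

                @Override
                public void onPartialResponse(String partialResponse) {
                    // Partial tokens arrive here as they are generated (replaces onNext(String token)).
                }

                @Override
                public void onCompleteResponse(ChatResponse completeResponse) {
                    // ChatResponse replaces the old Response<AiMessage>; the text is read via aiMessage().
                    future.complete(completeResponse.aiMessage().text());
                }

                @Override
                public void onError(Throwable error) {
                    future.completeExceptionally(error);
                }
            });
            return future;
        }
    }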

langchain4j-azure-open-ai-spring-boot-starter/src/test/java/dev/langchain4j/azure/openai/spring/AutoConfigIT.java

+19 -19

@@ -1,7 +1,6 @@
 package dev.langchain4j.azure.openai.spring;
 
 import dev.langchain4j.data.message.AiMessage;
-import dev.langchain4j.model.StreamingResponseHandler;
 import dev.langchain4j.model.azure.AzureOpenAiChatModel;
 import dev.langchain4j.model.azure.AzureOpenAiEmbeddingModel;
 import dev.langchain4j.model.azure.AzureOpenAiImageModel;
@@ -15,9 +14,10 @@
 import dev.langchain4j.model.chat.request.json.JsonObjectSchema;
 import dev.langchain4j.model.chat.request.json.JsonSchema;
 import dev.langchain4j.model.chat.request.json.JsonStringSchema;
+import dev.langchain4j.model.chat.response.ChatResponse;
+import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
 import dev.langchain4j.model.embedding.EmbeddingModel;
 import dev.langchain4j.model.image.ImageModel;
-import dev.langchain4j.model.output.Response;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.condition.EnabledIfEnvironmentVariable;
 import org.junit.jupiter.params.ParameterizedTest;
@@ -67,7 +67,7 @@ void should_provide_chat_model(String deploymentName) {
 
         ChatLanguageModel chatLanguageModel = context.getBean(ChatLanguageModel.class);
         assertThat(chatLanguageModel).isInstanceOf(AzureOpenAiChatModel.class);
-        assertThat(chatLanguageModel.generate("What is the capital of Germany?")).contains("Berlin");
+        assertThat(chatLanguageModel.chat("What is the capital of Germany?")).contains("Berlin");
         assertThat(context.getBean(AzureOpenAiChatModel.class)).isSameAs(chatLanguageModel);
     });
 }
@@ -87,7 +87,7 @@ void should_provide_chat_model_with_listeners() {
 
         ChatLanguageModel chatLanguageModel = context.getBean(ChatLanguageModel.class);
         assertThat(chatLanguageModel).isInstanceOf(AzureOpenAiChatModel.class);
-        assertThat(chatLanguageModel.generate("What is the capital of Germany?")).contains("Berlin");
+        assertThat(chatLanguageModel.chat("What is the capital of Germany?")).contains("Berlin");
         assertThat(context.getBean(AzureOpenAiChatModel.class)).isSameAs(chatLanguageModel);
 
         ChatModelListener listener1 = context.getBean("listener1", ChatModelListener.class);
@@ -159,7 +159,7 @@ void should_provide_chat_model_no_azure(String deploymentName) {
 
         ChatLanguageModel chatLanguageModel = context.getBean(ChatLanguageModel.class);
         assertThat(chatLanguageModel).isInstanceOf(AzureOpenAiChatModel.class);
-        assertThat(chatLanguageModel.generate("What is the capital of Germany?")).contains("Berlin");
+        assertThat(chatLanguageModel.chat("What is the capital of Germany?")).contains("Berlin");
 
         assertThat(context.getBean(AzureOpenAiChatModel.class)).isSameAs(chatLanguageModel);
     });
@@ -184,24 +184,24 @@ void should_provide_streaming_chat_model(String deploymentName) {
 
         StreamingChatLanguageModel streamingChatLanguageModel = context.getBean(StreamingChatLanguageModel.class);
         assertThat(streamingChatLanguageModel).isInstanceOf(AzureOpenAiStreamingChatModel.class);
-        CompletableFuture<Response<AiMessage>> future = new CompletableFuture<>();
-        streamingChatLanguageModel.generate("What is the capital of Germany?", new StreamingResponseHandler<AiMessage>() {
+        CompletableFuture<ChatResponse> future = new CompletableFuture<>();
+        streamingChatLanguageModel.chat("What is the capital of Germany?", new StreamingChatResponseHandler() {
 
             @Override
-            public void onNext(String token) {
+            public void onPartialResponse(String partialResponse) {
             }
 
             @Override
-            public void onComplete(Response<AiMessage> response) {
-                future.complete(response);
+            public void onCompleteResponse(ChatResponse completeResponse) {
+                future.complete(completeResponse);
             }
 
             @Override
             public void onError(Throwable error) {
             }
         });
-        Response<AiMessage> response = future.get(60, SECONDS);
-        assertThat(response.content().text()).contains("Berlin");
+        ChatResponse response = future.get(60, SECONDS);
+        assertThat(response.aiMessage().text()).contains("Berlin");
 
         assertThat(context.getBean(AzureOpenAiStreamingChatModel.class)).isSameAs(streamingChatLanguageModel);
     });
@@ -223,24 +223,24 @@ void should_provide_streaming_chat_model_with_listeners() {
 
         StreamingChatLanguageModel streamingChatLanguageModel = context.getBean(StreamingChatLanguageModel.class);
         assertThat(streamingChatLanguageModel).isInstanceOf(AzureOpenAiStreamingChatModel.class);
-        CompletableFuture<Response<AiMessage>> future = new CompletableFuture<>();
-        streamingChatLanguageModel.generate("What is the capital of Germany?", new StreamingResponseHandler<AiMessage>() {
+        CompletableFuture<ChatResponse> future = new CompletableFuture<>();
+        streamingChatLanguageModel.chat("What is the capital of Germany?", new StreamingChatResponseHandler() {
 
             @Override
-            public void onNext(String token) {
+            public void onPartialResponse(String partialResponse) {
             }
 
             @Override
-            public void onComplete(Response<AiMessage> response) {
-                future.complete(response);
+            public void onCompleteResponse(ChatResponse completeResponse) {
+                future.complete(completeResponse);
             }
 
             @Override
             public void onError(Throwable error) {
             }
         });
-        Response<AiMessage> response = future.get(60, SECONDS);
-        assertThat(response.content().text()).contains("Berlin");
+        ChatResponse response = future.get(60, SECONDS);
+        assertThat(response.aiMessage().text()).contains("Berlin");
 
         assertThat(context.getBean(AzureOpenAiStreamingChatModel.class)).isSameAs(streamingChatLanguageModel);
 
langchain4j-github-models-spring-boot-starter/src/test/java/dev/langchain4j/model/githubmodels/spring/AutoConfigIT.java

+10 -11

@@ -1,14 +1,13 @@
 package dev.langchain4j.model.githubmodels.spring;
 
-import dev.langchain4j.data.message.AiMessage;
-import dev.langchain4j.model.StreamingResponseHandler;
 import dev.langchain4j.model.chat.ChatLanguageModel;
 import dev.langchain4j.model.chat.StreamingChatLanguageModel;
+import dev.langchain4j.model.chat.response.ChatResponse;
+import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
 import dev.langchain4j.model.embedding.EmbeddingModel;
 import dev.langchain4j.model.github.GitHubModelsChatModel;
 import dev.langchain4j.model.github.GitHubModelsEmbeddingModel;
 import dev.langchain4j.model.github.GitHubModelsStreamingChatModel;
-import dev.langchain4j.model.output.Response;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.condition.EnabledIfEnvironmentVariable;
 import org.springframework.boot.autoconfigure.AutoConfigurations;
@@ -39,7 +38,7 @@ void should_provide_chat_model() {
 
         ChatLanguageModel chatLanguageModel = context.getBean(ChatLanguageModel.class);
         assertThat(chatLanguageModel).isInstanceOf(GitHubModelsChatModel.class);
-        assertThat(chatLanguageModel.generate("What is the capital of France?")).contains("Paris");
+        assertThat(chatLanguageModel.chat("What is the capital of France?")).contains("Paris");
         assertThat(context.getBean(GitHubModelsChatModel.class)).isSameAs(chatLanguageModel);
     });
 }
@@ -56,24 +55,24 @@ void should_provide_streaming_chat_model() {
 
         StreamingChatLanguageModel streamingChatLanguageModel = context.getBean(StreamingChatLanguageModel.class);
         assertThat(streamingChatLanguageModel).isInstanceOf(GitHubModelsStreamingChatModel.class);
-        CompletableFuture<Response<AiMessage>> future = new CompletableFuture<>();
-        streamingChatLanguageModel.generate("What is the capital of France?", new StreamingResponseHandler<AiMessage>() {
+        CompletableFuture<ChatResponse> future = new CompletableFuture<>();
+        streamingChatLanguageModel.chat("What is the capital of France?", new StreamingChatResponseHandler() {
 
             @Override
-            public void onNext(String token) {
+            public void onPartialResponse(String partialResponse) {
             }
 
             @Override
-            public void onComplete(Response<AiMessage> response) {
-                future.complete(response);
+            public void onCompleteResponse(ChatResponse completeResponse) {
+                future.complete(completeResponse);
             }
 
             @Override
             public void onError(Throwable error) {
             }
         });
-        Response<AiMessage> response = future.get(60, SECONDS);
-        assertThat(response.content().text()).contains("Paris");
+        ChatResponse response = future.get(60, SECONDS);
+        assertThat(response.aiMessage().text()).contains("Paris");
 
         assertThat(context.getBean(GitHubModelsStreamingChatModel.class)).isSameAs(streamingChatLanguageModel);
     });
