Skip to content

Commit 3626b14

Browse files
committed
test: adding tests, covering more models. models.test.ts
1 parent 6aca7c8 commit 3626b14

File tree

1 file changed

+31
-4
lines changed

1 file changed

+31
-4
lines changed

packages/core/src/tests/models.test.ts

+31-4
Original file line numberDiff line numberDiff line change
@@ -60,10 +60,36 @@ describe("Model Provider Configuration", () => {
6060
expect(anthropicModels[ModelClass.LARGE]).toBe("claude-3-5-sonnet-20241022");
6161
});
6262

63-
test("should have correct token limits", () => {
63+
test("should have correct settings configuration", () => {
6464
const settings = models[ModelProviderName.ANTHROPIC].settings;
6565
expect(settings.maxInputTokens).toBe(200000);
6666
expect(settings.maxOutputTokens).toBe(4096);
67+
expect(settings.temperature).toBe(0.7);
68+
expect(settings.frequency_penalty).toBe(0.4);
69+
expect(settings.presence_penalty).toBe(0.4);
70+
});
71+
});
72+
73+
describe("LlamaCloud Provider", () => {
74+
test("should have correct endpoint", () => {
75+
expect(models[ModelProviderName.LLAMACLOUD].endpoint).toBe("https://api.llamacloud.com/v1");
76+
});
77+
78+
test("should have correct model mappings", () => {
79+
const llamaCloudModels = models[ModelProviderName.LLAMACLOUD].model;
80+
expect(llamaCloudModels[ModelClass.SMALL]).toBe("meta-llama/Llama-3.2-3B-Instruct-Turbo");
81+
expect(llamaCloudModels[ModelClass.MEDIUM]).toBe("meta-llama-3.1-8b-instruct");
82+
expect(llamaCloudModels[ModelClass.LARGE]).toBe("meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo");
83+
expect(llamaCloudModels[ModelClass.EMBEDDING]).toBe("togethercomputer/m2-bert-80M-32k-retrieval");
84+
expect(llamaCloudModels[ModelClass.IMAGE]).toBe("black-forest-labs/FLUX.1-schnell");
85+
});
86+
87+
test("should have correct settings configuration", () => {
88+
const settings = models[ModelProviderName.LLAMACLOUD].settings;
89+
expect(settings.maxInputTokens).toBe(128000);
90+
expect(settings.maxOutputTokens).toBe(8192);
91+
expect(settings.temperature).toBe(0.7);
92+
expect(settings.repetition_penalty).toBe(0.4);
6793
});
6894
});
6995

@@ -79,10 +105,10 @@ describe("Model Provider Configuration", () => {
79105

80106
describe("Model Retrieval Functions", () => {
81107
describe("getModel function", () => {
82-
test("should retrieve correct models for all providers", () => {
108+
test("should retrieve correct models for different providers and classes", () => {
83109
expect(getModel(ModelProviderName.OPENAI, ModelClass.SMALL)).toBe("gpt-4o-mini");
84110
expect(getModel(ModelProviderName.ANTHROPIC, ModelClass.LARGE)).toBe("claude-3-5-sonnet-20241022");
85-
expect(getModel(ModelProviderName.GOOGLE, ModelClass.MEDIUM)).toBe("gemini-1.5-flash-latest");
111+
expect(getModel(ModelProviderName.LLAMACLOUD, ModelClass.MEDIUM)).toBe("meta-llama-3.1-8b-instruct");
86112
});
87113

88114
test("should handle environment variable overrides", () => {
@@ -97,9 +123,10 @@ describe("Model Retrieval Functions", () => {
97123
});
98124

99125
describe("getEndpoint function", () => {
100-
test("should retrieve correct endpoints for all providers", () => {
126+
test("should retrieve correct endpoints for different providers", () => {
101127
expect(getEndpoint(ModelProviderName.OPENAI)).toBe("https://api.openai.com/v1");
102128
expect(getEndpoint(ModelProviderName.ANTHROPIC)).toBe("https://api.anthropic.com/v1");
129+
expect(getEndpoint(ModelProviderName.LLAMACLOUD)).toBe("https://api.llamacloud.com/v1");
103130
expect(getEndpoint(ModelProviderName.ETERNALAI)).toBe("https://mock.eternal.ai");
104131
});
105132

0 commit comments

Comments (0)