Skip to content

update transform and rater #228

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
55 changes: 55 additions & 0 deletions tests/flow/rater/test_rater_flow.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
import unittest
from unittest.mock import MagicMock, patch
from uniflow.flow.rater.rater_flow import RaterFlow
from uniflow.node import Node
from uniflow.op.prompt import PromptTemplate

class TestRaterFlow(unittest.TestCase):
    """Unit tests for RaterFlow construction and execution.

    All model and op classes are patched, so no real model is instantiated.
    NOTE(review): the ``@patch`` decorators are only active while ``setUp``
    runs; the flows built there capture references to the mock instances,
    which is what the test methods assert against.
    """

    @patch("uniflow.flow.rater.rater_flow.HuggingfaceJsonFormattedLLMRater")
    @patch("uniflow.flow.rater.rater_flow.OpenAIJsonFormattedLLMRater")
    @patch("uniflow.flow.rater.rater_flow.LmRaterModel")
    @patch("uniflow.flow.rater.rater_flow.ModelOp")
    def setUp(
        self,
        mock_model_op,
        mock_lm_rater_model,
        mock_openai_rater_model,
        mock_huggingface_rater_model,
    ):
        # Decorators apply bottom-up, so ModelOp is the first mock argument.
        self.mock_model_op = mock_model_op
        self.mock_lm_rater_model = mock_lm_rater_model
        self.mock_openai_rater_model = mock_openai_rater_model
        self.mock_huggingface_rater_model = mock_huggingface_rater_model
        self.mock_lm_rater_model.return_value = MagicMock()
        self.prompt_template = PromptTemplate(
            instruction="instruction", few_shot_prompt=[{}]
        )
        # Three configs exercising the three rater-selection branches.
        self.model_config_openai = {
            "response_format": {"type": "json_object"},
            "model_server": "OpenAI",
        }
        self.model_config_huggingface = {
            "response_format": {"type": "json_object"},
            "model_server": "HuggingFace",
        }
        self.model_config_rater = {
            "response_format": {"type": "other"},
            "model_server": "open_ai",
        }
        self.label2score = {"label1": 1.0, "label2": 2.0}
        self.rater_flow_openai = RaterFlow(
            self.prompt_template, self.model_config_openai, self.label2score
        )
        self.rater_flow_huggingface = RaterFlow(
            self.prompt_template, self.model_config_huggingface, self.label2score
        )
        self.rater_flow_rater = RaterFlow(
            self.prompt_template, self.model_config_rater, self.label2score
        )

    def test_init_json_openAI(self):
        """A JSON response format with an OpenAI server selects the OpenAI rater."""
        self.mock_model_op.assert_called()
        self.mock_openai_rater_model.assert_called_once_with(
            prompt_template=self.prompt_template,
            model_config=self.model_config_openai,
            label2score=self.label2score,
        )

    def test_init_json_huggingface(self):
        """A JSON response format with a HuggingFace server selects the HF rater."""
        self.mock_model_op.assert_called()
        self.mock_huggingface_rater_model.assert_called_once_with(
            prompt_template=self.prompt_template,
            model_config=self.model_config_huggingface,
            label2score=self.label2score,
        )

    def test_init_not_json(self):
        """A non-JSON response format selects the plain LmRaterModel."""
        self.mock_model_op.assert_called()
        self.mock_lm_rater_model.assert_called_once_with(
            prompt_template=self.prompt_template,
            model_config=self.model_config_rater,
            label2score=self.label2score,
        )

    def test_run_openai(self):
        """run() on the OpenAI flow delegates to the underlying ModelOp."""
        nodes = [
            Node(name="node1", value_dict={"a": 1}),
            Node(name="node2", value_dict={"b": 2}),
        ]
        self.rater_flow_openai.run(nodes)
        self.mock_model_op.return_value.assert_called_once_with(nodes)

    def test_run_huggingface(self):
        """run() on the HuggingFace flow delegates to the underlying ModelOp."""
        nodes = [
            Node(name="node1", value_dict={"a": 1}),
            Node(name="node2", value_dict={"b": 2}),
        ]
        self.rater_flow_huggingface.run(nodes)
        self.mock_model_op.return_value.assert_called_once_with(nodes)

    def test_run_not_json(self):
        """run() on the non-JSON flow delegates to the underlying ModelOp."""
        nodes = [
            Node(name="node1", value_dict={"a": 1}),
            Node(name="node2", value_dict={"b": 2}),
        ]
        self.rater_flow_rater.run(nodes)
        self.mock_model_op.return_value.assert_called_once_with(nodes)


if __name__ == "__main__":
    unittest.main()
85 changes: 85 additions & 0 deletions tests/flow/transform/test_azure_openai.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,85 @@
import unittest
from unittest.mock import patch

from uniflow.flow.transform.transform_azure_openai_flow import AzureOpenAIModelFlow
from uniflow.node import Node
from uniflow.op.prompt import PromptTemplate


class TestAzureOpenAIModelFlow(unittest.TestCase):
    """Unit tests for AzureOpenAIModelFlow.

    The model classes and ModelOp are patched via ``patcher.start()`` +
    ``addCleanup`` so the patches stay active for the whole test method.
    With ``@patch`` decorators on ``setUp`` the patches are undone as soon
    as ``setUp`` returns, so flows constructed inside a test body (the
    ValueError tests below) would instantiate the REAL model classes.
    """

    def setUp(self):
        patcher_json = patch(
            "uniflow.flow.transform.transform_azure_openai_flow.JsonLmModel"
        )
        self.mock_json_model = patcher_json.start()
        self.addCleanup(patcher_json.stop)

        patcher_lm = patch(
            "uniflow.flow.transform.transform_azure_openai_flow.LmModel"
        )
        self.mock_lm_model = patcher_lm.start()
        self.addCleanup(patcher_lm.stop)

        patcher_op = patch(
            "uniflow.flow.transform.transform_azure_openai_flow.ModelOp"
        )
        self.mock_model_op = patcher_op.start()
        self.addCleanup(patcher_op.stop)

        self.prompt_template = PromptTemplate(
            instruction="instruction", few_shot_prompt=[{}]
        )
        # One config that selects the JSON model and one that does not.
        self.model_config1 = {"response_format": {"type": "json_object"}}
        self.model_config2 = {"response_format": {"type": "other"}}
        self.azure_flow1 = AzureOpenAIModelFlow(
            self.prompt_template, self.model_config1
        )
        self.azure_flow2 = AzureOpenAIModelFlow(
            self.prompt_template, self.model_config2
        )

    def test_prompt_template_none(self):
        """Constructing with prompt_template=None raises ValueError."""
        with self.assertRaises(ValueError):
            AzureOpenAIModelFlow(
                prompt_template=None, model_config=self.model_config1
            )

    def test_model_config_none(self):
        """Constructing with model_config=None raises ValueError."""
        with self.assertRaises(ValueError):
            AzureOpenAIModelFlow(
                prompt_template=self.prompt_template, model_config=None
            )

    def test_init_success(self):
        """A json_object response format routes to JsonLmModel."""
        self.mock_json_model.assert_called_once_with(
            prompt_template=self.prompt_template, model_config=self.model_config1
        )
        self.mock_model_op.assert_called()

    def test_not_json_init(self):
        """Any other response format routes to LmModel."""
        self.mock_lm_model.assert_called_once_with(
            prompt_template=self.prompt_template, model_config=self.model_config2
        )
        self.mock_model_op.assert_called()

    def test_run(self):
        """run() on the JSON flow delegates to the underlying ModelOp."""
        node1 = Node(name="node1", value_dict={"a": 1})
        result = self.azure_flow1.run(node1)
        self.mock_model_op.return_value.assert_called_once_with(node1)
        # Compare against the mock's configured result without calling the
        # mock a second time (which would invalidate the called-once check).
        self.assertEqual(result, self.mock_model_op.return_value.return_value)

    def test_not_json_run(self):
        """run() on the non-JSON flow also delegates to the ModelOp."""
        node1 = Node(name="node1", value_dict={"a": 1})
        result = self.azure_flow2.run(node1)
        self.mock_model_op.return_value.assert_called_once_with(node1)
        self.assertEqual(result, self.mock_model_op.return_value.return_value)


if __name__ == "__main__":
    unittest.main()
31 changes: 31 additions & 0 deletions tests/flow/transform/test_copy_flow.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
import unittest
from unittest.mock import patch

from uniflow.flow.transform.transform_copy_flow import TransformCopyFlow
from uniflow.node import Node
from uniflow.op.prompt import PromptTemplate


class TestTransformCopyFlow(unittest.TestCase):
    """Tests for TransformCopyFlow with its CopyOp dependency mocked out."""

    @patch("uniflow.flow.transform.transform_copy_flow.CopyOp")
    def setUp(self, mock_copy_op):
        self.mock_copy_op = mock_copy_op
        self.prompt_template = PromptTemplate(
            instruction="instruction", few_shot_prompt=[{}]
        )
        self.model_config = {"response_format": {"type": "json_object"}}
        self.copy_flow = TransformCopyFlow(self.prompt_template, self.model_config)

    def test_init(self):
        """Construction creates exactly one CopyOp named "copy_op"."""
        self.mock_copy_op.assert_called_once_with(name="copy_op")

    def test_run(self):
        """run() forwards the node to the CopyOp and returns its result."""
        input_node = Node(name="node1", value_dict={"a": 1})
        output = self.copy_flow.run(input_node)
        self.mock_copy_op.return_value.assert_called_once_with(input_node)
        self.assertEqual(output, self.mock_copy_op.return_value(input_node))


if __name__ == "__main__":
    unittest.main()
46 changes: 46 additions & 0 deletions tests/flow/transform/test_google_flow.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
import unittest
from unittest.mock import MagicMock, patch

from uniflow.flow.transform.transform_google_flow import GoogleModelFlow
from uniflow.node import Node
from uniflow.op.prompt import PromptTemplate


class TestGoogleModelFlow(unittest.TestCase):
    """Unit tests for GoogleModelFlow with LmModel and ModelOp mocked."""

    @patch("uniflow.flow.transform.transform_google_flow.LmModel")
    @patch("uniflow.flow.transform.transform_google_flow.ModelOp")
    def setUp(self, mock_model_op, mock_lm_model):
        # Decorators apply bottom-up: ModelOp is the first mock argument.
        self.mock_model_op = mock_model_op
        self.mock_lm_model = mock_lm_model
        # Give LmModel a concrete mock instance the flow can wire into ModelOp.
        self.mock_lm_model.return_value = MagicMock()
        self.prompt_template = PromptTemplate(
            instruction="instruction", few_shot_prompt=[{}]
        )
        self.model_config = {"response_format": {"type": "json_object"}}
        self.google_flow = GoogleModelFlow(self.prompt_template, self.model_config)

    def test_init(self):
        """Construction wraps the LmModel instance in a named ModelOp."""
        self.mock_model_op.assert_called_once_with(
            name="google_model_op",
            model=self.mock_lm_model.return_value,  # the mocked LmModel instance
        )

    def test_run(self):
        """run() delegates the nodes to the ModelOp and returns its result."""
        nodes = [
            Node(name="node1", value_dict={"a": 1}),
            Node(name="node2", value_dict={"b": 2}),
        ]
        result = self.google_flow.run(nodes)
        self.mock_model_op.return_value.assert_called_once_with(nodes)
        # Compare against the mock's configured result without calling the
        # mock a second time (which would invalidate the called-once check).
        self.assertEqual(result, self.mock_model_op.return_value.return_value)


if __name__ == "__main__":
    unittest.main()
45 changes: 45 additions & 0 deletions tests/flow/transform/test_google_multimodal_flow.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
import unittest
from unittest.mock import MagicMock, patch

from uniflow.flow.transform.transform_google_multimodal_flow import (
GoogleMultiModalModelFlow,
)
from uniflow.node import Node
from uniflow.op.prompt import PromptTemplate


class TestGoogleMultiModalModelFlow(unittest.TestCase):
    """Tests for GoogleMultiModalModelFlow with MmModel and ModelOp mocked."""

    @patch("uniflow.flow.transform.transform_google_multimodal_flow.MmModel")
    @patch("uniflow.flow.transform.transform_google_multimodal_flow.ModelOp")
    def setUp(self, mock_model_op, mock_mm_model):
        self.mock_model_op = mock_model_op
        self.mock_mm_model = mock_mm_model
        # Give MmModel a concrete mock instance the flow can wire into ModelOp.
        self.mock_mm_model.return_value = MagicMock()
        self.prompt_template = PromptTemplate(
            instruction="instruction", few_shot_prompt=[{}]
        )
        self.model_config = {"response_format": {"type": "json_object"}}
        self.google_mm_flow = GoogleMultiModalModelFlow(
            self.prompt_template, self.model_config
        )

    def test_init(self):
        """Construction wraps the MmModel instance in a named ModelOp."""
        self.mock_model_op.assert_called_once_with(
            name="google_mm_model_op",
            model=self.mock_mm_model.return_value,  # the mocked MmModel instance
        )

    def test_run(self):
        """run() forwards the nodes to the ModelOp and returns its output."""
        input_nodes = [
            Node(name="node1", value_dict={"a": 1}),
            Node(name="node2", value_dict={"b": 2}),
        ]
        # Have the mocked ModelOp echo its input back for a simple equality check.
        self.mock_model_op.return_value.return_value = input_nodes
        output = self.google_mm_flow.run(input_nodes)
        self.mock_model_op.return_value.assert_called_once_with(input_nodes)
        self.assertEqual(output, input_nodes)


if __name__ == "__main__":
    unittest.main()
54 changes: 54 additions & 0 deletions tests/flow/transform/test_huggingface_flow.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
import unittest
from unittest.mock import MagicMock, patch

from uniflow.flow.transform.transform_huggingface_flow import (
HuggingFaceModelFlow, # Update the import path as necessary
)
from uniflow.node import Node
from uniflow.op.prompt import PromptTemplate


class TestHuggingFaceModelFlow(unittest.TestCase):
    """Tests for HuggingFaceModelFlow with LmModel and ModelOp mocked."""

    @patch("uniflow.flow.transform.transform_huggingface_flow.ModelOp")
    @patch("uniflow.flow.transform.transform_huggingface_flow.LmModel")
    def setUp(self, mock_lm_model, mock_model_op):
        self.mock_model_op = mock_model_op
        self.mock_lm_model = mock_lm_model
        # Give LmModel a concrete mock instance the flow can wire into ModelOp.
        self.mock_lm_model.return_value = MagicMock()
        self.prompt_template = PromptTemplate(
            instruction="instruction", few_shot_prompt=[{}]
        )
        self.model_config = {"response_format": {"type": "json_object"}}
        self.huggingface_flow = HuggingFaceModelFlow(
            self.prompt_template, self.model_config
        )

    def test_init(self):
        """Construction wraps the LmModel instance in a named ModelOp."""
        self.mock_model_op.assert_called_once_with(
            name="huggingface_model_op", model=self.mock_lm_model.return_value
        )

    def test_run(self):
        """run() forwards the nodes to the ModelOp and returns its output."""
        input_nodes = [
            Node(name="node1", value_dict={"a": 1}),
            Node(name="node2", value_dict={"b": 2}),
        ]
        # Have the mocked ModelOp echo its input back for a simple equality check.
        self.mock_model_op.return_value.return_value = input_nodes
        output = self.huggingface_flow.run(input_nodes)
        self.mock_model_op.return_value.assert_called_once_with(input_nodes)
        self.assertEqual(output, input_nodes)


if __name__ == "__main__":
    unittest.main()
46 changes: 46 additions & 0 deletions tests/flow/transform/test_lmqg_flow.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
import unittest
from unittest.mock import MagicMock, patch

from uniflow.flow.transform.transform_lmqg_flow import TransformLMQGFlow
from uniflow.node import Node
from uniflow.op.prompt import PromptTemplate


class TestTransformLMQGFlow(unittest.TestCase):
    """Tests for TransformLMQGFlow with LmModel and ModelOp mocked."""

    @patch("uniflow.flow.transform.transform_lmqg_flow.ModelOp")
    @patch("uniflow.flow.transform.transform_lmqg_flow.LmModel")
    def setUp(self, mock_lm_model, mock_model_op):
        self.mock_model_op = mock_model_op
        self.mock_lm_model = mock_lm_model
        # Give LmModel a concrete mock instance the flow can wire into ModelOp.
        self.mock_lm_model.return_value = MagicMock()
        self.prompt_template = PromptTemplate(
            instruction="instruction", few_shot_prompt=[{}]
        )
        self.model_config = {"response_format": {"type": "json_object"}}
        self.lmqg_flow = TransformLMQGFlow(self.prompt_template, self.model_config)

    def test_init(self):
        """Construction wraps the LmModel instance in a named ModelOp."""
        self.mock_model_op.assert_called_once_with(
            name="lmqg_model_op", model=self.mock_lm_model.return_value
        )

    def test_run(self):
        """run() forwards the nodes to the ModelOp and returns its output."""
        input_nodes = [
            Node(name="node1", value_dict={"a": 1}),
            Node(name="node2", value_dict={"b": 2}),
        ]
        # Have the mocked ModelOp echo its input back for a simple equality check.
        self.mock_model_op.return_value.return_value = input_nodes
        output = self.lmqg_flow.run(input_nodes)
        self.mock_model_op.return_value.assert_called_once_with(input_nodes)
        self.assertEqual(output, input_nodes)


if __name__ == "__main__":
    unittest.main()
Loading
Loading