From c45b943476f8eb6a12b5294520a589e7dc5c69ca Mon Sep 17 00:00:00 2001 From: Meet-Shah Date: Mon, 16 Sep 2024 09:23:41 +0000 Subject: [PATCH 01/25] added metadata in tag of worker image --- README.md | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index dea0cb0..311dcd6 100644 --- a/README.md +++ b/README.md @@ -247,11 +247,9 @@ account can have required permissions. 2) Create service image from the code and host it on [gcr.io](http://gcr.io) ```bash - docker build --tag gcr.io/$PROJECT_ID/sight-default -f sight_service/Dockerfile . - - gcloud auth print-access-token | docker login -u oauth2accesstoken --password-stdin https://gcr.io - - docker push gcr.io/$PROJECT_ID/sight-default + docker build --tag gcr.io/$PROJECT_ID/sight-default:$(git rev-parse --abbrev-ref HEAD)-$(git rev-parse --short HEAD) -f sight_service/Dockerfile . && \ + gcloud auth print-access-token | docker login -u oauth2accesstoken --password-stdin https://gcr.io && \ + docker push gcr.io/$PROJECT_ID/sight-default:$(git rev-parse --abbrev-ref HEAD)-$(git rev-parse --short HEAD) ``` 3) With the help of the image, launch cloud run service @@ -266,11 +264,9 @@ Host the worker image in a cloud which will be used as default image by the workers spawned using sight unless specified otherwise. ```bash -docker build --tag gcr.io/$PROJECT_ID/sight-worker -f py/Dockerfile . - -gcloud auth print-access-token | docker login -u oauth2accesstoken --password-stdin https://gcr.io - -docker push gcr.io/$PROJECT_ID/sight-worker +docker build --tag gcr.io/$PROJECT_ID/sight-worker:$(git rev-parse --abbrev-ref HEAD)-$(git rev-parse --short HEAD) -f py/Dockerfile . && \ +gcloud auth print-access-token | docker login -u oauth2accesstoken --password-stdin https://gcr.io && \ +docker push gcr.io/$PROJECT_ID/sight-worker:$(git rev-parse --abbrev-ref HEAD)-$(git rev-parse --short HEAD) ``` ## Logging API From bbe9065754bdfd4ba2a7b0c2e13ba19ac1eed7a7 Mon Sep 17 00:00:00 2001 From: Meet-Shah Date: Mon, 16 Sep 2024 09:43:16 +0000 Subject: [PATCH 02/25] changed to cloud logger and some auto-format changes --- py/helpers/cache/cache_gcs.py | 8 +- py/helpers/cache/cache_local.py | 77 +- py/helpers/cache/cache_none.py | 2 +- py/helpers/cache/cache_redis.py | 2 +- py/helpers/logs/logs_handler.py | 33 + py/sight/block.py | 157 +- py/sight/demo/cartpole/demo_cartpole.py | 106 +- py/sight/demo/cartpole/demo_pendulum.py | 89 +- py/sight/demo/cartpole/driver_cartpole.py | 31 +- py/sight/demo/cartpole/driver_pendulum.py | 25 +- py/sight/demo/cartpole/gym_demo.py | 6 +- py/sight/demo/fn_sphere_parallel.py | 273 ++-- py/sight/demo/search_optimization.py | 161 +- py/sight/demo/secret_find.py | 71 +- py/sight/demo/sir.py | 200 +-- py/sight/demo/spawn_workers.py | 51 +- py/sight/demo/volterra_lotka.py | 266 +-- py/sight/exception.py | 29 +- py/sight/gcs_utils.py | 156 +- py/sight/service_utils.py | 75 +- py/sight/sight.py | 1358 ++++++++-------- .../decision/acme/acme_optimizer_client.py | 669 ++++---- py/sight/widgets/decision/acme/sight_adder.py | 155 +- .../decision/analyze_decision_outcomes.py | 706 ++++---- py/sight/widgets/decision/converse.py | 57 +- py/sight/widgets/decision/current_status.py | 57 +- py/sight/widgets/decision/decision.py | 155 +- .../widgets/decision/decision_episode_fn.py | 5 +- py/sight/widgets/decision/env_driver.py | 44 +- py/sight/widgets/decision/get_outcome.py | 50 +- py/sight/widgets/decision/listen.py | 40 +- .../widgets/decision/llm_optimizer_client.py | 3 +- 
.../widgets/decision/shower_env_driver.py | 63 +- .../single_action_optimizer_client.py | 3 +- py/sight/widgets/decision/tell.py | 35 +- py/sight/widgets/decision/trials.py | 537 +++---- py/sight/widgets/numpy_sight/numpy_sight.py | 187 ++- py/sight/widgets/pandas_sight/pandas_sight.py | 169 +- py/sight/widgets/simulation/analysis_utils.py | 921 +++++------ py/sight/widgets/simulation/bulk_inference.py | 42 +- .../widgets/simulation/fine_tune_gemini.py | 49 +- .../simulation/generate_log_trans_dataset.py | 289 ++-- py/sight/widgets/simulation/run_trace.py | 663 ++++---- py/sight/widgets/simulation/simulation.py | 205 ++- .../simulation/simulation_parameters.py | 149 +- .../widgets/simulation/simulation_state.py | 281 ++-- .../simulation/simulation_time_step.py | 162 +- .../widgets/simulation/train_surrogate.py | 3 +- .../tensorflow_sight/tensorflow_sight.py | 3 +- sight_service/acme_optimizer.py | 23 +- sight_service/bayesian_opt.py | 222 +-- sight_service/exhaustive_search.py | 427 ++--- sight_service/genetic_algorithm.py | 733 +++++---- sight_service/llm.py | 1421 ++++++++--------- sight_service/nevergrad_opt.py | 84 +- sight_service/optimizer_instance.py | 274 ++-- sight_service/sensitivity_analysis.py | 324 ++-- sight_service/service_root.py | 23 +- sight_service/service_utils.py | 321 ++-- sight_service/single_action_optimizer.py | 19 +- sight_service/smc_py.py | 400 ++--- sight_service/vizier.py | 339 ++-- sight_service/worklist_scheduler_opt.py | 141 +- 63 files changed, 6725 insertions(+), 6904 deletions(-) create mode 100644 py/helpers/logs/logs_handler.py diff --git a/py/helpers/cache/cache_gcs.py b/py/helpers/cache/cache_gcs.py index 2b4bd3e..a723f38 100644 --- a/py/helpers/cache/cache_gcs.py +++ b/py/helpers/cache/cache_gcs.py @@ -1,17 +1,15 @@ import json from pathlib import Path -import logging +from helpers.logs.logs_handler import logger as logging from google.cloud import storage from .cache_interface import CacheInterface -from .cache_redis import RedisCache +from .cache_redis import RedisCache class GCSCache(CacheInterface): - def __init__(self, - config={}, - with_redis_client: RedisCache | None = None): + def __init__(self, config={}, with_redis_client: RedisCache | None = None): gcs_client = storage.Client() bucket_name = config.get('gcs_bucket', 'cameltrain-sight') self.bucket = gcs_client.bucket(bucket_name=bucket_name) diff --git a/py/helpers/cache/cache_local.py b/py/helpers/cache/cache_local.py index d50a0fb..753a3b3 100644 --- a/py/helpers/cache/cache_local.py +++ b/py/helpers/cache/cache_local.py @@ -1,6 +1,6 @@ import json import os -import logging +from helpers.logs.logs_handler import logger as logging from pathlib import Path from redis import StrictRedis @@ -10,42 +10,43 @@ class LocalCache(CacheInterface): - def __init__(self, - config: dict = {}, - with_redis_client: StrictRedis | None = None): - base_dir = config.get("local_base_dir", "./.cache_local_data") - self.redis_client = with_redis_client - self.current_script_path = os.path.dirname(os.path.abspath(__file__)) - self.base_dir = os.path.join(self.current_script_path, f"../../{base_dir}") - - def _local_cache_path(self, key: str): - return Path(self.base_dir) / Path(key).with_suffix(".json") - - def json_get(self, key: str): - if self.redis_client: - try: - value = self.redis_client.json_get(key=key) - if value: - return value - except Exception as e: - logging.warning("GOT THE ISSUE IN REDIS", e) + def __init__(self, + config: dict = {}, + with_redis_client: StrictRedis | None = None): + base_dir 
= config.get("local_base_dir", "./.cache_local_data")
+        self.redis_client = with_redis_client
+        self.current_script_path = os.path.dirname(os.path.abspath(__file__))
+        self.base_dir = os.path.join(self.current_script_path,
+                                     f"../../{base_dir}")
+
+    def _local_cache_path(self, key: str):
+        return Path(self.base_dir) / Path(key).with_suffix(".json")
+
+    def json_get(self, key: str):
+        if self.redis_client:
+            try:
+                value = self.redis_client.json_get(key=key)
+                if value:
+                    return value
+            except Exception as e:
+                logging.warning("Redis cache read failed: %s", e)
+            return None
+        path = self._local_cache_path(key.replace(":", "/"))
+        if path.exists():
+            with open(path, "r") as file:
+                value = json.load(file)
+                if self.redis_client:
+                    self.redis_client.json_set(key, value)
+                return value
         return None
-    path = self._local_cache_path(key.replace(":", "/"))
-    if path.exists():
-      with open(path, "r") as file:
-        value = json.load(file)
+
+    def json_set(self, key, value):
         if self.redis_client:
-          self.redis_client.json_set(key, value)
-        return value
-    return None
-
-  def json_set(self, key, value):
-    if self.redis_client:
-      try:
-        self.redis_client.json_set(key=key, value=value)
-      except Exception as e:
-        logging.warning("GOT THE ISSUE IN REDIS", e)
-    path = self._local_cache_path(key.replace(":", "/"))
-    path.parent.mkdir(parents=True, exist_ok=True)
-    with open(path, "w") as file:
-      json.dump(value, file)
+            try:
+                self.redis_client.json_set(key=key, value=value)
+            except Exception as e:
+                logging.warning("Redis cache write failed: %s", e)
+        path = self._local_cache_path(key.replace(":", "/"))
+        path.parent.mkdir(parents=True, exist_ok=True)
+        with open(path, "w") as file:
+            json.dump(value, file)
diff --git a/py/helpers/cache/cache_none.py b/py/helpers/cache/cache_none.py
index 44d3c4b..6fce380 100644
--- a/py/helpers/cache/cache_none.py
+++ b/py/helpers/cache/cache_none.py
@@ -1,6 +1,6 @@
 import json
 import os
-import logging
+from helpers.logs.logs_handler import logger as logging
 
 from .cache_interface import CacheInterface
 from typing import Any
diff --git a/py/helpers/cache/cache_redis.py b/py/helpers/cache/cache_redis.py
index c69964d..9f99cd2 100644
--- a/py/helpers/cache/cache_redis.py
+++ b/py/helpers/cache/cache_redis.py
@@ -1,5 +1,5 @@
 import json
-import logging
+from helpers.logs.logs_handler import logger as logging
 
 import redis
 from redis.commands.json.path import Path
diff --git a/py/helpers/logs/logs_handler.py b/py/helpers/logs/logs_handler.py
new file mode 100644
index 0000000..f577fe3
--- /dev/null
+++ b/py/helpers/logs/logs_handler.py
@@ -0,0 +1,20 @@
+import logging
+from google.cloud import logging as cloud_logging
+
+# Instantiates a client
+logging_client = cloud_logging.Client()
+
+# Retrieves a Cloud Logging handler based on the environment
+# you're running in and integrates the handler with the
+# Python logging module.
By default this captures all logs +# at INFO level and higher +handler = logging_client.get_default_handler() + +# Set up Python logging +logger = logging.getLogger("cloudLogger") +logger.setLevel(logging.DEBUG) +logger.addHandler(handler) + +# Example of logging +# logger.info("This is an info message logged to GCP") + diff --git a/py/sight/block.py b/py/sight/block.py index bf9b7a0..e6cf96c 100644 --- a/py/sight/block.py +++ b/py/sight/block.py @@ -11,12 +11,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Hierarchical blocks in the Sight log.""" import inspect from typing import Dict, Optional, Text -from absl import logging +from helpers.logs.logs_handler import logger as logging from sight.exception import exception from sight.location import Location from sight.proto import sight_pb2 @@ -24,34 +23,35 @@ class Block(object): - """Encapsulates start and stop points where a Sight log block is active.""" - - def __init__(self, *args): - if len(args) == 2: - (label, sight) = args - self.create_label(label, sight) - return - if len(args) == 3 and isinstance(args[2], dict): - (label, sight, attributes) = args - self.create_label(label, sight, attributes) - return - - if len(args) == 3: - (key, value, sight) = args - self.create_label('%s=%s' % (key, value), sight, {str(key): str(value)}) - return - (key, value, sight, attributes) = args - full_attributes = attributes.copy() - full_attributes[key] = value - self.create_label('%s=%s' % (key, value), sight, full_attributes) - - def create_label( - self, - label: str, - sight: Sight, - attributes: Optional[Dict[Text, Text]] = None, - ) -> Optional[Location]: - """Creates and enters a block with a given label and attributes. + """Encapsulates start and stop points where a Sight log block is active.""" + + def __init__(self, *args): + if len(args) == 2: + (label, sight) = args + self.create_label(label, sight) + return + if len(args) == 3 and isinstance(args[2], dict): + (label, sight, attributes) = args + self.create_label(label, sight, attributes) + return + + if len(args) == 3: + (key, value, sight) = args + self.create_label('%s=%s' % (key, value), sight, + {str(key): str(value)}) + return + (key, value, sight, attributes) = args + full_attributes = attributes.copy() + full_attributes[key] = value + self.create_label('%s=%s' % (key, value), sight, full_attributes) + + def create_label( + self, + label: str, + sight: Sight, + attributes: Optional[Dict[Text, Text]] = None, + ) -> Optional[Location]: + """Creates and enters a block with a given label and attributes. Args: label: The label that identifies this block. @@ -62,53 +62,50 @@ def create_label( Returns: The starting location of this block. 
""" - self.sight = sight - if sight is None: - logging.info('<<< %s', label) - return None - - if not self.sight.is_logging_enabled(): - return None - - self.label = label - if attributes: - self.attributes = attributes - else: - self.attributes = dict() - for key in sorted(self.attributes.keys()): - self.sight.set_attribute(key, self.attributes.get(key)) - # pytype: disable=attribute-error - return self.sight.enter_block( - self.label, sight_pb2.Object(), inspect.currentframe().f_back.f_back - ) - # pytype: enable=attribute-error - - def __enter__(self): - return self - - def __exit__(self, exc_type, value, traceback): - if not self.sight: - return - - if not self.sight.is_logging_enabled(): - return - - if exc_type is not None: - # pytype: disable=attribute-error - exception( - exc_type, value, traceback, self.sight, inspect.currentframe().f_back - ) - # pytype: enable=attribute-error - - if self.sight is None: - logging.info('>>> %s', self.label) - return - - # pytype: disable=attribute-error - self.sight.exit_block( - self.label, sight_pb2.Object(), inspect.currentframe().f_back - ) - # pytype: enable=attribute-error - - for key in sorted(self.attributes.keys(), reverse=True): - self.sight.unset_attribute(key) + self.sight = sight + if sight is None: + logging.info('<<< %s', label) + return None + + if not self.sight.is_logging_enabled(): + return None + + self.label = label + if attributes: + self.attributes = attributes + else: + self.attributes = dict() + for key in sorted(self.attributes.keys()): + self.sight.set_attribute(key, self.attributes.get(key)) + # pytype: disable=attribute-error + return self.sight.enter_block(self.label, sight_pb2.Object(), + inspect.currentframe().f_back.f_back) + # pytype: enable=attribute-error + + def __enter__(self): + return self + + def __exit__(self, exc_type, value, traceback): + if not self.sight: + return + + if not self.sight.is_logging_enabled(): + return + + if exc_type is not None: + # pytype: disable=attribute-error + exception(exc_type, value, traceback, self.sight, + inspect.currentframe().f_back) + # pytype: enable=attribute-error + + if self.sight is None: + logging.info('>>> %s', self.label) + return + + # pytype: disable=attribute-error + self.sight.exit_block(self.label, sight_pb2.Object(), + inspect.currentframe().f_back) + # pytype: enable=attribute-error + + for key in sorted(self.attributes.keys(), reverse=True): + self.sight.unset_attribute(key) diff --git a/py/sight/demo/cartpole/demo_cartpole.py b/py/sight/demo/cartpole/demo_cartpole.py index 9e8a825..f8b2a57 100644 --- a/py/sight/demo/cartpole/demo_cartpole.py +++ b/py/sight/demo/cartpole/demo_cartpole.py @@ -11,21 +11,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- """Demo of using the Sight Decision API to train gym environment.""" import warnings def warn(*args, **kwargs): - pass + pass warnings.warn = warn import os import gym -import logging +from helpers.logs.logs_handler import logger as logging + import numpy as np from typing import Sequence from absl import app @@ -39,57 +39,63 @@ def warn(*args, **kwargs): FLAGS = flags.FLAGS + def get_sight_instance(): - params = sight_pb2.Params( - label='cartpole_experiment', - bucket_name=f'{os.environ["PROJECT_ID"]}-sight', - ) - sight_obj = Sight(params) - return sight_obj + params = sight_pb2.Params( + label='cartpole_experiment', + bucket_name=f'{os.environ["PROJECT_ID"]}-sight', + ) + sight_obj = Sight(params) + return sight_obj def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError("Too many command-line arguments.") - - with get_sight_instance() as sight: - # decision.run(sight=sight, env=wrappers.GymWrapper(gym.make("CartPole-v1"))) - decision.run( - state_attrs={ - "Cart Position": sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=-4.8, - max_value=4.8, - ), - "Cart Velocity": sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=-3.40, - max_value=3.40, - ), - "Pole Angle": sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=-0.418, - max_value=0.418, - ), - "Pole Angular Velocity": sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=-3.40, - max_value=3.40, - ), - }, - action_attrs={ - # "direction": sight_pb2.DecisionConfigurationStart.AttrProps( - # valid_int_values=[0,1], - # ), - "direction": sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=0, - max_value=1, - # step_size=1 - ), - }, - # env=wrappers.GymWrapper(gym.make('CartPole-v1')), - driver_fn=driver_fn, - sight=sight, - ) + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") + + with get_sight_instance() as sight: + # decision.run(sight=sight, env=wrappers.GymWrapper(gym.make("CartPole-v1"))) + decision.run( + state_attrs={ + "Cart Position": + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=-4.8, + max_value=4.8, + ), + "Cart Velocity": + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=-3.40, + max_value=3.40, + ), + "Pole Angle": + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=-0.418, + max_value=0.418, + ), + "Pole Angular Velocity": + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=-3.40, + max_value=3.40, + ), + }, + action_attrs={ + # "direction": sight_pb2.DecisionConfigurationStart.AttrProps( + # valid_int_values=[0,1], + # ), + "direction": + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=0, + max_value=1, + # step_size=1 + ), + }, + # env=wrappers.GymWrapper(gym.make('CartPole-v1')), + driver_fn=driver_fn, + sight=sight, + ) if __name__ == "__main__": - logging.basicConfig(level=logging.DEBUG,) - # print(logging.getLogger(__name__)) - app.run(main) + logging.basicConfig(level=logging.DEBUG, ) + # print(logging.getLogger(__name__)) + app.run(main) diff --git a/py/sight/demo/cartpole/demo_pendulum.py b/py/sight/demo/cartpole/demo_pendulum.py index d6c6406..0c93bd2 100644 --- a/py/sight/demo/cartpole/demo_pendulum.py +++ b/py/sight/demo/cartpole/demo_pendulum.py @@ -11,21 +11,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- """Demo of using the Sight Decision API to train gym environment.""" import warnings def warn(*args, **kwargs): - pass + pass warnings.warn = warn import os import gym -import logging +from helpers.logs.logs_handler import logger as logging + import numpy as np from typing import Sequence from absl import app @@ -39,49 +39,54 @@ def warn(*args, **kwargs): FLAGS = flags.FLAGS + def get_sight_instance(): - params = sight_pb2.Params( - label='cartpole_experiment', - bucket_name=f'{os.environ["PROJECT_ID"]}-sight', - ) - sight_obj = Sight(params) - return sight_obj + params = sight_pb2.Params( + label='cartpole_experiment', + bucket_name=f'{os.environ["PROJECT_ID"]}-sight', + ) + sight_obj = Sight(params) + return sight_obj def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError("Too many command-line arguments.") - - with get_sight_instance() as sight: - # decision.run(sight=sight, env=wrappers.GymWrapper(gym.make("CartPole-v1"))) - decision.run( - state_attrs={ - "x": sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=-1.0, - max_value=1.0, - ), - "y": sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=-1.0, - max_value=1.0, - ), - "Angular Velocity": sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=-8.0, - max_value=8.0, - ), - }, - action_attrs={ - "Torque": sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=-2.0, - max_value=2.0, - # step_size=1 - ), - }, - driver_fn=driver_fn, - sight=sight, - ) + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") + + with get_sight_instance() as sight: + # decision.run(sight=sight, env=wrappers.GymWrapper(gym.make("CartPole-v1"))) + decision.run( + state_attrs={ + "x": + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=-1.0, + max_value=1.0, + ), + "y": + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=-1.0, + max_value=1.0, + ), + "Angular Velocity": + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=-8.0, + max_value=8.0, + ), + }, + action_attrs={ + "Torque": + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=-2.0, + max_value=2.0, + # step_size=1 + ), + }, + driver_fn=driver_fn, + sight=sight, + ) if __name__ == "__main__": - logging.basicConfig(level=logging.DEBUG,) - # print(logging.getLogger(__name__)) - app.run(main) + logging.basicConfig(level=logging.DEBUG, ) + # print(logging.getLogger(__name__)) + app.run(main) diff --git a/py/sight/demo/cartpole/driver_cartpole.py b/py/sight/demo/cartpole/driver_cartpole.py index fbe4ef9..af13f99 100644 --- a/py/sight/demo/cartpole/driver_cartpole.py +++ b/py/sight/demo/cartpole/driver_cartpole.py @@ -13,7 +13,8 @@ # limitations under the License. 
"""Default Driver function to be used while training within the Sight log.""" -import logging +from helpers.logs.logs_handler import logger as logging + import gym import numpy as np import dm_env @@ -72,12 +73,9 @@ def step(action): costheta = math.cos(theta) sintheta = math.sin(theta) - temp = ( - force + polemass_length * theta_dot**2 * sintheta - ) / total_mass + temp = (force + polemass_length * theta_dot**2 * sintheta) / total_mass thetaacc = (gravity * sintheta - costheta * temp) / ( - length * (4.0 / 3.0 - masspole * costheta**2 / total_mass) - ) + length * (4.0 / 3.0 - masspole * costheta**2 / total_mass)) xacc = temp - polemass_length * thetaacc * costheta / total_mass x = x + tau * x_dot @@ -86,18 +84,16 @@ def step(action): theta_dot = theta_dot + tau * thetaacc state = (x, x_dot, theta, theta_dot) - terminated = bool( - x < -x_threshold - or x > x_threshold - or theta < -theta_threshold_radians - or theta > theta_threshold_radians - ) + terminated = bool(x < -x_threshold or x > x_threshold + or theta < -theta_threshold_radians + or theta > theta_threshold_radians) if not terminated: reward = 1.0 else: reward = 0.0 - observation, reward, done, info = np.array(state, dtype=np.float32), reward, terminated, {} + observation, reward, done, info = np.array( + state, dtype=np.float32), reward, terminated, {} reset_next_step = done # Convert the type of the reward based on the spec, respecting the scalar or @@ -110,10 +106,10 @@ def step(action): specs.Array(shape=(), dtype=float, name='reward')) if done: - truncated = info.get('TimeLimit.truncated', False) - if truncated: - return dm_env.truncation(reward, observation) - return dm_env.termination(reward, observation) + truncated = info.get('TimeLimit.truncated', False) + if truncated: + return dm_env.truncation(reward, observation) + return dm_env.termination(reward, observation) return dm_env.transition(reward, observation) @@ -133,7 +129,6 @@ def driver_fn(sight) -> None: for i in range(len(state_attrs)): data_structures.log_var(state_attrs[i], timestep.observation[i], sight) - while not timestep.last(): chosen_action = decision.decision_point("DP_label", sight) timestep = step(chosen_action) diff --git a/py/sight/demo/cartpole/driver_pendulum.py b/py/sight/demo/cartpole/driver_pendulum.py index 3a3bf60..44788cc 100644 --- a/py/sight/demo/cartpole/driver_pendulum.py +++ b/py/sight/demo/cartpole/driver_pendulum.py @@ -13,7 +13,8 @@ # limitations under the License. 
"""Default Driver function to be used while training within the Sight log.""" -import logging +from helpers.logs.logs_handler import logger as logging + import gym import numpy as np import dm_env @@ -27,7 +28,6 @@ _file_name = "driver.py" - reset_next_step = True state = None elapsed_steps = 0 @@ -55,7 +55,8 @@ def reset(): low = -high state = np_random.uniform(low=low, high=high) theta, thetadot = state - observation = np.array([np.cos(theta), np.sin(theta), thetadot], dtype=np.float32) + observation = np.array( + [np.cos(theta), np.sin(theta), thetadot], dtype=np.float32) return dm_env.restart(observation) @@ -71,15 +72,17 @@ def step(action): # step of pendulum th, thdot = state u = np.clip(action, -max_torque, max_torque)[0] - costs = angle_normalize(th) ** 2 + 0.1 * thdot**2 + 0.001 * (u**2) + costs = angle_normalize(th)**2 + 0.1 * thdot**2 + 0.001 * (u**2) - newthdot = thdot + (3 * g / (2 * l) * np.sin(th) + 3.0 / (m * l**2) * u) * dt + newthdot = thdot + (3 * g / (2 * l) * np.sin(th) + 3.0 / + (m * l**2) * u) * dt newthdot = np.clip(newthdot, -max_speed, max_speed) newth = th + newthdot * dt state = np.array([newth, newthdot]) theta, thetadot = state - latest_state = np.array([np.cos(theta), np.sin(theta), thetadot], dtype=np.float32) + latest_state = np.array( + [np.cos(theta), np.sin(theta), thetadot], dtype=np.float32) observation, reward, done, info = latest_state, -costs, False, {} elapsed_steps += 1 @@ -98,15 +101,17 @@ def step(action): specs.Array(shape=(), dtype=float, name='reward')) if done: - truncated = info.get('TimeLimit.truncated', False) - if truncated: - return dm_env.truncation(reward, observation) - return dm_env.termination(reward, observation) + truncated = info.get('TimeLimit.truncated', False) + if truncated: + return dm_env.truncation(reward, observation) + return dm_env.termination(reward, observation) return dm_env.transition(reward, observation) + def angle_normalize(x): return ((x + np.pi) % (2 * np.pi)) - np.pi + def driver_fn(sight) -> None: """Executes the logic of searching for a value. 
diff --git a/py/sight/demo/cartpole/gym_demo.py b/py/sight/demo/cartpole/gym_demo.py index 164af38..5180e76 100644 --- a/py/sight/demo/cartpole/gym_demo.py +++ b/py/sight/demo/cartpole/gym_demo.py @@ -24,8 +24,10 @@ def warn(*args, **kwargs): import os import gym -import logging -# import logging +from helpers.logs.logs_handler import logger as logging + +# from helpers.logs.logs_handler import logger as logging + from typing import Sequence from absl import app from absl import flags diff --git a/py/sight/demo/fn_sphere_parallel.py b/py/sight/demo/fn_sphere_parallel.py index a1b9093..e1bae0c 100644 --- a/py/sight/demo/fn_sphere_parallel.py +++ b/py/sight/demo/fn_sphere_parallel.py @@ -25,7 +25,7 @@ def warn(*args, **kwargs): from absl import app from absl import flags -from absl import logging +from helpers.logs.logs_handler import logger as logging import multiprocessing import subprocess import re @@ -46,14 +46,14 @@ def warn(*args, **kwargs): _DOCKER_FILE_PATH = 'sight_service/Dockerfile' _SERVICE_PREFIX = 'sight-' + def build_push_service_img(sight_id): build_out = subprocess.run( [ 'docker', 'build', '-t', - f'gcr.io/{_PROJECT_ID}/{_SERVICE_PREFIX}' + - sight_id, + f'gcr.io/{_PROJECT_ID}/{_SERVICE_PREFIX}' + sight_id, '-f', _DOCKER_FILE_PATH, '.', @@ -66,9 +66,9 @@ def build_push_service_img(sight_id): # Step 2: Retrieve an OAuth2 access token access_token_cmd = ['gcloud', 'auth', 'print-access-token'] access_token_result = subprocess.run(access_token_cmd, - capture_output=True, - text=True, - check=True) + capture_output=True, + text=True, + check=True) access_token = access_token_result.stdout.strip() # Step 3: Authenticate with gcr.io using the access token @@ -87,8 +87,7 @@ def build_push_service_img(sight_id): push_out = subprocess.run( [ 'docker', 'push', - f'gcr.io/{_PROJECT_ID}/{_SERVICE_PREFIX}' + - sight_id + f'gcr.io/{_PROJECT_ID}/{_SERVICE_PREFIX}' + sight_id ], check=True, capture_output=True, @@ -97,42 +96,61 @@ def build_push_service_img(sight_id): return f'gcr.io/{_PROJECT_ID}/{_SERVICE_PREFIX}' + sight_id + def delete_service_img(sight_id): - print('deleting image : gcr.io/' + _PROJECT_ID + '/' + _SERVICE_PREFIX + sight_id) - subprocess.run( - [ - 'gcloud', - 'container', - 'images', - 'delete', - f'gcr.io/{_PROJECT_ID}/{_SERVICE_PREFIX}{sight_id}', - '--quiet', - '--force-delete-tags', - ], - check=True, - ) + print('deleting image : gcr.io/' + _PROJECT_ID + '/' + _SERVICE_PREFIX + + sight_id) + subprocess.run( + [ + 'gcloud', + 'container', + 'images', + 'delete', + f'gcr.io/{_PROJECT_ID}/{_SERVICE_PREFIX}{sight_id}', + '--quiet', + '--force-delete-tags', + ], + check=True, + ) + def delete_service(service_name): print('deleting sight service') sight_service_name = _SERVICE_PREFIX + service_name - cmd_args = ['gcloud', 'run', 'services', 'delete', sight_service_name, '--quiet'] + cmd_args = [ + 'gcloud', 'run', 'services', 'delete', sight_service_name, '--quiet' + ] result = subprocess.run(args=cmd_args, capture_output=True, text=True) # print('result from deletion :', result) if result.returncode == 0: - print(f'Successfully deleted Cloud Run service: {_SERVICE_PREFIX}{service_name}') + print( + f'Successfully deleted Cloud Run service: {_SERVICE_PREFIX}{service_name}' + ) else: print(f'Error deleting Cloud Run service: {result.stderr}') def run_experiment(sight_id, optimizer_value, image_id, table_queue): cmd_args = [ - 'python', 'py/sight/demo/fn_sphere.py', '--decision_mode', 'train', - '--deployment_mode', 'distributed', '--num_train_workers', '1', - 
'--num_trials', '10', '--optimizer_type', optimizer_value, - '--docker_image', 'gcr.io/cameltrain/sight-worker', + 'python', + 'py/sight/demo/fn_sphere.py', + '--decision_mode', + 'train', + '--deployment_mode', + 'distributed', + '--num_train_workers', + '1', + '--num_trials', + '10', + '--optimizer_type', + optimizer_value, + '--docker_image', + 'gcr.io/cameltrain/sight-worker', # '--service_docker_file', 'sight_service/Dockerfile' - '--service_docker_img', image_id, - '--parent_id', sight_id + '--service_docker_img', + image_id, + '--parent_id', + sight_id ] result = subprocess.run(args=cmd_args, capture_output=True, text=True) # print('here result is : ', result.stdout) @@ -143,10 +161,12 @@ def run_experiment(sight_id, optimizer_value, image_id, table_queue): if (table_name and service_name): # print(result.stdout) - table_queue.put((optimizer_value, table_name.group(1), service_name.group(1))) + table_queue.put( + (optimizer_value, table_name.group(1), service_name.group(1))) # else: print(f'whole log from {optimizer_value} : ', result.stderr) + def get_sight_instance(): params = sight_pb2.Params( label='sphere_parallel', @@ -155,6 +175,7 @@ def get_sight_instance(): sight_obj = Sight(params) return sight_obj + def check_exp_status(exp_sight_id, exp_service_id): print('in check exp_status .........................') print(exp_sight_id, exp_service_id) @@ -162,108 +183,112 @@ def check_exp_status(exp_sight_id, exp_service_id): req = service_pb2.CurrentStatusRequest() req.client_id = exp_sight_id response = service.call( - lambda s, meta: s.CurrentStatus(req, 300, metadata=meta) - ) + lambda s, meta: s.CurrentStatus(req, 300, metadata=meta)) print('response :', response.status) - if(response.status == service_pb2.CurrentStatusResponse.Status.SUCCESS): - return True + if (response.status == service_pb2.CurrentStatusResponse.Status.SUCCESS): + return True else: - return False + return False + def main(argv: Sequence[str]) -> None: if len(argv) > 1: raise app.UsageError("Too many command-line arguments.") with get_sight_instance() as sight: - image_id = build_push_service_img(str(sight.id)) - print('image_id : ', image_id) - sight.text(image_id) - - # optimizer_values = ['ng_random_search', 'ng_pso', 'ng_cga'] - # optimizer_values = ['ng_random_search', 'ng_pso', 'ng_cga', 'ng_es', 'ng_dl_opo', 'ng_dde'] - optimizer_values = ['ng_cga'] - # optimizer_values = ['bayesian_opt'] - - # optimizer_values = [ - # 'ng_random_search', 'ng_pso', 'ng_de', 'ng_cga', 'ng_es', 'ng_dl_opo', 'ng_dde', - # 'ng_nmm', 'ng_tiny_spsa', 'ng_scr_hammersley_search', - # 'ng_two_points_de', 'ng_cma_small', 'ng_cma', 'ng_auto', 'ng_bo', - # 'ng_voronoi_de', 'bayesian_opt' - # ] - table_queue = multiprocessing.Queue() - processes = [] - - for optimizer_value in optimizer_values: - process = multiprocessing.Process(target=run_experiment, - args=(str(sight.id), optimizer_value, image_id, table_queue)) - processes.append(process) - process.start() - print('all process started.....') - - for process in processes: - process.join() - print('all process finished.....') - - delete_service_img(str(sight.id)) - - experiment_details = {} - while not table_queue.empty(): - optimizer_value, table_name, service_name = table_queue.get() - with Block("Superscript Experiment Details", sight): - with Attribute("optimizer", optimizer_value, sight): - sight_id_match = re.search(r'\.(.*?)_log$', table_name) - exp_sight_id = sight_id_match.group(1) - # with Attribute("sight_id", exp_sight_id, sight): - # with Attribute("table_name", 
table_name, sight): - # sight.text(f"{optimizer_value}:{exp_sight_id}") - sight_obj = sight_pb2.Object() - sight_obj.sub_type = sight_pb2.Object.SubType.ST_LINK - sight_obj.link.linked_sight_id = str(exp_sight_id) - sight_obj.link.link_type = sight_pb2.Link.LinkType.LT_PARENT_TO_CHILD - frame = inspect.currentframe().f_back.f_back.f_back - sight.set_object_code_loc(sight_obj, frame) - sight.log_object(sight_obj, True) - experiment_details[optimizer_value] = [exp_sight_id,table_name,service_name] - - print('experiment_details : ', experiment_details) - - print('waiting for all experiments to get completed.......') - completed_services = [] - while True: - print("checking if remaining experiments got compelted or not, to delete it's service") - # completed_services = [] - # for k,v in experiment_details.items(): - # if check_exp_status(v[0], v[2]): - # service_name = v[2] - # sight_id = v[0] - # completed_services.append(service_name) - # del experiment_details[k] - # delete_service(service_name) - - for k in list(experiment_details.keys()): - v = experiment_details[k] - if check_exp_status(v[0], v[2]): - service_name = v[2] - # sight_id = v[0] - completed_services.append(service_name) - del experiment_details[k] - delete_service(service_name) - - # Check if all services have succeeded - print('completed_services : ', completed_services) - if len(completed_services) == len(optimizer_values): - # print() - break # All services have succeeded, exit loop - - # Wait for some time before polling again - print('going in sleep mode for 60 sec') - time.sleep(60) # Polling interval of 60 seconds - - logging.info( - 'Log GUI : https://streamlit-app-dq7fdwqgbq-uc.a.run.app/?' - 'log_id=%s', - str(sight.id) - ) + image_id = build_push_service_img(str(sight.id)) + print('image_id : ', image_id) + sight.text(image_id) + + # optimizer_values = ['ng_random_search', 'ng_pso', 'ng_cga'] + # optimizer_values = ['ng_random_search', 'ng_pso', 'ng_cga', 'ng_es', 'ng_dl_opo', 'ng_dde'] + optimizer_values = ['ng_cga'] + # optimizer_values = ['bayesian_opt'] + + # optimizer_values = [ + # 'ng_random_search', 'ng_pso', 'ng_de', 'ng_cga', 'ng_es', 'ng_dl_opo', 'ng_dde', + # 'ng_nmm', 'ng_tiny_spsa', 'ng_scr_hammersley_search', + # 'ng_two_points_de', 'ng_cma_small', 'ng_cma', 'ng_auto', 'ng_bo', + # 'ng_voronoi_de', 'bayesian_opt' + # ] + table_queue = multiprocessing.Queue() + processes = [] + + for optimizer_value in optimizer_values: + process = multiprocessing.Process(target=run_experiment, + args=(str(sight.id), + optimizer_value, image_id, + table_queue)) + processes.append(process) + process.start() + print('all process started.....') + + for process in processes: + process.join() + print('all process finished.....') + + delete_service_img(str(sight.id)) + + experiment_details = {} + while not table_queue.empty(): + optimizer_value, table_name, service_name = table_queue.get() + with Block("Superscript Experiment Details", sight): + with Attribute("optimizer", optimizer_value, sight): + sight_id_match = re.search(r'\.(.*?)_log$', table_name) + exp_sight_id = sight_id_match.group(1) + # with Attribute("sight_id", exp_sight_id, sight): + # with Attribute("table_name", table_name, sight): + # sight.text(f"{optimizer_value}:{exp_sight_id}") + sight_obj = sight_pb2.Object() + sight_obj.sub_type = sight_pb2.Object.SubType.ST_LINK + sight_obj.link.linked_sight_id = str(exp_sight_id) + sight_obj.link.link_type = sight_pb2.Link.LinkType.LT_PARENT_TO_CHILD + frame = inspect.currentframe().f_back.f_back.f_back + 
sight.set_object_code_loc(sight_obj, frame)
+                    sight.log_object(sight_obj, True)
+                    experiment_details[optimizer_value] = [
+                        exp_sight_id, table_name, service_name
+                    ]
+
+        print('experiment_details : ', experiment_details)
+
+        print('waiting for all experiments to get completed.......')
+        completed_services = []
+        while True:
+            print(
+                "checking if remaining experiments have completed, so their services can be deleted"
+            )
+            # completed_services = []
+            # for k,v in experiment_details.items():
+            #     if check_exp_status(v[0], v[2]):
+            #         service_name = v[2]
+            #         sight_id = v[0]
+            #         completed_services.append(service_name)
+            #         del experiment_details[k]
+            #         delete_service(service_name)
+
+            for k in list(experiment_details.keys()):
+                v = experiment_details[k]
+                if check_exp_status(v[0], v[2]):
+                    service_name = v[2]
+                    # sight_id = v[0]
+                    completed_services.append(service_name)
+                    del experiment_details[k]
+                    delete_service(service_name)
+
+            # Check if all services have succeeded
+            print('completed_services : ', completed_services)
+            if len(completed_services) == len(optimizer_values):
+                # print()
+                break  # All services have succeeded, exit loop
+
+            # Wait for some time before polling again
+            print('sleeping for 60 sec before polling again')
+            time.sleep(60)  # Polling interval of 60 seconds
+
+        logging.info(
+            'Log GUI : https://streamlit-app-dq7fdwqgbq-uc.a.run.app/?'
+            'log_id=%s', str(sight.id))
 
 
 if __name__ == "__main__":
diff --git a/py/sight/demo/search_optimization.py b/py/sight/demo/search_optimization.py
index 55d7cf7..bd29670 100644
--- a/py/sight/demo/search_optimization.py
+++ b/py/sight/demo/search_optimization.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Demo of using the Sight Decision API to optimize an application."""
 
 import math
@@ -19,7 +18,7 @@
 from typing import Sequence
 
 from absl import app
-from absl import logging
+from helpers.logs.logs_handler import logger as logging
 from sight import data_structures
 from sight.proto import sight_pb2
 from sight.sight import Sight
@@ -27,97 +26,95 @@
 
 
 def diff_abs(x) -> float:
-  """Differentiable variant of the absolute value function."""
-  return math.sqrt(x * x + 0.1)
+    """Differentiable variant of the absolute value function."""
+    return math.sqrt(x * x + 0.1)
 
 
 def driver(sight: Sight) -> None:
-  """Executes the logic of searching for a value.
+    """Executes the logic of searching for a value.
 
   Args:
     sight: The Sight logger object used to drive decisions.
""" - for i in range(1): - target = random.randrange(0, 1000) - current = random.randrange(0, 1000) - - step = 0 - data_structures.log_var('current', current, sight) - data_structures.log_var('target', target, sight) - - step += 1 - while current != target and step < 100: - decision.decision_outcome('distance', -diff_abs(target - current), sight) - - data_structures.log_var('current', current, sight) - choice = decision.decision_point( - 'move', - sight, - lambda: { - 'go1': ( - random.randrange( - current // 2 if current < target else target // 2, - current * 2 if current > target else target * 2, - ) - - current - ), - # 'go2': - # random.randrange( - # current // 2 - # if current < target else target // 2, current * 2 - # if current > target else target * 2) - current, - # f'{math.ceil((target - current)/2) if target > current else math.floor((target - current)/2)}' - }, - ) - logging.info('choice=%s', choice) - - current += int( - choice['go1'] - ) # + choice['go2']) #int((choice*2 - 1)*100) - logging.info( - '%d: %d: amount=%s, current=%s, target=%s', - i, - step, - int(choice['go1']), - # int(choice['go2']), - current, - target, - ) - step += 1 - decision.decision_outcome('distance', -diff_abs(target - current), sight) + for i in range(1): + target = random.randrange(0, 1000) + current = random.randrange(0, 1000) + + step = 0 + data_structures.log_var('current', current, sight) + data_structures.log_var('target', target, sight) + + step += 1 + while current != target and step < 100: + decision.decision_outcome('distance', -diff_abs(target - current), + sight) + + data_structures.log_var('current', current, sight) + choice = decision.decision_point( + 'move', + sight, + lambda: { + 'go1': (random.randrange( + current // 2 if current < target else target // 2, + current * 2 if current > target else target * 2, + ) - current), + # 'go2': + # random.randrange( + # current // 2 + # if current < target else target // 2, current * 2 + # if current > target else target * 2) - current, + # f'{math.ceil((target - current)/2) if target > current else math.floor((target - current)/2)}' + }, + ) + logging.info('choice=%s', choice) + + current += int( + choice['go1']) # + choice['go2']) #int((choice*2 - 1)*100) + logging.info( + '%d: %d: amount=%s, current=%s, target=%s', + i, + step, + int(choice['go1']), + # int(choice['go2']), + current, + target, + ) + step += 1 + decision.decision_outcome('distance', -diff_abs(target - current), + sight) def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError('Too many command-line arguments.') - - params = sight_pb2.Params( - label='SearchOptimization', - log_owner='bronevet@google.com', - # local=True, - capacitor_output=True, - log_dir_path='/tmp/', - ) - - with Sight(params) as sight: - decision.run( - driver_fn=driver, - state_attrs={ - 'current': sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=0, max_value=1000 - ), - 'target': sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=0, max_value=1000 - ), - }, - action_attrs={ - 'go1': sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=0, max_value=100 - ), - }, - sight=sight, + if len(argv) > 1: + raise app.UsageError('Too many command-line arguments.') + + params = sight_pb2.Params( + label='SearchOptimization', + log_owner='bronevet@google.com', + # local=True, + capacitor_output=True, + log_dir_path='/tmp/', ) + with Sight(params) as sight: + decision.run( + driver_fn=driver, + state_attrs={ + 'current': + 
sight_pb2.DecisionConfigurationStart.AttrProps(min_value=0, + max_value=1000), + 'target': + sight_pb2.DecisionConfigurationStart.AttrProps(min_value=0, + max_value=1000), + }, + action_attrs={ + 'go1': + sight_pb2.DecisionConfigurationStart.AttrProps(min_value=0, + max_value=100), + }, + sight=sight, + ) + if __name__ == '__main__': - app.run(main) + app.run(main) diff --git a/py/sight/demo/secret_find.py b/py/sight/demo/secret_find.py index d1236af..07af7cf 100644 --- a/py/sight/demo/secret_find.py +++ b/py/sight/demo/secret_find.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Demo of using the Sight Decision API to optimize an application.""" import os @@ -20,58 +19,60 @@ from absl import app from absl import flags -from absl import logging +from helpers.logs.logs_handler import logger as logging from sight.proto import sight_pb2 from sight.sight import Sight from sight.widgets.decision import decision FLAGS = flags.FLAGS + def driver(sight: Sight) -> None: - """Executes the logic of searching for a value. + """Executes the logic of searching for a value. Args: sight: The Sight logger object used to drive decisions. """ - secret_num = random.randrange(0, 1000) - logging.info('secret_num=%s', secret_num) - choice = decision.decision_point('move', sight) - logging.info('choice=%s, error=%s', choice, choice['guess'] - secret_num) + secret_num = random.randrange(0, 1000) + logging.info('secret_num=%s', secret_num) + choice = decision.decision_point('move', sight) + logging.info('choice=%s, error=%s', choice, choice['guess'] - secret_num) + + decision.decision_outcome('distance', -abs(choice['guess'] - secret_num), + sight) - decision.decision_outcome( - 'distance', -abs(choice['guess'] - secret_num), sight - ) + proposed_guess = secret_num + (choice['guess'] - secret_num) / 2 + logging.info('proposed_guess=%s', proposed_guess) + decision.propose_action(-abs(choice['guess'] - secret_num) / 2, + {'guess': proposed_guess}, sight) - proposed_guess = secret_num + (choice['guess'] - secret_num) / 2 - logging.info('proposed_guess=%s', proposed_guess) - decision.propose_action( - -abs(choice['guess'] - secret_num) / 2, {'guess': proposed_guess}, sight - ) def get_sight_instance(): - params = sight_pb2.Params( - label='secret_find_experiment', - bucket_name=f'{os.environ["PROJECT_ID"]}-sight', - ) - sight_obj = Sight(params) - return sight_obj + params = sight_pb2.Params( + label='secret_find_experiment', + bucket_name=f'{os.environ["PROJECT_ID"]}-sight', + ) + sight_obj = Sight(params) + return sight_obj + def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError('Too many command-line arguments.') + if len(argv) > 1: + raise app.UsageError('Too many command-line arguments.') - with get_sight_instance() as sight: - decision.run( - driver_fn=driver, - state_attrs={}, - action_attrs={ - 'guess': sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=0, max_value=1000, step_size=10 - ), - }, - sight=sight, - ) + with get_sight_instance() as sight: + decision.run( + driver_fn=driver, + state_attrs={}, + action_attrs={ + 'guess': + sight_pb2.DecisionConfigurationStart.AttrProps(min_value=0, + max_value=1000, + step_size=10), + }, + sight=sight, + ) if __name__ == '__main__': - app.run(main) + app.run(main) diff --git a/py/sight/demo/sir.py b/py/sight/demo/sir.py index 4f58d74..287b05f 100644 --- a/py/sight/demo/sir.py +++ 
b/py/sight/demo/sir.py
@@ -11,14 +11,13 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Simulation of the Susceptible Infected Recovered model using Sight."""
 
 from typing import Dict, Sequence
 
 from absl import app
 from absl import flags
-from absl import logging
+from helpers.logs.logs_handler import logger as logging
 import pandas as pd
 from sight import data_structures
 from sight.proto import sight_pb2
@@ -29,116 +28,131 @@
 from sight.widgets.simulation.simulation_time_step import SimulationTimeStep
 import os
 
-_LAST_TS = flags.DEFINE_integer(
-    'last_ts', 10, 'The final day of the simulation.'
-)
-_MAX_DAYS = flags.DEFINE_integer(
-    'max_days', 1000, 'The number of days the solver simulates.'
-)
-_MAX_POP = flags.DEFINE_integer(
-    'max_pop', 10000, 'The number members in the population.'
-)
-_BETA = flags.DEFINE_float(
-    'beta', .1, 'The disease transmission rate.'
-)
-_GAMMA = flags.DEFINE_float(
-    'gamnma', .1, 'The disease recovery rate.'
-)
+_LAST_TS = flags.DEFINE_integer('last_ts', 10,
+                                'The final day of the simulation.')
+_MAX_DAYS = flags.DEFINE_integer('max_days', 1000,
+                                 'The number of days the solver simulates.')
+_MAX_POP = flags.DEFINE_integer('max_pop', 10000,
+                                'The number of members in the population.')
+_BETA = flags.DEFINE_float('beta', .1, 'The disease transmission rate.')
+_GAMMA = flags.DEFINE_float('gamma', .1, 'The disease recovery rate.')
 
 
 def driver(sight: Sight) -> None:
-  """Solves Lotka-Volterra equations using explicit Euler method."""
-  dt = .1
-
-  # data_structures.log_var('S', S, sight)
-  # data_structures.log_var('I', I, sight)
-  # data_structures.log_var('R', R, sight)
-  action = decision.decision_point('init', sight)
-  print('dt=%s, action=%s' % (dt, action))
-  I, R = 1, 0
-  S = int(action['population']) - I - R
-
-  hist = []
-  for idx in range(int(int(action['num_days'])/dt) - 1):
-    dotS = -action['beta'] * S * I / int(action['population'])
-    dotI = action['beta'] * S * I / int(action['population']) - action['gamma'] * I
-    dotR = action['gamma'] * I
-
-
-    S += dotS * dt
-    I += dotI * dt
-    R += dotR * dt
-
-    print('%d: S=(%s/d%s), dotI=(%s/d%s), dotR=(%s/d%s)' % (idx, S, dotS, I, dotI, R, dotR))
-
-    # data_structures.log_var('S', S, sight)
-    # data_structures.log_var('I', I, sight)
-    # data_structures.log_var('R', R, sight)
-    hist.append([S, I, R])
-  data_structures.log_var('time series',
-                          pd.DataFrame(hist, columns=['S', 'I', 'R']),
-                          sight)
-  decision.decision_outcome('out', sight, reward=R, outcome={'S': S, 'I': I, 'R': R})
+    """Solves the SIR equations using the explicit Euler method."""
+    dt = .1
+
+    # data_structures.log_var('S', S, sight)
+    # data_structures.log_var('I', I, sight)
+    # data_structures.log_var('R', R, sight)
+    action = decision.decision_point('init', sight)
+    print('dt=%s, action=%s' % (dt, action))
+    I, R = 1, 0
+    S = int(action['population']) - I - R
+
+    hist = []
+    for idx in range(int(int(action['num_days']) / dt) - 1):
+        dotS = -action['beta'] * S * I / int(action['population'])
+        dotI = action['beta'] * S * I / int(
+            action['population']) - action['gamma'] * I
+        dotR = action['gamma'] * I
+
+        S += dotS * dt
+        I += dotI * dt
+        R += dotR * dt
+
+        print('%d: S=(%s/d%s), dotI=(%s/d%s), dotR=(%s/d%s)' %
+              (idx, S, dotS, I, dotI, R, dotR))
+
+        # data_structures.log_var('S', S, sight)
+        # data_structures.log_var('I', I, sight)
+        # data_structures.log_var('R', R, sight)
+        hist.append([S, I, R])
+    data_structures.log_var('time series',
+                            pd.DataFrame(hist, columns=['S', 'I', 'R']), sight)
+    decision.decision_outcome('out',
+                              sight,
+ reward=R, + outcome={ + 'S': S, + 'I': I, + 'R': R + }) def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError('Too many command-line arguments.') - - with Sight(sight_pb2.Params( - label='SIR', - bucket_name=f'{os.environ["PROJECT_ID"]}-sight', - )) as sight: + if len(argv) > 1: + raise app.UsageError('Too many command-line arguments.') + + with Sight( + sight_pb2.Params( + label='SIR', + bucket_name=f'{os.environ["PROJECT_ID"]}-sight', + )) as sight: decision.run( driver_fn=driver, - description = ''' + description=''' I am building an SIR model to analyze the progress of Measles infections in Los Angeles during the summer of 2020. I need to configure this model's parameters based on data from the Los Angeles County Department of Public Health. ''', - state_attrs={ - }, + state_attrs={}, action_attrs={ - 'population': sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=0, max_value=_MAX_POP.value, - description='The total population of the area affected by the infection.', - discrete_prob_dist = sight_pb2.DiscreteProbDist( - uniform = sight_pb2.DiscreteProbDist.Uniform( - min_val = 0, max_val = _MAX_POP.value)) - ), - 'num_days': sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=0, max_value=_MAX_DAYS.value, - description='The number of days of the infection being simulated.', - discrete_prob_dist = sight_pb2.DiscreteProbDist( - uniform = sight_pb2.DiscreteProbDist.Uniform( - min_val = 0, max_val = _MAX_DAYS.value)) - ), - 'beta': sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=0, max_value=.2, - description='The transmission rate of the disease.', - continuous_prob_dist = sight_pb2.ContinuousProbDist( - uniform = sight_pb2.ContinuousProbDist.Uniform( - min_val = 0, max_val = .2)) - ), - 'gamma': sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=0, max_value=.2, - description='The recovery rate of the disease.', - continuous_prob_dist = sight_pb2.ContinuousProbDist( - uniform = sight_pb2.ContinuousProbDist.Uniform( - min_val = 0, max_val = .2)) - ), + 'population': + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=0, + max_value=_MAX_POP.value, + description= + 'The total population of the area affected by the infection.', + discrete_prob_dist=sight_pb2.DiscreteProbDist( + uniform=sight_pb2.DiscreteProbDist.Uniform( + min_val=0, max_val=_MAX_POP.value))), + 'num_days': + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=0, + max_value=_MAX_DAYS.value, + description= + 'The number of days of the infection being simulated.', + discrete_prob_dist=sight_pb2.DiscreteProbDist( + uniform=sight_pb2.DiscreteProbDist.Uniform( + min_val=0, max_val=_MAX_DAYS.value))), + 'beta': + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=0, + max_value=.2, + description='The transmission rate of the disease.', + continuous_prob_dist=sight_pb2.ContinuousProbDist( + uniform=sight_pb2.ContinuousProbDist.Uniform( + min_val=0, max_val=.2))), + 'gamma': + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=0, + max_value=.2, + description='The recovery rate of the disease.', + continuous_prob_dist=sight_pb2.ContinuousProbDist( + uniform=sight_pb2.ContinuousProbDist.Uniform( + min_val=0, max_val=.2))), }, outcome_attrs={ - 'S': sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=0, max_value=_MAX_POP.value, - description='The number of people who are susceptible to the disease.', + 'S': + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=0, + max_value=_MAX_POP.value, + description= + 'The 
number of people who are susceptible to the disease.', ), - 'I': sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=0, max_value=_MAX_POP.value, - description='The number of people who are infected by the disease.', + 'I': + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=0, + max_value=_MAX_POP.value, + description= + 'The number of people who are infected by the disease.', ), - 'R': sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=0, max_value=_MAX_POP.value, - description='The number of people who have recovered from the disease.', + 'R': + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=0, + max_value=_MAX_POP.value, + description= + 'The number of people who have recovered from the disease.', ), }, sight=sight, @@ -146,4 +160,4 @@ def main(argv: Sequence[str]) -> None: if __name__ == '__main__': - app.run(main) + app.run(main) diff --git a/py/sight/demo/spawn_workers.py b/py/sight/demo/spawn_workers.py index 34223b4..5ab5710 100644 --- a/py/sight/demo/spawn_workers.py +++ b/py/sight/demo/spawn_workers.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Binary to spawn multiple workers with given file.""" import os @@ -22,38 +21,40 @@ from absl import app from absl import flags -from absl import logging +from helpers.logs.logs_handler import logger as logging from sight.proto import sight_pb2 from sight.sight import Sight from sight.widgets.decision import trials - FLAGS = flags.FLAGS + def get_sight_instance(): - print('creating sight object') - params = sight_pb2.Params( - label='original_demo', - bucket_name=f'{os.environ["PROJECT_ID"]}-sight', - ) - sight_obj = Sight(params) - return sight_obj + print('creating sight object') + params = sight_pb2.Params( + label='original_demo', + bucket_name=f'{os.environ["PROJECT_ID"]}-sight', + ) + sight_obj = Sight(params) + return sight_obj + def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError("Too many command-line arguments.") - - with get_sight_instance() as sight: - trials.start_jobs( - num_train_workers=1, - binary_path='py/sight/demo/demo.py', - optimizer_type='worklist_scheduler', - docker_image='gcr.io/cameltrain/sight-portfolio-worker', - decision_mode='train', - deployment_mode='worker_mode', - worker_mode='dsub_cloud_worker', - sight=sight, - ) + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") + + with get_sight_instance() as sight: + trials.start_jobs( + num_train_workers=1, + binary_path='py/sight/demo/demo.py', + optimizer_type='worklist_scheduler', + docker_image='gcr.io/cameltrain/sight-portfolio-worker', + decision_mode='train', + deployment_mode='worker_mode', + worker_mode='dsub_cloud_worker', + sight=sight, + ) + if __name__ == "__main__": - app.run(main) + app.run(main) diff --git a/py/sight/demo/volterra_lotka.py b/py/sight/demo/volterra_lotka.py index cb45794..88756be 100644 --- a/py/sight/demo/volterra_lotka.py +++ b/py/sight/demo/volterra_lotka.py @@ -11,14 +11,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
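# The driver below advances the Lotka-Volterra system with an explicit Euler
# step; in textbook form the equations are
#   dR/dt = alpha*R - beta*R*F,   dF/dt = delta*R*F - gamma*F.
# A minimal standalone version of that update (note the textbook parameter
# naming here differs from how the demo wires its beta/gamma flags):
import numpy as np

def euler_lotka_volterra(r, f, alpha, beta, gamma, delta, dt, num_steps):
    hist = []
    for _ in range(num_steps):
        dr = alpha * r - beta * r * f  # prey growth minus predation
        df = delta * r * f - gamma * f  # predator growth minus death
        r, f = r + dt * dr, f + dt * df
        hist.append((r, f))
    return np.array(hist)

# e.g. euler_lotka_volterra(10, 10, alpha=1.1, beta=0.4, gamma=0.4, delta=0.1,
#                           dt=0.1, num_steps=100), matching the flag defaults.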
- """Simulation of the Lotka-Volterra equations using Sight.""" from typing import Dict, Sequence from absl import app from absl import flags -from absl import logging +from helpers.logs.logs_handler import logger as logging import math import numpy as np from sight import data_structures @@ -30,110 +29,108 @@ # from sight.widgets.simulation.simulation_time_step import SimulationTimeStep import os -_LAST_TS = flags.DEFINE_integer( - 'last_ts', 10, 'The final day of the simulation.' -) -_NUM_ITERS = flags.DEFINE_integer( - 'num_iters', 100, 'The number of steps the solver takes.' -) +_LAST_TS = flags.DEFINE_integer('last_ts', 10, + 'The final day of the simulation.') +_NUM_ITERS = flags.DEFINE_integer('num_iters', 100, + 'The number of steps the solver takes.') _R0 = flags.DEFINE_integer('r0', 10, 'Initial size of prey population.') _F0 = flags.DEFINE_integer('f0', 10, 'Initial size of predator population.') _ALPHA = flags.DEFINE_float('alpha', 1.1, 'Rate of growth of prey population.') _BETA = flags.DEFINE_float('beta', 0.4, 'Rate of predator and prey meeting.') -_GAMMA = flags.DEFINE_float( - 'gamma', 0.4, 'Rate of death of predator population.' -) -_DELTA = flags.DEFINE_float( - 'delta', 0.1, 'Rate of growth of predator population.' -) +_GAMMA = flags.DEFINE_float('gamma', 0.4, + 'Rate of death of predator population.') +_DELTA = flags.DEFINE_float('delta', 0.1, + 'Rate of growth of predator population.') def default_params() -> Dict[str, float]: - """Returns the run's default configuration parameters. + """Returns the run's default configuration parameters. These are used if the Decision API doesn't set them to something else while searching for a good configuration. """ - return { - 'R0': _R0.value, - 'F0': _F0.value, - 'alpha': _ALPHA.value, - 'beta': _BETA.value, - 'gamma': _GAMMA.value, - 'delta': _DELTA.value, - } + return { + 'R0': _R0.value, + 'F0': _F0.value, + 'alpha': _ALPHA.value, + 'beta': _BETA.value, + 'gamma': _GAMMA.value, + 'delta': _DELTA.value, + } def driver(sight: Sight) -> None: - """Solves Lotka-Volterra equations using explicit Euler method.""" - steps = np.linspace(0, _LAST_TS.value, _NUM_ITERS.value) - # logging.info('steps=%s', steps) - data_structures.log_var('R', 0, sight) - data_structures.log_var('F', 0, sight) - action = decision.decision_point('init', sight) #, default_params) - logging.info('action=%s', action) - print(len(steps)) - for idx in range(len(steps) - 1): - # with SimulationTimeStep( - # time_step_index=[idx], - # time_step=steps[idx], - # time_step_size=_LAST_TS.value / _NUM_ITERS.value, - # time_step_units=sight_pb2.SimulationTimeStepStart.TSU_UNKNOWN, - # sight=sight, - # ): - - if idx == 0: - r = action['R0'] - f = action['F0'] - alpha = action['alpha'] - beta = action['beta'] - gamma = action['gamma'] - delta = action['delta'] - - dt = steps[idx + 1] - steps[idx] - last_r = r - # logging.info('%s: dt=%s', idx, dt) - r = r * (1 + alpha * dt - gamma * dt * f) - f = f * (1 - beta * dt + delta * dt * last_r) + """Solves Lotka-Volterra equations using explicit Euler method.""" + steps = np.linspace(0, _LAST_TS.value, _NUM_ITERS.value) + # logging.info('steps=%s', steps) + data_structures.log_var('R', 0, sight) + data_structures.log_var('F', 0, sight) + action = decision.decision_point('init', sight) #, default_params) + logging.info('action=%s', action) + print(len(steps)) + for idx in range(len(steps) - 1): + # with SimulationTimeStep( + # time_step_index=[idx], + # time_step=steps[idx], + # time_step_size=_LAST_TS.value / _NUM_ITERS.value, 
+ # time_step_units=sight_pb2.SimulationTimeStepStart.TSU_UNKNOWN, + # sight=sight, + # ): + + if idx == 0: + r = action['R0'] + f = action['F0'] + alpha = action['alpha'] + beta = action['beta'] + gamma = action['gamma'] + delta = action['delta'] + + dt = steps[idx + 1] - steps[idx] + last_r = r + # logging.info('%s: dt=%s', idx, dt) + r = r * (1 + alpha * dt - gamma * dt * f) + f = f * (1 - beta * dt + delta * dt * last_r) # logging.info('%s: r=%s, f=%s', idx, r, f) - logging.info('r=%s', r) - if math.isinf(r): - decision.decision_outcome('prey_pop', -1000, sight) - else: - decision.decision_outcome('prey_pop', r if r < 100 else 100-3*(r-100), sight) + logging.info('r=%s', r) + if math.isinf(r): + decision.decision_outcome('prey_pop', -1000, sight) + else: + decision.decision_outcome('prey_pop', + r if r < 100 else 100 - 3 * (r - 100), sight) - # with SimulationState({}, sight): - # data_structures.log_var('R', r, sight) - # data_structures.log_var('F', f, sight) + # with SimulationState({}, sight): + # data_structures.log_var('R', r, sight) + # data_structures.log_var('F', f, sight) def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError('Too many command-line arguments.') - - with Sight(sight_pb2.Params( - label='Volterra-Lotka', - bucket_name=f'{os.environ["PROJECT_ID"]}-sight', - text_output=True, - )) as sight: - # Simulation.run_decision_configuration( - # label='Volterra-Lotka', - # parameters={ - # 'LAST_TS': _LAST_TS.value, - # '_NUM_ITERS': _NUM_ITERS.value, - # 'R0': _R0.value, - # 'F0': _F0.value, - # 'alpha': _ALPHA.value, - # 'beta': _BETA.value, - # 'gamma': _GAMMA.value, - # 'delta': _DELTA.value, - # }, - # reference_trace_file_path=flags.FLAGS.reference_run_file, - decision.run( - driver_fn=driver, - description = ''' + if len(argv) > 1: + raise app.UsageError('Too many command-line arguments.') + + with Sight( + sight_pb2.Params( + label='Volterra-Lotka', + bucket_name=f'{os.environ["PROJECT_ID"]}-sight', + text_output=True, + )) as sight: + # Simulation.run_decision_configuration( + # label='Volterra-Lotka', + # parameters={ + # 'LAST_TS': _LAST_TS.value, + # '_NUM_ITERS': _NUM_ITERS.value, + # 'R0': _R0.value, + # 'F0': _F0.value, + # 'alpha': _ALPHA.value, + # 'beta': _BETA.value, + # 'gamma': _GAMMA.value, + # 'delta': _DELTA.value, + # }, + # reference_trace_file_path=flags.FLAGS.reference_run_file, + decision.run( + driver_fn=driver, + description=''' The Lotka-Volterra equations, also known as the Lotka-Volterra predator-prey model, are a pair of first-order nonlinear differential equations, frequently used to describe the dynamics of biological systems in which two species interact, one as a predator and the other as prey. The prey are assumed to have an unlimited food supply and to reproduce exponentially, unless subject to predation; this exponential growth is represented in the equation above by the term αx. The rate of predation on the prey is assumed to be proportional to the rate at which the predators and the prey meet; this is represented above by βxy. If either x or y is zero, then there can be no predation. With these two terms the prey equation above can be interpreted as follows: the rate of change of the prey's population is given by its own growth rate minus the rate at which it is preyed upon. @@ -147,45 +144,66 @@ def main(argv: Sequence[str]) -> None: Predators have limitless appetite. Both populations can be described by a single variable. 
This amounts to assuming that the populations do not have a spatial or age distribution that contributes to the dynamics. ''', - state_attrs={ - 'R': sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=0, max_value=100, - description='The number of prey animals in the population' - ), - 'F': sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=0, max_value=100, - description='The number of predator animals in the population' - ), - }, - action_attrs={ - 'R0': sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=0, max_value=20, - description='The number of predator animals in the population at the start of the simulation.' - ), - 'F0': sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=0, max_value=20, - description='The number of prey animals in the population at the start of the simulation.' - ), - 'alpha': sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=0, max_value=20, - description='The growth rate of the prey.', - ), - 'beta': sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=0, max_value=20, - description='The effect of the presence of predators on the prey growth rate, for example by predator eating the prey.' - ), - 'gamma': sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=0, max_value=20, - description='The death rate of the predators independent of the prey.', - ), - 'delta': sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=0, max_value=20, - description='The effect of the presence of prey on the predator\'s growth rate, for example how the predator eating the prey affects the predator population.', - ), - }, - sight=sight, - ) + state_attrs={ + 'R': + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=0, + max_value=100, + description='The number of prey animals in the population' + ), + 'F': + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=0, + max_value=100, + description= + 'The number of predator animals in the population'), + }, + action_attrs={ + 'R0': + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=0, + max_value=20, + description= + 'The number of predator animals in the population at the start of the simulation.' + ), + 'F0': + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=0, + max_value=20, + description= + 'The number of prey animals in the population at the start of the simulation.' + ), + 'alpha': + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=0, + max_value=20, + description='The growth rate of the prey.', + ), + 'beta': + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=0, + max_value=20, + description= + 'The effect of the presence of predators on the prey growth rate, for example by predator eating the prey.' + ), + 'gamma': + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=0, + max_value=20, + description= + 'The death rate of the predators independent of the prey.', + ), + 'delta': + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=0, + max_value=20, + description= + 'The effect of the presence of prey on the predator\'s growth rate, for example how the predator eating the prey affects the predator population.', + ), + }, + sight=sight, + ) if __name__ == '__main__': - app.run(main) + app.run(main) diff --git a/py/sight/exception.py b/py/sight/exception.py index 806a100..f6549b1 100644 --- a/py/sight/exception.py +++ b/py/sight/exception.py @@ -11,15 +11,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
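
To make the Lotka-Volterra hunk above easier to check, here is the same explicit-Euler update from `driver()` re-run standalone, with the flag defaults substituted for the action values and all Sight calls stripped, so it executes in isolation:

```python
# Standalone re-run of the Euler update in driver() above (values mirror the
# flag defaults: last_ts=10, num_iters=100, R0=F0=10, alpha=1.1, beta=0.4,
# gamma=0.4, delta=0.1).
import numpy as np

steps = np.linspace(0, 10, 100)
r, f = 10.0, 10.0
alpha, beta, gamma, delta = 1.1, 0.4, 0.4, 0.1

for idx in range(len(steps) - 1):
    dt = steps[idx + 1] - steps[idx]
    last_r = r
    # Same coupling as driver(): gamma scales the predator count f in the prey
    # equation and delta scales last_r in the predator equation.
    r = r * (1 + alpha * dt - gamma * dt * f)
    f = f * (1 - beta * dt + delta * dt * last_r)

print(r, f)  # the final prey value feeds decision_outcome('prey_pop', ...)
```

Note that this mirrors the code as written: `gamma` couples the predators into the prey equation, which differs from the conventional textbook naming where that coefficient is usually `beta`.
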
# See the License for the specific language governing permissions and # limitations under the License. - """Documentation of exception events in the sight log.""" -from absl import logging +from helpers.logs.logs_handler import logger as logging from sight.proto import sight_pb2 def exception(exc_type, value, traceback, sight, frame): - """Documents an exception events in the Sight log if Sight is being used. + """Documents an exception events in the Sight log if Sight is being used. Args: exc_type: The exc_type of the exception that was thrown @@ -29,15 +28,15 @@ def exception(exc_type, value, traceback, sight, frame): not being used. frame: The call stack frame that contains the calling context information. """ - logging.exception( - 'Exception: exc_type=%s, value=%s, traceback=%s', - str(exc_type), - str(value), - str(traceback), - ) - if sight is not None: - sight.enter_block('Exception', sight_pb2.Object(), frame=frame) - sight.text_block('exc_type', str(exc_type), frame=frame) - sight.text_block('value', str(value), frame=frame) - sight.text_block('traceback', str(traceback), frame=frame) - sight.exit_block('Exception', sight_pb2.Object(), frame=frame) + logging.exception( + 'Exception: exc_type=%s, value=%s, traceback=%s', + str(exc_type), + str(value), + str(traceback), + ) + if sight is not None: + sight.enter_block('Exception', sight_pb2.Object(), frame=frame) + sight.text_block('exc_type', str(exc_type), frame=frame) + sight.text_block('value', str(value), frame=frame) + sight.text_block('traceback', str(traceback), frame=frame) + sight.exit_block('Exception', sight_pb2.Object(), frame=frame) diff --git a/py/sight/gcs_utils.py b/py/sight/gcs_utils.py index 2658ede..d9eebb6 100644 --- a/py/sight/gcs_utils.py +++ b/py/sight/gcs_utils.py @@ -11,42 +11,41 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """GCS related helper functions.""" import os import subprocess -from absl import logging +from helpers.logs.logs_handler import logger as logging from google.cloud import bigquery from google.cloud import storage from sight.proto import sight_pb2 def upload_blob_from_stream(bucket_name, gcp_path, file_obj, file_name, count): - """uploads given file to the bucket. - - Args: - bucket_name: name of the bucket to store the file - gcp_path: directory path to store the file - file_obj: file object to be stored - file_name: name given to file - count: chunk number of file - """ - storage_client = storage.Client() - bucket = storage_client.bucket(bucket_name) - if not bucket.exists(): - # logging.info(f"creating bucket {bucket_name}, as it didn't exist....") - bucket = storage_client.create_bucket(bucket_name) - - blob_name = gcp_path + '/' + file_name + '_' + str(count) + '.avro' - blob = bucket.blob(blob_name) - # Rewind the stream to the beginning. This step can be omitted if the input - # stream will always be at a correct position. - file_obj.seek(0) - # Upload data from the stream to your bucket. - blob.upload_from_file(file_obj) - # logging.info(f'Stream data uploaded to {blob_name} in bucket {bucket_name}.') + """uploads given file to the bucket. 
+ + Args: + bucket_name: name of the bucket to store the file + gcp_path: directory path to store the file + file_obj: file object to be stored + file_name: name given to file + count: chunk number of file + """ + storage_client = storage.Client() + bucket = storage_client.bucket(bucket_name) + if not bucket.exists(): + # logging.info(f"creating bucket {bucket_name}, as it didn't exist....") + bucket = storage_client.create_bucket(bucket_name) + + blob_name = gcp_path + '/' + file_name + '_' + str(count) + '.avro' + blob = bucket.blob(blob_name) + # Rewind the stream to the beginning. This step can be omitted if the input + # stream will always be at a correct position. + file_obj.seek(0) + # Upload data from the stream to your bucket. + blob.upload_from_file(file_obj) + # logging.info(f'Stream data uploaded to {blob_name} in bucket {bucket_name}.') def create_table( @@ -56,7 +55,7 @@ def create_table( external_file_format, external_file_uri, ): - """Create BigQuery external table mapping to file in GCS bucket. + """Create BigQuery external table mapping to file in GCS bucket. Args: project_id: GCP projectId. @@ -70,67 +69,56 @@ def create_table( Returns: """ - try: - # Check if the dataset exists - client = bigquery.Client(project_id) - dataset = client.get_dataset(dataset_name) - # logging.info(f"Dataset {dataset_name} already exists.") - except Exception as e: - # If the dataset does not exist, create a new dataset - dataset = bigquery.Dataset(f"{project_id}.{dataset_name}") - dataset = client.create_dataset(dataset) - # logging.info(f"Dataset {dataset_name} created.") - - - # logging.info( - # 'Creating external table %s mapping to : %s.', - # table_name, - # external_file_uri, - # ) - try: - client = bigquery.Client(project_id) - dataset_ref = client.dataset(dataset_name) - table_ref = bigquery.TableReference(dataset_ref, table_name) - table = bigquery.Table(table_ref) - - external_config = bigquery.ExternalConfig(external_file_format) - external_config.source_uris = [external_file_uri] - table.external_data_configuration = external_config - client.create_table(table) - # logging.info('%s table successfully created.', table_name) - except Exception as e: - logging.info(f"Error creating table: {e}") - - -def create_external_bq_table( - params: sight_pb2.Params, file_name: str, client_id: int -): - """create external table in BigQuery from avro files using URI, located in the bucket. 
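
A hedged usage sketch of `create_table`: it maps a set of Avro shards in GCS onto an external BigQuery table, so the shards remain the source of truth and BigQuery only stores the schema mapping. All identifiers below are placeholders, not values from this repository; arguments are passed positionally, mirroring the call made by `create_external_bq_table` further down.

```python
# Placeholder project, dataset, table and URI values -- substitute your own.
create_table(
    'my-gcp-project',                                   # project_id
    'sight_logs',                                       # dataset_name
    'demo_12345_log',                                   # table_name
    'AVRO',                                             # external_file_format
    'gs://my-gcp-project-sight/sight-logs/demo_12345/*.avro',  # external_file_uri
)
```
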
+ try: + # Check if the dataset exists + client = bigquery.Client(project_id) + dataset = client.get_dataset(dataset_name) + # logging.info(f"Dataset {dataset_name} already exists.") + except Exception as e: + # If the dataset does not exist, create a new dataset + dataset = bigquery.Dataset(f"{project_id}.{dataset_name}") + dataset = client.create_dataset(dataset) + # logging.info(f"Dataset {dataset_name} created.") + + # logging.info( + # 'Creating external table %s mapping to : %s.', + # table_name, + # external_file_uri, + # ) + try: + client = bigquery.Client(project_id) + dataset_ref = client.dataset(dataset_name) + table_ref = bigquery.TableReference(dataset_ref, table_name) + table = bigquery.Table(table_ref) + + external_config = bigquery.ExternalConfig(external_file_format) + external_config.source_uris = [external_file_uri] + table.external_data_configuration = external_config + client.create_table(table) + # logging.info('%s table successfully created.', table_name) + except Exception as e: + logging.info(f"Error creating table: {e}") + + +def create_external_bq_table(params: sight_pb2.Params, file_name: str, + client_id: int): + """create external table in BigQuery from avro files using URI, located in the bucket. Args: params: sight parameters to get details of the files file_name: name of the file client_id: sight client id """ - external_file_uri = ( - params.external_file_uri - + params.bucket_name - + '/' - + params.gcp_path - + '/' - # + '/client_' - + params.label - + '_' - + str(client_id) - + '/' - + '*' - + params.file_format - ) - if 'PARENT_LOG_ID' not in os.environ: - create_table( - os.environ["PROJECT_ID"], - params.dataset_name, - file_name, - params.external_file_format, - external_file_uri, - ) + external_file_uri = ( + params.external_file_uri + params.bucket_name + '/' + params.gcp_path + + '/' + # + '/client_' + + params.label + '_' + str(client_id) + '/' + '*' + params.file_format) + if 'PARENT_LOG_ID' not in os.environ: + create_table( + os.environ["PROJECT_ID"], + params.dataset_name, + file_name, + params.external_file_format, + external_file_uri, + ) diff --git a/py/sight/service_utils.py b/py/sight/service_utils.py index 0f864a2..1875114 100644 --- a/py/sight/service_utils.py +++ b/py/sight/service_utils.py @@ -22,7 +22,7 @@ from typing import Any, Callable import uuid from absl import flags -from absl import logging +from helpers.logs.logs_handler import logger as logging from dotenv import load_dotenv import google.auth.transport.requests import google.oauth2.id_token @@ -65,7 +65,10 @@ _DEPLOYMENT_MODE = flags.DEFINE_enum( 'deployment_mode', None, - ['vm', 'distributed', 'local', 'dsub_local', 'docker_local', 'worker_mode'], + [ + 'vm', 'distributed', 'local', 'dsub_local', 'docker_local', + 'worker_mode' + ], ('The procedure to use when training a model to drive applications that ' 'use the Decision API.'), ) @@ -85,8 +88,6 @@ def get_service_id() -> str: global _SERVICE_ID global _SIGHT_SERVICE_KNOWN - - # print('os.environ : ', os.environ) if 'SIGHT_SERVICE_ID' in os.environ: # print('used env flow from get_service_id.....') @@ -104,14 +105,15 @@ def get_service_id() -> str: # logging.info("service id : %s%s", _SERVICE_PREFIX, _SERVICE_ID) return _SERVICE_ID + def get_port_number() -> str: if 'PORT' in os.environ: - return os.environ['PORT'] + return os.environ['PORT'] # need to use secure channel for cloud run server - elif(FLAGS.deployment_mode in ['local', 'vm']): - return '8080' + elif (FLAGS.deployment_mode in ['local', 'vm']): + return 
'8080' else: - return FLAGS.port + return FLAGS.port def _service_addr() -> str: @@ -145,10 +147,10 @@ def _find_or_deploy_server() -> str: """deploy sight server with given docker image.""" global _SIGHT_SERVICE_KNOWN - if(os.environ.get('SIGHT_SERVICE_ID')): - # print('service found from environment variable : ', get_service_id()) - # logging.info('service found from environment variable') - return get_service_id() + if (os.environ.get('SIGHT_SERVICE_ID')): + # print('service found from environment variable : ', get_service_id()) + # logging.info('service found from environment variable') + return get_service_id() if _SIGHT_SERVICE_KNOWN or (not _SERVICE_DOCKER_FILE.value and not _SERVICE_DOCKER_IMG.value): @@ -454,6 +456,7 @@ def obtain_secure_channel(options=None): ) return channel + def obtain_insecure_channel(options): """create insecure channel to communicate with server. @@ -461,9 +464,9 @@ def obtain_insecure_channel(options): service_handle: to communicate with server """ if 'IP_ADDR' in os.environ: - host = os.environ["IP_ADDR"] + host = os.environ["IP_ADDR"] else: - host = 'localhost' + host = 'localhost' target = '{}:{}'.format(host, get_port_number()) # print("service_url here : ", targpending action ids :et) @@ -479,33 +482,33 @@ def generate_metadata(): channel_opts = [ ('grpc.max_send_message_length', 512 * 1024 * 1024), ('grpc.max_receive_message_length', 512 * 1024 * 1024), - ] + ] - if 'IP_ADDR' in os.environ or ('deployment_mode' in FLAGS and FLAGS.deployment_mode in ['local','vm']): + if 'IP_ADDR' in os.environ or ('deployment_mode' in FLAGS and + FLAGS.deployment_mode in ['local', 'vm']): - channel = obtain_insecure_channel(channel_opts) - sight_service = service_pb2_grpc.SightServiceStub(channel) - metadata = [] - return sight_service, metadata + channel = obtain_insecure_channel(channel_opts) + sight_service = service_pb2_grpc.SightServiceStub(channel) + metadata = [] + return sight_service, metadata # elif 'deployment_mode' == "worker_mode": # return sight_service, metadata else: - #for worker spawned using vm mode, they must be connect via insecure channel - # if(): - - - # for client code, need to find or deploy cloud run service, workers will directly get via env - if 'deployment_mode' in FLAGS and FLAGS.deployment_mode == "distributed": - _find_or_deploy_server() - - secure_channel = obtain_secure_channel() - # print("secure_channel : ", secure_channel) - sight_service = service_pb2_grpc.SightServiceStub(secure_channel) - metadata = [] - id_token = generate_id_token() - # print('id_token : ', id_token) - metadata.append(('authorization', 'Bearer ' + id_token)) - return sight_service, metadata + #for worker spawned using vm mode, they must be connect via insecure channel + # if(): + + # for client code, need to find or deploy cloud run service, workers will directly get via env + if 'deployment_mode' in FLAGS and FLAGS.deployment_mode == "distributed": + _find_or_deploy_server() + + secure_channel = obtain_secure_channel() + # print("secure_channel : ", secure_channel) + sight_service = service_pb2_grpc.SightServiceStub(secure_channel) + metadata = [] + id_token = generate_id_token() + # print('id_token : ', id_token) + metadata.append(('authorization', 'Bearer ' + id_token)) + return sight_service, metadata # def calculate_response_time(start_time): diff --git a/py/sight/sight.py b/py/sight/sight.py index 2c3ba0c..96e5547 100644 --- a/py/sight/sight.py +++ b/py/sight/sight.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 
implied. # See the License for the specific language governing permissions and # limitations under the License. - """Core logging class that provides APIs for creating a structured log.""" from __future__ import annotations @@ -24,7 +23,7 @@ import threading from typing import Any, Optional, Sequence -from absl import logging +from helpers.logs.logs_handler import logger as logging from absl import flags import asyncio import contextvars @@ -46,47 +45,51 @@ from sight.widgets.simulation.simulation_widget_state import SimulationWidgetState load_dotenv() -_PARENT_ID = flags.DEFINE_string( - 'parent_id', None, 'Sight log Id of super script') +_PARENT_ID = flags.DEFINE_string('parent_id', None, + 'Sight log Id of super script') FLAGS = flags.FLAGS current_script_directory = os.path.dirname(os.path.abspath(__file__)) -_SCHEMA_FILE_PATH = os.path.join(current_script_directory, '..', 'avrofile-schema.avsc') +_SCHEMA_FILE_PATH = os.path.join(current_script_directory, '..', + 'avrofile-schema.avsc') + def generate_default_sight_params(): - """Returns a sight object with default parameters. + """Returns a sight object with default parameters. If user has provided values for some of them while initializing, it will be used, otherwise this value are passed. """ - default_prams = sight_pb2.Params( - label='default_sight', - log_owner='bronovetsky@google.com', - local=True, - text_output=False, - capacitor_output=False, - avro_output=True, - log_dir_path='/tmp/', - bucket_name='sight-bucket', - gcp_path='sight-logs', - file_format='.avro', - dataset_name='sight_logs', - external_file_format='AVRO', - external_file_uri='gs://', - ) - return default_prams + default_prams = sight_pb2.Params( + label='default_sight', + log_owner='bronovetsky@google.com', + local=True, + text_output=False, + capacitor_output=False, + avro_output=True, + log_dir_path='/tmp/', + bucket_name='sight-bucket', + gcp_path='sight-logs', + file_format='.avro', + dataset_name='sight_logs', + external_file_format='AVRO', + external_file_uri='gs://', + ) + return default_prams + @dataclasses.dataclass class SightLocationState: - location: Location - line_prefix: str - line_suffix: str - open_block_start_locations: list[Any] - num_direct_contents: Location - num_transitive_contents: Location - active_block_labels: list[Any] + location: Location + line_prefix: str + line_suffix: str + open_block_start_locations: list[Any] + num_direct_contents: Location + num_transitive_contents: Location + active_block_labels: list[Any] + class Sight(object): - """Object that manages writing a Sight log in some structured format. + """Object that manages writing a Sight log in some structured format. Provides an interface for higher-level logging abstractions to be built on top of this base functionality. @@ -144,242 +147,238 @@ class Sight(object): file_name: """ - # The common prefix of source code files that should be removed from emitted - # log when documenting the logging code location. - CODE_FILES_PATH_PREFIX = 'runfiles/google3/' - - # The absolute path of the Sight protodb file. 
- # PROTODB_PATH = 'google3/googlex/cortex/sight/proto2/sight_proto2db.protodb' - - # The API Key for the BQ Sight service - # SIGHT_API_KEY = 'AKfycbz35qrsrKUmm2FITMsLW9vSbKoBxEYv4EggM_m1Q2H3' #cameltrain - # SIGHT_API_KEY = 'AKfycbw9eY9dk-JstxeAizfMfJZ8qwHm6BVmOZEgBUey-HPL' #catan-(now generalized) - SIGHT_API_KEY = 'AKfycbzU74yRL1Dc0Xu5--oJricaD-H50UgF3FKM_E8_CMP7uNesQEk-k3cm57R3vTsjbWCcxA' - - def __init__( - self, - params: sight_pb2.Params, - configuration: Optional[Sequence[sight_pb2.Object]] = None, - ): - # generating default params to run sight - default_params = generate_default_sight_params() - # replacing fields provided user - default_params.MergeFrom(params) - default_params.label = default_params.label.replace(' ', '_') - self.params = default_params - # print("self.params : ", self.params) - - # Initialize each widget's state to make sure its state field is created. - self.widget_decision_state = defaultdict(dict) - self.widget_simulation_state = SimulationWidgetState() - self.widget_simulation_state = SimulationWidgetState() - # self._configure(configuration) - - - # Configure the tracking state of the Sight object, which records the current location - # in the log of the current task, including its hierarchical nesting. - self.pause_logging_depth = 0 - - self.location = contextvars.ContextVar('location') - self.location.set(Location()) - if 'PARENT_LOG_ID' in os.environ: - self.location.get().exit() - worker_location = (os.environ['worker_location']).split(':') - for loc in worker_location: - self.location.get().enter(loc) - self.location.get().enter(0) - self.index = 1 - - self.line_prefix = contextvars.ContextVar('line_prefix') - self.line_prefix.set('') - self.line_suffix = contextvars.ContextVar('line_suffix') - self.line_suffix.set('') - self.open_block_start_locations = contextvars.ContextVar('line_suffix') - self.open_block_start_locations.set([]) - self.num_direct_contents = contextvars.ContextVar('num_direct_contents') - self.num_direct_contents.set(Location()) - self.num_transitive_contents = contextvars.ContextVar('num_transitive_contents') - self.num_transitive_contents.set(Location()) - self.active_block_labels = contextvars.ContextVar('active_block_labels') - self.active_block_labels.set([]) - - self.attributes = {} - self.open = True - - self.id = 0 - self.set_attribute('log_uid', str(self.id)) - - if self.params.silent_logger: - return - - # The path prefix common to all the file(s) that hold the log. - self.path_prefix = '' - path_label = 'log' - if self.params.label: - path_label = self.params.label - - if self.params.in_memory: - self.path_prefix = '' - self.id = 0 - self.in_memory_log = [] - self.text_log = None - self.capacitor_log = None - self.avro_log = None - self.avro_schema = None - self.avro_record_counter = 0 - self.avro_file_counter = 0 - self.file_name = self.params.label - else: - if self.params.local: - self.path_prefix = '%s/%s' % (self.params.log_dir_path, path_label) + # The common prefix of source code files that should be removed from emitted + # log when documenting the logging code location. + CODE_FILES_PATH_PREFIX = 'runfiles/google3/' + + # The absolute path of the Sight protodb file. 
+ # PROTODB_PATH = 'google3/googlex/cortex/sight/proto2/sight_proto2db.protodb' + + # The API Key for the BQ Sight service + # SIGHT_API_KEY = 'AKfycbz35qrsrKUmm2FITMsLW9vSbKoBxEYv4EggM_m1Q2H3' #cameltrain + # SIGHT_API_KEY = 'AKfycbw9eY9dk-JstxeAizfMfJZ8qwHm6BVmOZEgBUey-HPL' #catan-(now generalized) + SIGHT_API_KEY = 'AKfycbzU74yRL1Dc0Xu5--oJricaD-H50UgF3FKM_E8_CMP7uNesQEk-k3cm57R3vTsjbWCcxA' + + def __init__( + self, + params: sight_pb2.Params, + configuration: Optional[Sequence[sight_pb2.Object]] = None, + ): + # generating default params to run sight + default_params = generate_default_sight_params() + # replacing fields provided user + default_params.MergeFrom(params) + default_params.label = default_params.label.replace(' ', '_') + self.params = default_params + # print("self.params : ", self.params) + + # Initialize each widget's state to make sure its state field is created. + self.widget_decision_state = defaultdict(dict) + self.widget_simulation_state = SimulationWidgetState() + self.widget_simulation_state = SimulationWidgetState() + # self._configure(configuration) + + # Configure the tracking state of the Sight object, which records the current location + # in the log of the current task, including its hierarchical nesting. + self.pause_logging_depth = 0 + + self.location = contextvars.ContextVar('location') + self.location.set(Location()) + if 'PARENT_LOG_ID' in os.environ: + self.location.get().exit() + worker_location = (os.environ['worker_location']).split(':') + for loc in worker_location: + self.location.get().enter(loc) + self.location.get().enter(0) + self.index = 1 + + self.line_prefix = contextvars.ContextVar('line_prefix') + self.line_prefix.set('') + self.line_suffix = contextvars.ContextVar('line_suffix') + self.line_suffix.set('') + self.open_block_start_locations = contextvars.ContextVar('line_suffix') + self.open_block_start_locations.set([]) + self.num_direct_contents = contextvars.ContextVar( + 'num_direct_contents') + self.num_direct_contents.set(Location()) + self.num_transitive_contents = contextvars.ContextVar( + 'num_transitive_contents') + self.num_transitive_contents.set(Location()) + self.active_block_labels = contextvars.ContextVar( + 'active_block_labels') + self.active_block_labels.set([]) + + self.attributes = {} + self.open = True + self.id = 0 + self.set_attribute('log_uid', str(self.id)) + + if self.params.silent_logger: + return + + # The path prefix common to all the file(s) that hold the log. 
+ self.path_prefix = '' + path_label = 'log' + if self.params.label: + path_label = self.params.label + + if self.params.in_memory: + self.path_prefix = '' + self.id = 0 + self.in_memory_log = [] + self.text_log = None + self.capacitor_log = None + self.avro_log = None + self.avro_schema = None + self.avro_record_counter = 0 + self.avro_file_counter = 0 + self.file_name = self.params.label + else: + if self.params.local: + self.path_prefix = '%s/%s' % (self.params.log_dir_path, + path_label) + self.id = 0 + + # Added : opening Avro file + + if self.params.avro_output: + # logging.info('#######SERVICE###############') + + try: + + if 'PARENT_LOG_ID' in os.environ: + logging.info('PARENT_LOG_ID found - worker process') + worker_location = os.environ[ + 'worker_location'].replace(':', '_') + self.path_prefix = (self.params.label + '_' + + os.environ['PARENT_LOG_ID'] + '_' + + 'worker' + '_' + worker_location + + '_' + 'log') + self.id = os.environ['PARENT_LOG_ID'] + print("log id is : ", self.id) + elif (FLAGS.sight_log_id): + logging.info('Using provided sight id') + self.id = FLAGS.sight_log_id + self.path_prefix = (self.params.label + '_' + self.id + + '_' + 'log' + '_run_mode') + else: + # logging.info('calling generate metadata') + req = service_pb2.CreateRequest( + # log_owner=self.params.log_owner, + # label=self.params.label, + # log_dir_path=self.params.log_dir_path, + # format='LF_AVRO', + ) + response = service.call( + lambda s, meta: s.Create(req, 300, metadata=meta)) + logging.info('##### response=%s #####', response) + self.id = response.id + # logging.info('PARENT_LOG_ID not found - parent process') + self.path_prefix = (self.params.label + '_' + + str(response.id) + '_' + 'log') + + except Exception as e: + logging.info('RPC ERROR: %s', e) + if not self.params.log_dir_path: + self.params.log_dir_path = '/tmp/' + self.path_prefix = '%s/%s' % (self.params.log_dir_path, + path_label) + logging.exception( + 'Logging only locally to %s due to: error %s ', + self.path_prefix, + e, + ) + self.params.local = True + + self.avro_log_file_path = ( + self.params.label + '_' + str(self.id) + '/' + + self.path_prefix + # 'client_' + str(self.id) + '/' + self.path_prefix + ) + self.file_name = self.avro_log_file_path.split('/')[-1] + # self.table_name = self.params.label + '_' + str(self.id) + '_' + 'log' + self.table_name = str(self.id) + '_' + 'log' + + if 'SIGHT_PATH' in os.environ: + self.avro_schema = load_schema( + f'{os.environ["SIGHT_PATH"]}/../avrofile-schema.avsc') + else: + # print('avro-schema path is : ', _SCHEMA_FILE_PATH) + self.avro_schema = load_schema(_SCHEMA_FILE_PATH) + self.avro_log = io.BytesIO() + self.avro_record_counter = 0 + self.avro_file_counter = 0 + + if self.params.text_output: + self.text_log_file_path = self.path_prefix + '.txt' + self.text_log = open(self.text_log_file_path, 'w') + else: + self.text_log = None + + # if build_data.Changelist(): + # self.change_list_number = int(build_data.Changelist()) + # self.set_attribute('change_list_number', str(self.change_list_number)) + # if build_data.CitcSnapshot(): + # self.citc_snapshot = int(build_data.CitcSnapshot()) + # self.set_attribute('citc_snapshot', str(self.citc_snapshot)) + + def get_location_state(self) -> SightLocationState: + return SightLocationState( + self.location.get().clone(), + self.line_prefix.get(), + self.line_suffix.get(), + self.open_block_start_locations.get().copy(), + self.num_direct_contents.get().clone(), + self.num_transitive_contents.get().clone(), + 
self.active_block_labels.get().copy(), + ) - # Added : opening Avro file - - if self.params.avro_output: - # logging.info('#######SERVICE###############') - - try: - - if 'PARENT_LOG_ID' in os.environ: - logging.info('PARENT_LOG_ID found - worker process') - worker_location = os.environ['worker_location'].replace(':', '_') - self.path_prefix = ( - self.params.label - + '_' - + os.environ['PARENT_LOG_ID'] - + '_' - + 'worker' - + '_' - + worker_location - + '_' - + 'log' - ) - self.id = os.environ['PARENT_LOG_ID'] - print("log id is : ", self.id) - elif (FLAGS.sight_log_id): - logging.info('Using provided sight id') - self.id = FLAGS.sight_log_id - self.path_prefix = ( - self.params.label + '_' + self.id + '_' + 'log' + '_run_mode' - ) - else: - # logging.info('calling generate metadata') - req = service_pb2.CreateRequest( - # log_owner=self.params.log_owner, - # label=self.params.label, - # log_dir_path=self.params.log_dir_path, - # format='LF_AVRO', - ) - response = service.call( - lambda s, meta: s.Create(req, 300, metadata=meta) - ) - logging.info('##### response=%s #####', response) - self.id = response.id - # logging.info('PARENT_LOG_ID not found - parent process') - self.path_prefix = ( - self.params.label + '_' + str(response.id) + '_' + 'log' - ) + def set_location_state(self, state: SightLocationState) -> None: + self.location.set(state.location) + self.line_prefix.set(state.line_prefix) + self.line_suffix.set(state.line_suffix) + self.open_block_start_locations.set(state.open_block_start_locations) + self.num_direct_contents.set(state.num_direct_contents) + self.num_transitive_contents.set(state.num_transitive_contents) + self.active_block_labels.set(state.active_block_labels) - except Exception as e: - logging.info('RPC ERROR: %s', e) - if not self.params.log_dir_path: - self.params.log_dir_path = '/tmp/' - self.path_prefix = '%s/%s' % (self.params.log_dir_path, path_label) - logging.exception( - 'Logging only locally to %s due to: error %s ', - self.path_prefix, - e, - ) - self.params.local = True - - self.avro_log_file_path = ( - self.params.label - + '_' - + str(self.id) - + '/' - + self.path_prefix - # 'client_' + str(self.id) + '/' + self.path_prefix - ) - self.file_name = self.avro_log_file_path.split('/')[-1] - # self.table_name = self.params.label + '_' + str(self.id) + '_' + 'log' - self.table_name = str(self.id) + '_' + 'log' - - if 'SIGHT_PATH' in os.environ: - self.avro_schema = load_schema( - f'{os.environ["SIGHT_PATH"]}/../avrofile-schema.avsc' - ) - else: - # print('avro-schema path is : ', _SCHEMA_FILE_PATH) - self.avro_schema = load_schema(_SCHEMA_FILE_PATH) - self.avro_log = io.BytesIO() - self.avro_record_counter = 0 - self.avro_file_counter = 0 - - if self.params.text_output: - self.text_log_file_path = self.path_prefix + '.txt' - self.text_log = open(self.text_log_file_path, 'w') - else: - self.text_log = None - - # if build_data.Changelist(): - # self.change_list_number = int(build_data.Changelist()) - # self.set_attribute('change_list_number', str(self.change_list_number)) - # if build_data.CitcSnapshot(): - # self.citc_snapshot = int(build_data.CitcSnapshot()) - # self.set_attribute('citc_snapshot', str(self.citc_snapshot)) - - def get_location_state(self) -> SightLocationState: - return SightLocationState( - self.location.get().clone(), - self.line_prefix.get(), - self.line_suffix.get(), - self.open_block_start_locations.get().copy(), - self.num_direct_contents.get().clone(), - self.num_transitive_contents.get().clone(), - 
self.active_block_labels.get().copy(), - ) - - def set_location_state(self, state: SightLocationState) -> None: - self.location.set(state.location) - self.line_prefix.set(state.line_prefix) - self.line_suffix.set(state.line_suffix) - self.open_block_start_locations.set(state.open_block_start_locations) - self.num_direct_contents.set(state.num_direct_contents) - self.num_transitive_contents.set(state.num_transitive_contents) - self.active_block_labels.set(state.active_block_labels) - - - def create_task(self, func): - frame = inspect.currentframe().f_back - async def go(func, state): - # self.location.set(temp_location) - self.set_location_state(state) - # label = f'id={task_id}' - # print('%s/%s: outside self.location=%s/%s' % (task_id, asyncio.current_task().get_name(), self.location.get(), id(self.location.get()))) - # self.enter_block(label, sight_pb2.Object(), frame) - # print('%s/%s: inside self.location=%s/%s' % (task_id, asyncio.current_task().get_name(), self.location.get(), id(self.location.get()))) - return await func - # self.exit_block(label, sight_pb2.Object(), frame) - - self.enter_block(f'asyncio.create_task: {asyncio.current_task().get_name()}', sight_pb2.Object(), frame) - state = self.get_location_state() #self.location.get().clone() - # print('%s/%s: temp_location=%s=%s' % (task_id, asyncio.current_task().get_name(), state, id(state))) - - new_task = asyncio.create_task(go(func, state))#, name=f'task_{task_id}') - self.exit_block(f'asyncio.create_task: {asyncio.current_task().get_name()}', sight_pb2.Object(), frame) - return new_task - - @classmethod - def silent(cls) -> Sight: - return Sight(sight_pb2.Params(silent_logger=True)) - - def new( - self, - params: sight_pb2.Params, - configuration: Optional[Sequence[sight_pb2.Object]] = None, - ) -> Sight: - """Returns a new instance of Sight. + def create_task(self, func): + frame = inspect.currentframe().f_back + + async def go(func, state): + # self.location.set(temp_location) + self.set_location_state(state) + # label = f'id={task_id}' + # print('%s/%s: outside self.location=%s/%s' % (task_id, asyncio.current_task().get_name(), self.location.get(), id(self.location.get()))) + # self.enter_block(label, sight_pb2.Object(), frame) + # print('%s/%s: inside self.location=%s/%s' % (task_id, asyncio.current_task().get_name(), self.location.get(), id(self.location.get()))) + return await func + # self.exit_block(label, sight_pb2.Object(), frame) + + self.enter_block( + f'asyncio.create_task: {asyncio.current_task().get_name()}', + sight_pb2.Object(), frame) + state = self.get_location_state() #self.location.get().clone() + # print('%s/%s: temp_location=%s=%s' % (task_id, asyncio.current_task().get_name(), state, id(state))) + + new_task = asyncio.create_task(go(func, + state)) #, name=f'task_{task_id}') + self.exit_block( + f'asyncio.create_task: {asyncio.current_task().get_name()}', + sight_pb2.Object(), frame) + return new_task + + @classmethod + def silent(cls) -> Sight: + return Sight(sight_pb2.Params(silent_logger=True)) + + def new( + self, + params: sight_pb2.Params, + configuration: Optional[Sequence[sight_pb2.Object]] = None, + ) -> Sight: + """Returns a new instance of Sight. This method is useful for creating new Sight logger objects in cases where it is not feasible to import Sight (due to circular import dependencies) @@ -390,130 +389,127 @@ def new( params: Primary configuration parameters of the logger. configuration: Sight log that contains additional configuration details. 
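
A hedged sketch of where `new()` (and `silent()`) above come in handy: code that holds an existing logger but cannot import `sight.sight` directly can still derive a sibling logger. The helper name and its body are hypothetical, not part of this patch.

```python
# Hypothetical helper: clone a configured logger under a new label without
# importing sight.sight (the circular-import case new() is documented for).
def make_child_logger(parent_sight, label: str):
    params = type(parent_sight.params)()   # a fresh sight_pb2.Params instance
    params.CopyFrom(parent_sight.params)
    params.label = label
    return parent_sight.new(params)
```
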
""" - return Sight(params, configuration) - - def __enter__(self): - return self - - def __exit__(self, exc_type, value, traceback): - # last rpc call to server for this sight id - req = service_pb2.CloseRequest() - req.client_id = str(self.id) - response = service.call( - lambda s, meta: s.Close(req, 300, metadata=meta) - ) - # print("close rpc status :", response.response_str) - - if self.params.silent_logger: - self.close() - if exc_type is not None: - # pytype: disable=attribute-error - exception(exc_type, value, traceback, self, inspect.currentframe().f_back) - # pytype: enable=attribute-error - self.close() - - def __del__(self): - self.close() - - def close(self): - """Closes this logger. Finalizes all log files so are ready for use.""" - if self.params.silent_logger: - return - - if not self.open: - return - - if hasattr(self, 'citc_snapshot'): - self.unset_attribute('citc_snapshot') - if hasattr(self, 'change_list_number'): - self.unset_attribute('change_list_number') - - if self.text_log: - self.text_log.close() - - if self.avro_log: - if self.avro_log.getbuffer().nbytes > 0: - self.avro_file_counter += 1 - upload_blob_from_stream( - self.params.bucket_name, - self.params.gcp_path, - self.avro_log, - self.avro_log_file_path, - self.avro_file_counter, - ) - # if this is the only avro file, table has not been created yet - if self.avro_file_counter == 1: - create_external_bq_table(self.params, self.table_name, self.id) - logging.info( - 'Log GUI : https://script.google.com/a/google.com/macros/s/%s/exec?' - 'log_id=%s.%s&log_owner=%s&project_id=%s', - self.SIGHT_API_KEY, - self.params.dataset_name, - self.table_name, - self.params.log_owner, - os.environ['PROJECT_ID'] - ) - print(f'table generated : {self.params.dataset_name}.{self.table_name}') - self.avro_log.close() - - if not self.params.local and not self.params.in_memory: - logging.info( - ( - #'Log : https://script.google.com/a/google.com/macros/s/%s/exec?' - 'Log : https://script.google.com/a/google.com/macros/s/%s/dev?' - 'log_id=%s.%s&log_owner=%s&project_id=%s', - ), - self.SIGHT_API_KEY, - self.params.dataset_name, - self.table_name, - self.params.log_owner, - os.environ['PROJECT_ID'] - ) + return Sight(params, configuration) + + def __enter__(self): + return self + + def __exit__(self, exc_type, value, traceback): + # last rpc call to server for this sight id + req = service_pb2.CloseRequest() + req.client_id = str(self.id) + response = service.call( + lambda s, meta: s.Close(req, 300, metadata=meta)) + # print("close rpc status :", response.response_str) + + if self.params.silent_logger: + self.close() + if exc_type is not None: + # pytype: disable=attribute-error + exception(exc_type, value, traceback, self, + inspect.currentframe().f_back) + # pytype: enable=attribute-error + self.close() + + def __del__(self): + self.close() + + def close(self): + """Closes this logger. 
Finalizes all log files so are ready for use.""" + if self.params.silent_logger: + return + + if not self.open: + return + + if hasattr(self, 'citc_snapshot'): + self.unset_attribute('citc_snapshot') + if hasattr(self, 'change_list_number'): + self.unset_attribute('change_list_number') + + if self.text_log: + self.text_log.close() + + if self.avro_log: + if self.avro_log.getbuffer().nbytes > 0: + self.avro_file_counter += 1 + upload_blob_from_stream( + self.params.bucket_name, + self.params.gcp_path, + self.avro_log, + self.avro_log_file_path, + self.avro_file_counter, + ) + # if this is the only avro file, table has not been created yet + if self.avro_file_counter == 1: + create_external_bq_table(self.params, self.table_name, + self.id) + logging.info( + 'Log GUI : https://script.google.com/a/google.com/macros/s/%s/exec?' + 'log_id=%s.%s&log_owner=%s&project_id=%s', + self.SIGHT_API_KEY, self.params.dataset_name, + self.table_name, self.params.log_owner, + os.environ['PROJECT_ID']) + print( + f'table generated : {self.params.dataset_name}.{self.table_name}' + ) + self.avro_log.close() + + if not self.params.local and not self.params.in_memory: + logging.info( + ( + #'Log : https://script.google.com/a/google.com/macros/s/%s/exec?' + 'Log : https://script.google.com/a/google.com/macros/s/%s/dev?' + 'log_id=%s.%s&log_owner=%s&project_id=%s', ), + self.SIGHT_API_KEY, + self.params.dataset_name, + self.table_name, + self.params.log_owner, + os.environ['PROJECT_ID']) - if(FLAGS.decision_mode == 'train'): - decision.finalize(self) - finalize_server() - self.open = False + if (FLAGS.decision_mode == 'train'): + decision.finalize(self) + finalize_server() + self.open = False - def pause_logging(self) -> None: - self.pause_logging_depth += 1 + def pause_logging(self) -> None: + self.pause_logging_depth += 1 - def resume_logging(self) -> None: - self.pause_logging_depth -= 1 + def resume_logging(self) -> None: + self.pause_logging_depth -= 1 - def is_logging_enabled(self) -> bool: - return not self.params.silent_logger and self.pause_logging_depth <= 1 + def is_logging_enabled(self) -> bool: + return not self.params.silent_logger and self.pause_logging_depth <= 1 - def get_in_memory_log(self) -> sight_pb2.Log: - """Returns a proto that contains the full Sight in-memory log.""" - log = sight_pb2.Log() + def get_in_memory_log(self) -> sight_pb2.Log: + """Returns a proto that contains the full Sight in-memory log.""" + log = sight_pb2.Log() - if self.in_memory_log: - log.obj.extend(self.in_memory_log) + if self.in_memory_log: + log.obj.extend(self.in_memory_log) - return log + return log - def set_object_code_loc(self, obj: sight_pb2.Object, frame: Any) -> None: - """Updates obj with the calling context information in frame. + def set_object_code_loc(self, obj: sight_pb2.Object, frame: Any) -> None: + """Updates obj with the calling context information in frame. Args: obj: The object to be updated frame: The call stack frame that contains the calling context information. 
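
The `frame` plumbing documented here follows the standard `inspect` idiom: hop one frame up and read the caller's file, line and function. A standalone sketch of the same pattern, independent of Sight (function names are illustrative):

```python
import inspect

def describe_caller() -> str:
    # Same pattern set_object_code_loc relies on: walk one frame up and read
    # file/line/function from the caller's context.
    frame = inspect.currentframe().f_back
    info = inspect.getframeinfo(frame)
    return f'{info.filename}:{info.lineno} in {info.function}'

def some_logging_site():
    print(describe_caller())

some_logging_site()
```
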
""" - frameinfo = inspect.getframeinfo(frame) - google3_loc = frameinfo.filename.find(self.CODE_FILES_PATH_PREFIX) - if google3_loc >= 0: - obj.file = frameinfo.filename[ - google3_loc + len(self.CODE_FILES_PATH_PREFIX) : - ] - else: - obj.file = frameinfo.filename - obj.line = frameinfo.lineno - obj.func = frameinfo.function + frameinfo = inspect.getframeinfo(frame) + google3_loc = frameinfo.filename.find(self.CODE_FILES_PATH_PREFIX) + if google3_loc >= 0: + obj.file = frameinfo.filename[google3_loc + + len(self.CODE_FILES_PATH_PREFIX):] + else: + obj.file = frameinfo.filename + obj.line = frameinfo.lineno + obj.func = frameinfo.function - def text(self, text_val: str, end='\n', frame=None) -> str: - """Logs a text value to the Sight log. + def text(self, text_val: str, end='\n', frame=None) -> str: + """Logs a text value to the Sight log. Args: text_val: The text value to be logged. @@ -524,39 +520,43 @@ def text(self, text_val: str, end='\n', frame=None) -> str: Returns: The logged text. """ - if self.params.silent_logger or self.pause_logging_depth > 0: - return '' + if self.params.silent_logger or self.pause_logging_depth > 0: + return '' + + obj = sight_pb2.Object() + if frame is None: + # pytype: disable=attribute-error + frame = inspect.currentframe().f_back + # pytype: enable=attribute-error + self.set_object_code_loc(obj, frame) + + if self.is_binary_logged(): + obj.sub_type = sight_pb2.Object.SubType.ST_TEXT + obj.text.text = text_val.replace('\n', '\\n') + end + obj.text.sub_type = sight_pb2.Text.SubType.ST_TEXT + self.log_object(obj, True) + + if end == '\n': + full_text_line = '(%s:%d) function : %s\n %s\n' % ( + obj.file, + obj.line, + obj.func, + # self.line_prefix, + text_val, + # self.line_suffix, + ) + else: + full_text_line = text_val + end + self.emit_text_to_file(full_text_line) - obj = sight_pb2.Object() - if frame is None: - # pytype: disable=attribute-error - frame = inspect.currentframe().f_back - # pytype: enable=attribute-error - self.set_object_code_loc(obj, frame) - - if self.is_binary_logged(): - obj.sub_type = sight_pb2.Object.SubType.ST_TEXT - obj.text.text = text_val.replace('\n', '\\n') + end - obj.text.sub_type = sight_pb2.Text.SubType.ST_TEXT - self.log_object(obj, True) - - if end == '\n': - full_text_line = '(%s:%d) function : %s\n %s\n' % ( - obj.file, - obj.line, - obj.func, - # self.line_prefix, - text_val, - # self.line_suffix, - ) - else: - full_text_line = text_val + end - self.emit_text_to_file(full_text_line) - - return full_text_line - - def text_block(self, label: str, text_val: str, end='\n', frame=None) -> str: - """Logs a block that contains a specified text string as its contents. + return full_text_line + + def text_block(self, + label: str, + text_val: str, + end='\n', + frame=None) -> str: + """Logs a block that contains a specified text string as its contents. Args: label: The label of the block. @@ -568,37 +568,38 @@ def text_block(self, label: str, text_val: str, end='\n', frame=None) -> str: Returns: The logged text. 
""" - if self.params.silent_logger or self.pause_logging_depth > 0: - return '' + if self.params.silent_logger or self.pause_logging_depth > 0: + return '' - if frame is None: - # pytype: disable=attribute-error - frame = inspect.currentframe().f_back - # pytype: enable=attribute-error - self.enter_block(label, sight_pb2.Object(), frame) - ret_val = self.text(text_val, end, frame) - self.exit_block(label, sight_pb2.Object(), frame) + if frame is None: + # pytype: disable=attribute-error + frame = inspect.currentframe().f_back + # pytype: enable=attribute-error + self.enter_block(label, sight_pb2.Object(), frame) + ret_val = self.text(text_val, end, frame) + self.exit_block(label, sight_pb2.Object(), frame) - return ret_val + return ret_val - def gap(self) -> Optional[Location]: - """Logs a dummy gap value value to the Sight log. + def gap(self) -> Optional[Location]: + """Logs a dummy gap value value to the Sight log. Returns: The location of the dummy object in the log. """ - if self.params.silent_logger or self.pause_logging_depth > 0: - return None + if self.params.silent_logger or self.pause_logging_depth > 0: + return None - if self.is_binary_logged(): - return self.log_object( - sight_pb2.Object(sub_type=sight_pb2.Object.SubType.ST_GAP), True - ) + if self.is_binary_logged(): + return self.log_object( + sight_pb2.Object(sub_type=sight_pb2.Object.SubType.ST_GAP), + True) - def enter_block( - self, label: str, obj: sight_pb2.Object, frame: Optional[Any] = None - ) -> Optional[Location]: - """Documents in the Sight log that a hierarchical block was entered. + def enter_block(self, + label: str, + obj: sight_pb2.Object, + frame: Optional[Any] = None) -> Optional[Location]: + """Documents in the Sight log that a hierarchical block was entered. Args: label: The label of the block. @@ -611,44 +612,48 @@ def enter_block( Returns: The log Location of the block's starting point. 
""" - if self.params.silent_logger: - return None + if self.params.silent_logger: + return None - if self.pause_logging_depth > 0: - return self.location.get() + if self.pause_logging_depth > 0: + return self.location.get() - self.active_block_labels.get().append(label) - # self.emit_text_to_file( - # self.line_prefix + label + '<<<' + self.line_suffix + '\n' - # ) - self.emit_text_to_file(self.line_prefix.get() + label + '\n' + '>>> ' + '\n') - self.line_prefix.set(self.line_prefix.get() + label + ': ') + self.active_block_labels.get().append(label) + # self.emit_text_to_file( + # self.line_prefix + label + '<<<' + self.line_suffix + '\n' + # ) + self.emit_text_to_file(self.line_prefix.get() + label + '\n' + '>>> ' + + '\n') + self.line_prefix.set(self.line_prefix.get() + label + ': ') - obj_location = self.location.get() - if self.is_binary_logged(): - obj.sub_type = sight_pb2.Object.SubType.ST_BLOCK_START + obj_location = self.location.get() + if self.is_binary_logged(): + obj.sub_type = sight_pb2.Object.SubType.ST_BLOCK_START - if obj.block_start is None: - obj.block_start = sight_pb2.BlockStart() - obj.block_start.label = label + if obj.block_start is None: + obj.block_start = sight_pb2.BlockStart() + obj.block_start.label = label - if frame is None: - # pytype: disable=attribute-error - frame = inspect.currentframe().f_back - # pytype: enable=attribute-error - self.set_object_code_loc(obj, frame) + if frame is None: + # pytype: disable=attribute-error + frame = inspect.currentframe().f_back + # pytype: enable=attribute-error + self.set_object_code_loc(obj, frame) - self.log_object(obj, False) - self.open_block_start_locations.get().append(obj.location) + self.log_object(obj, False) + self.open_block_start_locations.get().append(obj.location) - self.num_direct_contents.get().enter(0) - self.num_transitive_contents.get().enter(0) - self.location.get().enter(0) + self.num_direct_contents.get().enter(0) + self.num_transitive_contents.get().enter(0) + self.location.get().enter(0) - return obj_location + return obj_location - def exit_block(self, label: str, obj: sight_pb2.Object, frame=None) -> None: - """Documents in the Sight log that a hierarchical block was exited. + def exit_block(self, + label: str, + obj: sight_pb2.Object, + frame=None) -> None: + """Documents in the Sight log that a hierarchical block was exited. Args: label: the label of the block. @@ -658,62 +663,61 @@ def exit_block(self, label: str, obj: sight_pb2.Object, frame=None) -> None: frame: the call stack frame that the calling context from which the logging event was created. 
""" - if self.params.silent_logger or self.pause_logging_depth > 0: - return - - if not self.active_block_labels.get() or self.location.get().size() == 1: - logging.warning('Exiting inactive Sight block "%s"', label) - return - - self.active_block_labels.get().pop() - self.line_prefix.set('') - for block_label in self.active_block_labels.get(): - self.line_prefix.set(self.line_prefix.get() + block_label + ': ') - - self.location.get().exit() - self.location.get().next() - - if self.is_binary_logged(): - if not self.open_block_start_locations.get(): - logging.warning('Exiting inactive Sight block "%s"', label) - - obj.sub_type = sight_pb2.Object.SubType.ST_BLOCK_END - if obj.block_end is None: - obj.block_end = sight_pb2.BlockEnd() - obj.block_end.label = label - obj.block_end.num_direct_contents = self.num_direct_contents.get().pos() - obj.block_end.num_transitive_contents = self.num_transitive_contents.get().pos() - obj.block_end.location_of_block_start = self.open_block_start_locations.get()[ - -1 - ] - self.open_block_start_locations.get().pop() - - if frame is None: - # pytype: disable=attribute-error - frame = inspect.currentframe().f_back - # pytype: enable=attribute-error - self.set_object_code_loc(obj, frame) - - self.log_object(obj, True) - - self.emit_text_to_file( - # self.line_prefix + label + '>>>' + self.line_suffix + '\n' - '<<< ' - + '\n' - ) - - self.num_direct_contents.get().exit() - self.num_transitive_contents.get().exit() - - def _update_line_suffix(self) -> None: - # Each value in self.attributes is non-empty since empty values are removed - # in unset_attribute. - self.line_suffix.set('| ' + ','.join( - [f'{key}={value[-1]}' for key, value in self.attributes.items()] - )) - - def set_attribute(self, key: str, value: str) -> None: - """Documents in the Sight log a new key-value attribute mapping. + if self.params.silent_logger or self.pause_logging_depth > 0: + return + + if not self.active_block_labels.get() or self.location.get().size( + ) == 1: + logging.warning('Exiting inactive Sight block "%s"', label) + return + + self.active_block_labels.get().pop() + self.line_prefix.set('') + for block_label in self.active_block_labels.get(): + self.line_prefix.set(self.line_prefix.get() + block_label + ': ') + + self.location.get().exit() + self.location.get().next() + + if self.is_binary_logged(): + if not self.open_block_start_locations.get(): + logging.warning('Exiting inactive Sight block "%s"', label) + + obj.sub_type = sight_pb2.Object.SubType.ST_BLOCK_END + if obj.block_end is None: + obj.block_end = sight_pb2.BlockEnd() + obj.block_end.label = label + obj.block_end.num_direct_contents = self.num_direct_contents.get( + ).pos() + obj.block_end.num_transitive_contents = self.num_transitive_contents.get( + ).pos() + obj.block_end.location_of_block_start = self.open_block_start_locations.get( + )[-1] + self.open_block_start_locations.get().pop() + + if frame is None: + # pytype: disable=attribute-error + frame = inspect.currentframe().f_back + # pytype: enable=attribute-error + self.set_object_code_loc(obj, frame) + + self.log_object(obj, True) + + self.emit_text_to_file( + # self.line_prefix + label + '>>>' + self.line_suffix + '\n' + '<<< ' + '\n') + + self.num_direct_contents.get().exit() + self.num_transitive_contents.get().exit() + + def _update_line_suffix(self) -> None: + # Each value in self.attributes is non-empty since empty values are removed + # in unset_attribute. 
+        self.line_suffix.set('| ' + ','.join(
+            [f'{key}={value[-1]}' for key, value in self.attributes.items()]))
+
+    def set_attribute(self, key: str, value: str) -> None:
+        """Documents in the Sight log a new key-value attribute mapping.
 
    Until the mapping is unset all logged objects will be annotated with this
    key/value pair.
 
@@ -722,11 +726,11 @@ def set_attribute(self, key: str, value: str) -> None:
      key: the name of the key being set.
      value: the value assigned to key.
    """
-    self.attributes.setdefault(key, []).append(value)
-    self._update_line_suffix()
+        self.attributes.setdefault(key, []).append(value)
+        self._update_line_suffix()
 
-  def unset_attribute(self, key: str) -> None:
-    """Removes from the Sight log a new key-value attribute mapping.
+    def unset_attribute(self, key: str) -> None:
+        """Removes from the Sight log a key-value attribute mapping.
 
   Subsequent logged objects will no longer be annotated with this
   key/value pair. If the key had a value mapped to it before the value
@@ -736,29 +740,30 @@ def unset_attribute(self, key: str) -> None:
    Args:
      key: the name of the key being unset.
    """
-    values = self.attributes.get(key)
-    if not values:
-      logging.error('Failed to unset attribute %s, which is not set.', key)
-      return
+        values = self.attributes.get(key)
+        if not values:
+            logging.error('Failed to unset attribute %s, which is not set.',
+                          key)
+            return
 
-    values.pop()
-    if not values:
-      del self.attributes[key]
+        values.pop()
+        if not values:
+            del self.attributes[key]
 
-    self._update_line_suffix()
+        self._update_line_suffix()
 
-  def fetch_attributes(self) -> dict[str,str]:
-    """Fetches all the values of attributes that is currently set to within Sight.
+    def fetch_attributes(self) -> dict[str, str]:
+        """Fetches the values of all attributes that are currently set within Sight.
 
    Returns:
      The dictionary that contains key-value pairs of the attributes currently
      set.
    """
-    attr_dict = {}
-    for k,v in self.attributes.items():
-      attr_dict[k] = v[-1]
-    return attr_dict
+        attr_dict = {}
+        for k, v in self.attributes.items():
+            attr_dict[k] = v[-1]
+        return attr_dict
 
-  def get_attribute(self, key: str) -> str:
-    """Fetches the value that a key is currently set to within Sight.
+    def get_attribute(self, key: str) -> str:
+        """Fetches the value that a key is currently set to within Sight.
 
    Args:
      key: the name of the key being fetched.
 
@@ -766,15 +771,15 @@ def get_attribute(self, key: str) -> str:
    Returns:
      The value that key is currently set to.
    """
-    values = self.attributes.get(key)
-    if not values:
-      return ''
-    return values[-1]
+        values = self.attributes.get(key)
+        if not values:
+            return ''
+        return values[-1]
 
-  def log_object(
-      self, obj: sight_pb2.Object, advance_location: bool = True
-  ) -> Optional[Location]:
-    """Emits a single object to the Sight log.
+    def log_object(self,
+                   obj: sight_pb2.Object,
+                   advance_location: bool = True) -> Optional[Location]:
+        """Emits a single object to the Sight log.
 
    Args:
      obj: A Sight object where log event is to be recorded. This object may
 
    Returns:
      The Location of the logged object.
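      None is returned when the logger is silenced.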
""" - if self.params.silent_logger: - return None - - if self.pause_logging_depth > 0: - return self.location.get() - - if not self.num_direct_contents.get().is_empty(): - self.num_direct_contents.get().next() - self.num_transitive_contents.get().next_all() - - obj_location = self.location.get() - if self.is_binary_logged(): - obj.location = str(self.location.get()) - obj.index = self.index - self.index += 1 - - for key, value in self.attributes.items(): - if not value: - logging.warning('No attributes recorded for key %s', key) - continue - - attr = obj.attribute.add() - attr.key = key - attr.value = str(value[-1]) - - for loc in self.open_block_start_locations.get(): - obj.ancestor_start_location.append(str(loc)) - obj.ancestor_start_location.append(str(self.location.get())) - - obj.order.timestamp_ns = time.time_ns() - - if self.params.in_memory: - self.in_memory_log.append(obj) - elif self.avro_log: - dict_obj = MessageToDict(obj, preserving_proto_field_name=True) - fastavro.writer(self.avro_log, self.avro_schema, [dict_obj]) - self.avro_record_counter += 1 - if self.avro_record_counter % 1000 == 0: - self.avro_file_counter += 1 - upload_blob_from_stream( - self.params.bucket_name, - self.params.gcp_path, - self.avro_log, - self.avro_log_file_path, - self.avro_file_counter, - ) - if self.avro_file_counter == 1: - create_external_bq_table(self.params, self.table_name, self.id) - logging.info( - 'Log GUI : https://script.google.com/a/google.com/macros/s/%s/exec?' - 'log_id=%s.%s&log_owner=%s&project_id=%s', - self.SIGHT_API_KEY, - self.params.dataset_name, - self.table_name, - self.params.log_owner, - os.environ['PROJECT_ID'] - ) - print(f'table generated : {self.params.dataset_name}.{self.table_name}') - self.avro_log.close() - self.avro_log = io.BytesIO() - - if advance_location: - self.location.get().next() - - return obj_location - - def emit_text_to_file(self, text_val: str) -> None: - """Emits text to the output text file, if one is being used. + if self.params.silent_logger: + return None + + if self.pause_logging_depth > 0: + return self.location.get() + + if not self.num_direct_contents.get().is_empty(): + self.num_direct_contents.get().next() + self.num_transitive_contents.get().next_all() + + obj_location = self.location.get() + if self.is_binary_logged(): + obj.location = str(self.location.get()) + obj.index = self.index + self.index += 1 + + for key, value in self.attributes.items(): + if not value: + logging.warning('No attributes recorded for key %s', key) + continue + + attr = obj.attribute.add() + attr.key = key + attr.value = str(value[-1]) + + for loc in self.open_block_start_locations.get(): + obj.ancestor_start_location.append(str(loc)) + obj.ancestor_start_location.append(str(self.location.get())) + + obj.order.timestamp_ns = time.time_ns() + + if self.params.in_memory: + self.in_memory_log.append(obj) + elif self.avro_log: + dict_obj = MessageToDict(obj, preserving_proto_field_name=True) + fastavro.writer(self.avro_log, self.avro_schema, [dict_obj]) + self.avro_record_counter += 1 + if self.avro_record_counter % 1000 == 0: + self.avro_file_counter += 1 + upload_blob_from_stream( + self.params.bucket_name, + self.params.gcp_path, + self.avro_log, + self.avro_log_file_path, + self.avro_file_counter, + ) + if self.avro_file_counter == 1: + create_external_bq_table(self.params, self.table_name, + self.id) + logging.info( + 'Log GUI : https://script.google.com/a/google.com/macros/s/%s/exec?' 
+ 'log_id=%s.%s&log_owner=%s&project_id=%s', + self.SIGHT_API_KEY, self.params.dataset_name, + self.table_name, self.params.log_owner, + os.environ['PROJECT_ID']) + print( + f'table generated : {self.params.dataset_name}.{self.table_name}' + ) + self.avro_log.close() + self.avro_log = io.BytesIO() + + if advance_location: + self.location.get().next() + + return obj_location + + def emit_text_to_file(self, text_val: str) -> None: + """Emits text to the output text file, if one is being used. Args: text_val: The text to be logged. """ - if self.params.silent_logger or self.pause_logging_depth > 0: - return + if self.params.silent_logger or self.pause_logging_depth > 0: + return - if self.text_log: - self.text_log.write(text_val) - # logging.info(text_val) + if self.text_log: + self.text_log.write(text_val) + # logging.info(text_val) - def is_binary_logged(self) -> bool: - """Returns whether a binary proto representation is being logged.""" - # return self.params.capacitor_output - return self.params.avro_output + def is_binary_logged(self) -> bool: + """Returns whether a binary proto representation is being logged.""" + # return self.params.capacitor_output + return self.params.avro_output - def _configure(self, configuration: Sequence[sight_pb2.Object]) -> None: - """Initializes the configuration of this logger and widgets. + def _configure(self, configuration: Sequence[sight_pb2.Object]) -> None: + """Initializes the configuration of this logger and widgets. Args: configuration: Sight log that stores configuration log objects. """ - if not configuration: - decision.configure(None, self.widget_decision_state) - return - - self.add_config(configuration) - - # def add_config(self, configuration: Sequence[sight_pb2.Object]) -> None: - # """Augments the configuration of this logger from an in-memory log. - - # Args: - # configuration: Sight log that stores configuration log objects. - # """ - # if not configuration: - # return - # for cur in configuration: - # if ( - # cur.sub_type != sight_pb2.Object.ST_BLOCK_START - # or cur.block_start.sub_type != sight_pb2.BlockStart.ST_CONFIGURATION - # ): - # continue - - # if ( - # cur.block_start.configuration.sub_type - # == sight_pb2.ConfigurationStart.ST_DECISION_CONFIGURATION - # ): - # decision.configure( - # cur.block_start.configuration.decision_configuration, - # self.widget_decision_state, - # ) - - # def add_config_file(self, config_file_path: str) -> None: - # """Augments the configuration of this logger from a file. - - # Args: - # config_file_path: File glob that contains a Sight log that stores - # configuration log objects. - # """ - # self.add_config(_read_capacitor_file(config_file_path)) # pytype: disable=wrong-arg-types # dynamic-method-lookup - + if not configuration: + decision.configure(None, self.widget_decision_state) + return + + self.add_config(configuration) + + # def add_config(self, configuration: Sequence[sight_pb2.Object]) -> None: + # """Augments the configuration of this logger from an in-memory log. + + # Args: + # configuration: Sight log that stores configuration log objects. 
+ # """ + # if not configuration: + # return + # for cur in configuration: + # if ( + # cur.sub_type != sight_pb2.Object.ST_BLOCK_START + # or cur.block_start.sub_type != sight_pb2.BlockStart.ST_CONFIGURATION + # ): + # continue + + # if ( + # cur.block_start.configuration.sub_type + # == sight_pb2.ConfigurationStart.ST_DECISION_CONFIGURATION + # ): + # decision.configure( + # cur.block_start.configuration.decision_configuration, + # self.widget_decision_state, + # ) + + # def add_config_file(self, config_file_path: str) -> None: + # """Augments the configuration of this logger from a file. + + # Args: + # config_file_path: File glob that contains a Sight log that stores + # configuration log objects. + # """ + # self.add_config(_read_capacitor_file(config_file_path)) # pytype: disable=wrong-arg-types # dynamic-method-lookup def text(text_val: str, sight, end='\n', frame=None) -> str: - """Logs a text value to the Sight log if Sight is being used. + """Logs a text value to the Sight log if Sight is being used. If no Sight logger object is provided, nothing is logged. @@ -931,21 +935,21 @@ def text(text_val: str, sight, end='\n', frame=None) -> str: Returns: The logged text. """ - if sight.params.silent_logger or sight.pause_logging_depth > 0: - return '' + if sight.params.silent_logger or sight.pause_logging_depth > 0: + return '' - if sight is None: - return '' + if sight is None: + return '' - if frame is None: - # pytype: disable=attribute-error - frame = inspect.currentframe().f_back - # pytype: enable=attribute-error - return sight.text(text_val, end=end, frame=frame) + if frame is None: + # pytype: disable=attribute-error + frame = inspect.currentframe().f_back + # pytype: enable=attribute-error + return sight.text(text_val, end=end, frame=frame) def text_block(label: str, text_val: str, sight, frame=None) -> str: - """Logs to Sight a block that contains a text string if Sight is being used. + """Logs to Sight a block that contains a text string if Sight is being used. If no Sight logger object is provided, nothing is logged. @@ -959,14 +963,14 @@ def text_block(label: str, text_val: str, sight, frame=None) -> str: Returns: The logged text. """ - if sight.params.silent_logger or sight.pause_logging_depth > 0: - return '' + if sight.params.silent_logger or sight.pause_logging_depth > 0: + return '' - if sight is None: - return '' + if sight is None: + return '' - if frame is None: - # pytype: disable=attribute-error - frame = inspect.currentframe().f_back - # pytype: enable=attribute-error - return sight.text_block(label, text_val, frame) + if frame is None: + # pytype: disable=attribute-error + frame = inspect.currentframe().f_back + # pytype: enable=attribute-error + return sight.text_block(label, text_val, frame) diff --git a/py/sight/widgets/decision/acme/acme_optimizer_client.py b/py/sight/widgets/decision/acme/acme_optimizer_client.py index 8b55b3a..d3153c6 100644 --- a/py/sight/widgets/decision/acme/acme_optimizer_client.py +++ b/py/sight/widgets/decision/acme/acme_optimizer_client.py @@ -11,10 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- """Client for dm-acme optimizer to communicate with server.""" import math -import logging +from helpers.logs.logs_handler import logger as logging from typing import Optional, Sequence, Tuple from absl import flags from acme import specs @@ -59,7 +58,6 @@ # ) # return possible_actions - # def generate_spec_details(attr_dict): # """convert the spec details of environment into usable format.""" # method_name = "generate_spec_details" @@ -93,109 +91,108 @@ # ) -class AcmeOptimizerClient (OptimizerClient): - """Acme client for the Sight service.""" - - def __init__(self, sight): - super().__init__(sight_pb2.DecisionConfigurationStart.OptimizerType.OT_ACME) - self._sight = sight - self._actor = None - self._adder = None - self._variable_source = None - self._dp_first_call = True - self._last_acme_action = None - - # added to run the base example - self._replay_server = None - self._replay_client = None - self._dataset = None - self._learner = None - - @override - def create_config(self) -> sight_pb2.DecisionConfigurationStart.ChoiceConfig: - # print("self._sight.widget_decision_state['decision_episode_fn'] : ", self._sight) - print("in create config") - choice_config = sight_pb2.DecisionConfigurationStart.ChoiceConfig() - - if(FLAGS.acme_agent == 'dqn'): - choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_DQN - elif(FLAGS.acme_agent == 'd4pg'): - choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_D4PG - # elif(FLAGS.acme_agent == 'impala'): - # choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_IMPALA - elif(FLAGS.acme_agent == 'mdqn'): - choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_MDQN - elif(FLAGS.acme_agent == 'qrdqn'): - choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_QRDQN - # elif(FLAGS.acme_agent == 'ppo'): - # choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_PPO - # elif(FLAGS.acme_agent == 'mpo'): - # choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_MPO - # elif(FLAGS.acme_agent == 'sac'): - # choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_SAC - elif(FLAGS.acme_agent == 'td3'): - choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_TD3 - - - - - # possible_actions = fetch_possible_actions(self._sight.widget_decision_state['decision_episode_fn']) - # choice_config.acme_config.possible_actions = possible_actions - - #? 
using state and action related data as common to all choice_config - # ( - # state_min, - # state_max, - # state_param_length, - # action_min, - # action_max, - # action_param_length, - # possible_actions, - # ) = generate_spec_details( - # self._sight.widget_decision_state['decision_episode_fn'] - # ) - # choice_config.acme_config.state_min.extend(state_min) - # choice_config.acme_config.state_max.extend(state_max) - # choice_config.acme_config.state_param_length = state_param_length - # choice_config.acme_config.action_min.extend(action_min) - # choice_config.acme_config.action_max.extend(action_max) - # choice_config.acme_config.action_param_length = action_param_length - # choice_config.acme_config.possible_actions = possible_actions - - # if FLAGS.env_name: - # choice_config.acme_config.env_name = FLAGS.env_name - - return choice_config - - def generate_env_spec( - self, - # state_min, - # state_max, - # state_param_length, - # action_min, - # action_max, - # action_param_length, - attr_dict - )-> specs.EnvironmentSpec: - """Generates the environment spec for the environment.""" - - method_name = "generate_env_spec" - logging.debug(">>>> In %s of %s", method_name, _file_name) - - # dtype_mapping = { - # sight_pb2.DecisionConfigurationStart.DataType.DT_INT32: np.int32, - # sight_pb2.DecisionConfigurationStart.DataType.DT_INT64: np.int64, - # sight_pb2.DecisionConfigurationStart.DataType.DT_FLOAT32: np.float32, - # sight_pb2.DecisionConfigurationStart.DataType.DT_FLOAT64: np.float64, - # } - - default_dtype = np.float32 - state_min = np.array(list(attr_dict.state_min.values())) - state_max = np.array(list(attr_dict.state_max.values())) - state_param_length = len(attr_dict.state_attrs) - # state_dtype = dtype_mapping[attr_dict.state_dtype] - observations = specs.BoundedArray( - shape=(state_param_length,), +class AcmeOptimizerClient(OptimizerClient): + """Acme client for the Sight service.""" + + def __init__(self, sight): + super().__init__( + sight_pb2.DecisionConfigurationStart.OptimizerType.OT_ACME) + self._sight = sight + self._actor = None + self._adder = None + self._variable_source = None + self._dp_first_call = True + self._last_acme_action = None + + # added to run the base example + self._replay_server = None + self._replay_client = None + self._dataset = None + self._learner = None + + @override + def create_config( + self) -> sight_pb2.DecisionConfigurationStart.ChoiceConfig: + # print("self._sight.widget_decision_state['decision_episode_fn'] : ", self._sight) + print("in create config") + choice_config = sight_pb2.DecisionConfigurationStart.ChoiceConfig() + + if (FLAGS.acme_agent == 'dqn'): + choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_DQN + elif (FLAGS.acme_agent == 'd4pg'): + choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_D4PG + # elif(FLAGS.acme_agent == 'impala'): + # choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_IMPALA + elif (FLAGS.acme_agent == 'mdqn'): + choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_MDQN + elif (FLAGS.acme_agent == 'qrdqn'): + choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_QRDQN + # elif(FLAGS.acme_agent == 'ppo'): + # choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_PPO + # elif(FLAGS.acme_agent == 'mpo'): + # choice_config.acme_config.acme_agent = 
sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_MPO + # elif(FLAGS.acme_agent == 'sac'): + # choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_SAC + elif (FLAGS.acme_agent == 'td3'): + choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_TD3 + + # possible_actions = fetch_possible_actions(self._sight.widget_decision_state['decision_episode_fn']) + # choice_config.acme_config.possible_actions = possible_actions + + #? using state and action related data as common to all choice_config + # ( + # state_min, + # state_max, + # state_param_length, + # action_min, + # action_max, + # action_param_length, + # possible_actions, + # ) = generate_spec_details( + # self._sight.widget_decision_state['decision_episode_fn'] + # ) + # choice_config.acme_config.state_min.extend(state_min) + # choice_config.acme_config.state_max.extend(state_max) + # choice_config.acme_config.state_param_length = state_param_length + # choice_config.acme_config.action_min.extend(action_min) + # choice_config.acme_config.action_max.extend(action_max) + # choice_config.acme_config.action_param_length = action_param_length + # choice_config.acme_config.possible_actions = possible_actions + + # if FLAGS.env_name: + # choice_config.acme_config.env_name = FLAGS.env_name + + return choice_config + + def generate_env_spec( + self, + # state_min, + # state_max, + # state_param_length, + # action_min, + # action_max, + # action_param_length, + attr_dict + ) -> specs.EnvironmentSpec: + """Generates the environment spec for the environment.""" + + method_name = "generate_env_spec" + logging.debug(">>>> In %s of %s", method_name, _file_name) + + # dtype_mapping = { + # sight_pb2.DecisionConfigurationStart.DataType.DT_INT32: np.int32, + # sight_pb2.DecisionConfigurationStart.DataType.DT_INT64: np.int64, + # sight_pb2.DecisionConfigurationStart.DataType.DT_FLOAT32: np.float32, + # sight_pb2.DecisionConfigurationStart.DataType.DT_FLOAT64: np.float64, + # } + + default_dtype = np.float32 + state_min = np.array(list(attr_dict.state_min.values())) + state_max = np.array(list(attr_dict.state_max.values())) + state_param_length = len(attr_dict.state_attrs) + # state_dtype = dtype_mapping[attr_dict.state_dtype] + observations = specs.BoundedArray( + shape=(state_param_length, ), # dtype=state_dtype, dtype=default_dtype, name="observation", @@ -203,139 +200,137 @@ def generate_env_spec( maximum=state_max, ), - action_param_length = len(attr_dict.action_attrs) - # if(attr_dict.action_min): - action_min = np.array(list(attr_dict.action_min.values())) - # if(attr_dict.action_max): - action_max = np.array(list(attr_dict.action_max.values())) - # if(attr_dict.action_dtype): - # action_dtype = dtype_mapping[attr_dict.action_dtype] - - # create discrete spec - if(attr_dict.valid_action_values): - possible_values_list = list(attr_dict.valid_action_values.values())[0] - actions = specs.DiscreteArray( - num_values=len(possible_values_list), - dtype=np.int64, - name="action", - ) - # create bounded spec - else: - if(attr_dict.step_size): - default_dtype=np.int64 - actions = specs.BoundedArray( - shape=(action_param_length,), - # dtype=action_dtype, - dtype=default_dtype, - name="action", - minimum=action_min, - maximum=action_max, - ) - - # print(state_dtype, action_dtype) - - new_env_spec = specs.EnvironmentSpec( - # works for gym - observations=observations, - actions=actions, - rewards=specs.Array(shape=(), dtype=float, name="reward"), - discounts=specs.BoundedArray( - 
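+            # The bounds below (minimum/maximum) come from the state_min and
+            # state_max attributes declared for the decision episode.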
shape=(), dtype=float, minimum=0.0, maximum=1.0, name="discount" - ), - ) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - # print("new_env_spec : ", new_env_spec) - return new_env_spec - - def create_new_actor(self): - """Creates a new actor.""" - method_name = "create_new_actor" - logging.debug(">>>> In %s of %s", method_name, _file_name) - - # if FLAGS.env_name: - # if FLAGS.env_name == "Pendulum-v1": - # experiment = build_d4pg_config(env_name=FLAGS.env_name) - # else: - # experiment = build_dqn_config(env_name=FLAGS.env_name) - # # print("experiment : ", experiment) - - # environment = experiment.environment_factory() - # environment_spec = specs.make_environment_spec(environment) - # # print('environment_spec : ', environment_spec) - - # else: - attr_dict = self._sight.widget_decision_state['decision_episode_fn'] - environment_spec = self.generate_env_spec( - attr_dict - ) - - if(FLAGS.acme_agent == 'dqn'): - experiment = build_dqn_config() - elif(FLAGS.acme_agent == 'd4pg'): - experiment = build_d4pg_config() - # elif(FLAGS.acme_agent == 'impala'): - # experiment = build_impala_config() - elif(FLAGS.acme_agent == 'mdqn'): - experiment = build_mdqn_config() - elif(FLAGS.acme_agent == 'qrdqn'): - experiment = build_qrdqn_config() - # elif(FLAGS.acme_agent == 'ppo'): - # experiment = build_ppo_config() - # elif(FLAGS.acme_agent == 'mpo'): - # experiment = build_mpo_config() - # elif(FLAGS.acme_agent == 'sac'): - # experiment = build_sac_config(environment_spec) - elif(FLAGS.acme_agent == 'td3'): - experiment = build_td3_config() - - - # ( - # state_min, - # state_max, - # state_param_length, - # state_dtype, - # action_min, - # action_max, - # action_param_length, - # action_dtype - # # possible_actions, - # ) = generate_spec_details( - # self._sight.widget_decision_state['decision_episode_fn'] - # ) - - # print('environment_spec : ', environment_spec) - - networks = experiment.network_factory(environment_spec) - policy = config.make_policy( - experiment=experiment, - networks=networks, - environment_spec=environment_spec, - evaluation=False, - ) - # print("network : ", networks) - # print("policy : ", policy) - - self._adder = sight_adder.SightAdder() - self._variable_source = sight_variable_source.SightVariableSource( - adder=self._adder, client_id=self._sight.id, sight=self._sight - ) - - - key = jax.random.PRNGKey(0) - actor_key, key = jax.random.split(key) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return experiment.builder.make_actor( - actor_key, - policy, - environment_spec, - variable_source=self._variable_source, - adder=self._adder, - ) - - @override - def decision_point(self, sight, request: service_pb2.DecisionPointRequest): - # def decision_point(self, sight): - """communicates with decision_point method on server. 
+ action_param_length = len(attr_dict.action_attrs) + # if(attr_dict.action_min): + action_min = np.array(list(attr_dict.action_min.values())) + # if(attr_dict.action_max): + action_max = np.array(list(attr_dict.action_max.values())) + # if(attr_dict.action_dtype): + # action_dtype = dtype_mapping[attr_dict.action_dtype] + + # create discrete spec + if (attr_dict.valid_action_values): + possible_values_list = list( + attr_dict.valid_action_values.values())[0] + actions = specs.DiscreteArray( + num_values=len(possible_values_list), + dtype=np.int64, + name="action", + ) + # create bounded spec + else: + if (attr_dict.step_size): + default_dtype = np.int64 + actions = specs.BoundedArray( + shape=(action_param_length, ), + # dtype=action_dtype, + dtype=default_dtype, + name="action", + minimum=action_min, + maximum=action_max, + ) + + # print(state_dtype, action_dtype) + + new_env_spec = specs.EnvironmentSpec( + # works for gym + observations=observations, + actions=actions, + rewards=specs.Array(shape=(), dtype=float, name="reward"), + discounts=specs.BoundedArray(shape=(), + dtype=float, + minimum=0.0, + maximum=1.0, + name="discount"), + ) + logging.debug("<<<< Out %s of %s", method_name, _file_name) + # print("new_env_spec : ", new_env_spec) + return new_env_spec + + def create_new_actor(self): + """Creates a new actor.""" + method_name = "create_new_actor" + logging.debug(">>>> In %s of %s", method_name, _file_name) + + # if FLAGS.env_name: + # if FLAGS.env_name == "Pendulum-v1": + # experiment = build_d4pg_config(env_name=FLAGS.env_name) + # else: + # experiment = build_dqn_config(env_name=FLAGS.env_name) + # # print("experiment : ", experiment) + + # environment = experiment.environment_factory() + # environment_spec = specs.make_environment_spec(environment) + # # print('environment_spec : ', environment_spec) + + # else: + attr_dict = self._sight.widget_decision_state['decision_episode_fn'] + environment_spec = self.generate_env_spec(attr_dict) + + if (FLAGS.acme_agent == 'dqn'): + experiment = build_dqn_config() + elif (FLAGS.acme_agent == 'd4pg'): + experiment = build_d4pg_config() + # elif(FLAGS.acme_agent == 'impala'): + # experiment = build_impala_config() + elif (FLAGS.acme_agent == 'mdqn'): + experiment = build_mdqn_config() + elif (FLAGS.acme_agent == 'qrdqn'): + experiment = build_qrdqn_config() + # elif(FLAGS.acme_agent == 'ppo'): + # experiment = build_ppo_config() + # elif(FLAGS.acme_agent == 'mpo'): + # experiment = build_mpo_config() + # elif(FLAGS.acme_agent == 'sac'): + # experiment = build_sac_config(environment_spec) + elif (FLAGS.acme_agent == 'td3'): + experiment = build_td3_config() + + # ( + # state_min, + # state_max, + # state_param_length, + # state_dtype, + # action_min, + # action_max, + # action_param_length, + # action_dtype + # # possible_actions, + # ) = generate_spec_details( + # self._sight.widget_decision_state['decision_episode_fn'] + # ) + + # print('environment_spec : ', environment_spec) + + networks = experiment.network_factory(environment_spec) + policy = config.make_policy( + experiment=experiment, + networks=networks, + environment_spec=environment_spec, + evaluation=False, + ) + # print("network : ", networks) + # print("policy : ", policy) + + self._adder = sight_adder.SightAdder() + self._variable_source = sight_variable_source.SightVariableSource( + adder=self._adder, client_id=self._sight.id, sight=self._sight) + + key = jax.random.PRNGKey(0) + actor_key, key = jax.random.split(key) + logging.debug("<<<< Out %s of %s", method_name, 
_file_name) + return experiment.builder.make_actor( + actor_key, + policy, + environment_spec, + variable_source=self._variable_source, + adder=self._adder, + ) + + @override + def decision_point(self, sight, request: service_pb2.DecisionPointRequest): + # def decision_point(self, sight): + """communicates with decision_point method on server. Stores the trajectories locally, after storing 50 trajectories, calls Update on actor so send those to server and fetch latest weights from @@ -346,109 +341,105 @@ def decision_point(self, sight, request: service_pb2.DecisionPointRequest): Returns: action to be performed. """ - method_name = "decision_point" - # logging.info(">>>> In %s of %s", method_name, _file_name) - - observation = np.array( - list(sight.widget_decision_state["state"].values()), - dtype=np.float32, - # todo : meetashah - this should be extracted from env - ) - # print('observation : ', observation) - if self._dp_first_call: - # create actor, if not there - if self._actor is None: - print("no actor found, creating new one.....") - self._actor = self.create_new_actor() - # update will fetch the latest weights from learner into actor policy - self._actor.update(wait=True) - - timestep = dm_env.TimeStep( - step_type=dm_env.StepType.FIRST, - reward=None, - discount=None, - observation=observation, - ) - self._actor.observe_first(timestep) - self._dp_first_call = False - else: - # do this for subsequent call - # logging.info("subsequent call of decision_point...") - timestep = dm_env.TimeStep( - step_type=dm_env.StepType.MID, - reward=np.array( - sight.widget_decision_state["outcome_value"], dtype=np.float64 - ), - discount=np.array( - sight.widget_decision_state["discount"], dtype=np.float64 - ), - observation=observation, - ) - - # action = np.array(self._last_acme_action, dtype=np.int64) - # todo : meetashah - changed dtyep from int64 to float32 for d4pg agent - # action = np.array(self._last_acme_action, dtype=np.float32, ndmin=1) - - # self._actor.observe(action, next_timestep=timestep) - self._actor.observe(self._last_acme_action, next_timestep=timestep) - - if len(self._actor._adder._observation_list) % 50 == 0: - self._actor.update(wait=True) - - # store current action for next call as last_action - self._last_acme_action = self._actor.select_action(observation) - # print("last_Acme_Action : ", self._last_acme_action, self._last_acme_action.dtype, type(self._last_acme_action), self._last_acme_action.shape) - # raise SystemError - - # todo:meetashah- for dqn-cartpole, we get dtype int32 but require int64 - if(self._last_acme_action.dtype == 'int32'): - self._last_acme_action = np.array(self._last_acme_action, dtype=np.int64) - # self._last_acme_action = self._last_acme_action.reshape((1,)) - - - # print("last_Acme_Action : ", self._last_acme_action, self._last_acme_action.dtype, self._last_acme_action.shape) - # raise SystemError - # logging.info("<<<< Out %s of %s", method_name, _file_name) - return self._last_acme_action - - @override - def finalize_episode(self, sight, request: service_pb2.FinalizeEpisodeRequest): - """completes episode and stores remaining local trajectories to server. 
+        method_name = "decision_point"
+        # logging.info(">>>> In %s of %s", method_name, _file_name)
+
+        observation = np.array(
+            list(sight.widget_decision_state["state"].values()),
+            dtype=np.float32,
+            # todo : meetashah - this should be extracted from env
+        )
+        # print('observation : ', observation)
+        if self._dp_first_call:
+            # create actor, if not there
+            if self._actor is None:
+                print("no actor found, creating new one.....")
+                self._actor = self.create_new_actor()
+                # update will fetch the latest weights from learner into actor policy
+                self._actor.update(wait=True)
+
+            timestep = dm_env.TimeStep(
+                step_type=dm_env.StepType.FIRST,
+                reward=None,
+                discount=None,
+                observation=observation,
+            )
+            self._actor.observe_first(timestep)
+            self._dp_first_call = False
+        else:
+            # do this for subsequent call
+            # logging.info("subsequent call of decision_point...")
+            timestep = dm_env.TimeStep(
+                step_type=dm_env.StepType.MID,
+                reward=np.array(sight.widget_decision_state["outcome_value"],
+                                dtype=np.float64),
+                discount=np.array(sight.widget_decision_state["discount"],
+                                  dtype=np.float64),
+                observation=observation,
+            )
+
+            # action = np.array(self._last_acme_action, dtype=np.int64)
+            # todo : meetashah - changed dtype from int64 to float32 for d4pg agent
+            # action = np.array(self._last_acme_action, dtype=np.float32, ndmin=1)
+
+            # self._actor.observe(action, next_timestep=timestep)
+            self._actor.observe(self._last_acme_action, next_timestep=timestep)
+
+            if len(self._actor._adder._observation_list) % 50 == 0:
+                self._actor.update(wait=True)
+
+        # store current action for next call as last_action
+        self._last_acme_action = self._actor.select_action(observation)
+        # print("last_Acme_Action : ", self._last_acme_action, self._last_acme_action.dtype, type(self._last_acme_action), self._last_acme_action.shape)
+        # raise SystemError
+
+        # todo:meetashah- for dqn-cartpole, we get dtype int32 but require int64
+        if (self._last_acme_action.dtype == 'int32'):
+            self._last_acme_action = np.array(self._last_acme_action,
+                                              dtype=np.int64)
+            # self._last_acme_action = self._last_acme_action.reshape((1,))
+
+        # print("last_Acme_Action : ", self._last_acme_action, self._last_acme_action.dtype, self._last_acme_action.shape)
+        # raise SystemError
+        # logging.info("<<<< Out %s of %s", method_name, _file_name)
+        return self._last_acme_action
+
+    @override
+    def finalize_episode(self, sight,
+                         request: service_pb2.FinalizeEpisodeRequest):
+        """Completes the episode and stores remaining local trajectories to the server.
 
    Args:
      sight: sight object.
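      request: the FinalizeEpisodeRequest proto for the current episode.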
""" - method_name = "finalize_episode" - logging.debug(">>>> In %s of %s", method_name, _file_name) - - observation = np.array( - list(sight.widget_decision_state["state"].values()), - dtype=np.float32, - ) - timestep = dm_env.TimeStep( - step_type=dm_env.StepType.LAST, - reward=np.array( - sight.widget_decision_state["outcome_value"], dtype=np.float64 - ), - discount=np.array( - sight.widget_decision_state["discount"], dtype=np.float64 - ), - observation=np.array(observation, dtype=np.float32), - ) - # action = np.array(self._last_acme_action, dtype=np.int64) - # todo : meetashah - changed dtyep from int64 to float64 for d4pg agent - # action = np.array(self._last_acme_action, dtype=np.float32) - # self._actor.observe(action, next_timestep=timestep) - self._actor.observe(self._last_acme_action, next_timestep=timestep) - - - # send remaining records to server and fetch latest weights in response - # if len(self._actor._adder._observation_list) % 50 == 0: - self._actor.update(wait=True) - # self._actor._adder.reset() # _actor._adder._observation_list = [] + method_name = "finalize_episode" + logging.debug(">>>> In %s of %s", method_name, _file_name) + + observation = np.array( + list(sight.widget_decision_state["state"].values()), + dtype=np.float32, + ) + timestep = dm_env.TimeStep( + step_type=dm_env.StepType.LAST, + reward=np.array(sight.widget_decision_state["outcome_value"], + dtype=np.float64), + discount=np.array(sight.widget_decision_state["discount"], + dtype=np.float64), + observation=np.array(observation, dtype=np.float32), + ) + # action = np.array(self._last_acme_action, dtype=np.int64) + # todo : meetashah - changed dtyep from int64 to float64 for d4pg agent + # action = np.array(self._last_acme_action, dtype=np.float32) + # self._actor.observe(action, next_timestep=timestep) + self._actor.observe(self._last_acme_action, next_timestep=timestep) + + # send remaining records to server and fetch latest weights in response + # if len(self._actor._adder._observation_list) % 50 == 0: + self._actor.update(wait=True) + # self._actor._adder.reset() # _actor._adder._observation_list = [] - # resetting this global varibale so, next iteration will - # start with observer_first - self._dp_first_call = True + # resetting this global varibale so, next iteration will + # start with observer_first + self._dp_first_call = True - logging.debug("<<<< Out %s of %s", method_name, _file_name) + logging.debug("<<<< Out %s of %s", method_name, _file_name) diff --git a/py/sight/widgets/decision/acme/sight_adder.py b/py/sight/widgets/decision/acme/sight_adder.py index e3b487f..8f14a9f 100644 --- a/py/sight/widgets/decision/acme/sight_adder.py +++ b/py/sight/widgets/decision/acme/sight_adder.py @@ -11,10 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Custom implementation of base Adder.""" -import logging +from helpers.logs.logs_handler import logger as logging from typing import Any, Optional from acme import types from acme.adders import base @@ -24,94 +23,92 @@ _file_name = "sight_adder.py" + class SightAdder(base.Adder): - """A custom adder based on the base.Adder with some logic changes. + """A custom adder based on the base.Adder with some logic changes. This adder maintains observations provided via actor in a list. 
""" - def __init__(self): - """Initialize a CustomAdder instance.""" - self._observation_list = [] - self._existing_batch_last_record = None + def __init__(self): + """Initialize a CustomAdder instance.""" + self._observation_list = [] + self._existing_batch_last_record = None - def reset(self, timeout_ms: Optional[int] = None): - """Resets the adder's buffer.""" - # reset called at initial stage or afrer whole episode completed - if ( - not self._existing_batch_last_record - or self._existing_batch_last_record["next_timestep"].last() - ): - self._observation_list = [] - # whole episode not completed so, converting last record of this batch - # as FIRST type record for next batch - else: - timestep = dm_env.TimeStep( - step_type=dm_env.StepType.FIRST, - reward=None, - discount=None, - observation=self._existing_batch_last_record[ - "next_timestep" - ].observation, - ) - observation_dict = {"action": None, "next_timestep": timestep} - self._observation_list = [observation_dict] + def reset(self, timeout_ms: Optional[int] = None): + """Resets the adder's buffer.""" + # reset called at initial stage or afrer whole episode completed + if (not self._existing_batch_last_record + or self._existing_batch_last_record["next_timestep"].last()): + self._observation_list = [] + # whole episode not completed so, converting last record of this batch + # as FIRST type record for next batch + else: + timestep = dm_env.TimeStep( + step_type=dm_env.StepType.FIRST, + reward=None, + discount=None, + observation=self._existing_batch_last_record["next_timestep"]. + observation, + ) + observation_dict = {"action": None, "next_timestep": timestep} + self._observation_list = [observation_dict] - def observation_to_proto(self, observation: dict[str, Any]): - method_name = "observation_to_proto" - logging.debug(">>>> In %s of %s", method_name, _file_name) - obs = service_pb2.Acme_Request().Observation() + def observation_to_proto(self, observation: dict[str, Any]): + method_name = "observation_to_proto" + logging.debug(">>>> In %s of %s", method_name, _file_name) + obs = service_pb2.Acme_Request().Observation() - if observation["action"]: - obs.action.CopyFrom(ndarray_to_proto(observation["action"])) - obs.steptype = observation["next_timestep"].step_type - if observation["next_timestep"].reward: - obs.reward.CopyFrom(ndarray_to_proto(observation["next_timestep"].reward)) - if observation["next_timestep"].discount: - obs.discount.CopyFrom( - ndarray_to_proto(observation["next_timestep"].discount) - ) - obs.observation.CopyFrom( - ndarray_to_proto(observation["next_timestep"].observation) - ) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return obs + if observation["action"]: + obs.action.CopyFrom(ndarray_to_proto(observation["action"])) + obs.steptype = observation["next_timestep"].step_type + if observation["next_timestep"].reward: + obs.reward.CopyFrom( + ndarray_to_proto(observation["next_timestep"].reward)) + if observation["next_timestep"].discount: + obs.discount.CopyFrom( + ndarray_to_proto(observation["next_timestep"].discount)) + obs.observation.CopyFrom( + ndarray_to_proto(observation["next_timestep"].observation)) + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return obs - def fetch_and_reset_observation_list(self, sight_client_id, sight_worker_id, learner_keys): - method_name = "fetch_and_reset_observation_list" - logging.debug(">>>> In %s of %s", method_name, _file_name) - final_observation = False - request = service_pb2.DecisionPointRequest() - request.client_id = 
str(sight_client_id) - request.worker_id = str(sight_worker_id) + def fetch_and_reset_observation_list(self, sight_client_id, + sight_worker_id, learner_keys): + method_name = "fetch_and_reset_observation_list" + logging.debug(">>>> In %s of %s", method_name, _file_name) + final_observation = False + request = service_pb2.DecisionPointRequest() + request.client_id = str(sight_client_id) + request.worker_id = str(sight_worker_id) - acme_config = service_pb2.Acme_Request() - if len(self._observation_list) > 0: - for episode_obs in self._observation_list: - obs = self.observation_to_proto(episode_obs) - acme_config.episode_observations.append(obs) - # print("learner_keys : ", learner_keys) + acme_config = service_pb2.Acme_Request() + if len(self._observation_list) > 0: + for episode_obs in self._observation_list: + obs = self.observation_to_proto(episode_obs) + acme_config.episode_observations.append(obs) + # print("learner_keys : ", learner_keys) - if(learner_keys!=['']): - for key in learner_keys: - acme_config.learner_keys.append(key) + if (learner_keys != ['']): + for key in learner_keys: + acme_config.learner_keys.append(key) - request.acme_config.CopyFrom(acme_config) - self.reset() - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return request, final_observation + request.acme_config.CopyFrom(acme_config) + self.reset() + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return request, final_observation - def add_first(self, timestep: dm_env.TimeStep): - """Record the first observation of a trajectory.""" - self.add(action=None, next_timestep=timestep) + def add_first(self, timestep: dm_env.TimeStep): + """Record the first observation of a trajectory.""" + self.add(action=None, next_timestep=timestep) - def add( - self, - action: types.NestedArray, - next_timestep: dm_env.TimeStep, - extras: types.NestedArray = (), - ): - """Record an action and the following timestep.""" - observation_dict = {"action": action, "next_timestep": next_timestep} - self._existing_batch_last_record = observation_dict - self._observation_list.append(observation_dict) + def add( + self, + action: types.NestedArray, + next_timestep: dm_env.TimeStep, + extras: types.NestedArray = (), + ): + """Record an action and the following timestep.""" + observation_dict = {"action": action, "next_timestep": next_timestep} + self._existing_batch_last_record = observation_dict + self._observation_list.append(observation_dict) diff --git a/py/sight/widgets/decision/analyze_decision_outcomes.py b/py/sight/widgets/decision/analyze_decision_outcomes.py index 699dfa0..d5807d4 100644 --- a/py/sight/widgets/decision/analyze_decision_outcomes.py +++ b/py/sight/widgets/decision/analyze_decision_outcomes.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Analyze the impact of decisions on subsequent outcomes.""" import io @@ -21,8 +20,8 @@ from absl import app from absl import flags -from absl import logging -import apache_beam as beam +from helpers.logs.logs_handler import logger as logging +from helpers.logs.logs_handler import logger as logging from apache_beam.coders import ProtoCoder import joblib import numpy as np @@ -51,10 +50,8 @@ _IN_LOG_FILE = flags.DEFINE_list( 'in_log_file', None, - ( - 'Input file(s) that contain the Sight log that documents the simulation' - ' run.' 
- ), + ('Input file(s) that contain the Sight log that documents the simulation' + ' run.'), required=True, ) @@ -69,36 +66,33 @@ class AnalyzeSequence(beam.DoFn): - """Converts sets of named value objects to time-ordered sequences.""" - - def __init__( - self, - named_value_and_object_label: str, - decision_point_label: str, - decision_outcome_label: str, - configuration_label: str, - ): - self.named_value_and_object_label = named_value_and_object_label - self.decision_point_label = decision_point_label - self.decision_outcome_label = decision_outcome_label - self.configuration_label = configuration_label - - def process( - self, - task: Tuple[ - Any, Dict[str, Union[List[Any], List[Dict[str, sight_pb2.Object]]]] - ], - ) -> Iterator[ - Tuple[ - str, - Tuple[ - List[Tuple[Dict[str, Any], float]], - Dict[str, sight_pb2.DecisionConfigurationStart.StateProps], - Dict[str, sight_pb2.DecisionConfigurationStart.StateProps], - ], - ] - ]: - """Time-orders the sequence of objects for a given simulation attribute. + """Converts sets of named value objects to time-ordered sequences.""" + + def __init__( + self, + named_value_and_object_label: str, + decision_point_label: str, + decision_outcome_label: str, + configuration_label: str, + ): + self.named_value_and_object_label = named_value_and_object_label + self.decision_point_label = decision_point_label + self.decision_outcome_label = decision_outcome_label + self.configuration_label = configuration_label + + def process( + self, + task: Tuple[Any, Dict[str, Union[List[Any], + List[Dict[str, sight_pb2.Object]]]]], + ) -> Iterator[Tuple[ + str, + Tuple[ + List[Tuple[Dict[str, Any], float]], + Dict[str, sight_pb2.DecisionConfigurationStart.StateProps], + Dict[str, sight_pb2.DecisionConfigurationStart.StateProps], + ], + ]]: + """Time-orders the sequence of objects for a given simulation attribute. Args: task: A sequence of objects that describe the state of some simulation @@ -107,350 +101,320 @@ def process( Yields: A time-ordered version of the input sequence. 
""" - named_value_and_object = [ - (x['named_value'].location, x) - for x in task[1][self.named_value_and_object_label] - ] - decision_point = [ - (x['decision_point'].location, x) - for x in task[1][self.decision_point_label] - ] - decision_outcome = [ - (x['decision_outcome'].location, x) - for x in task[1][self.decision_outcome_label] - ] - - # Get the attributes used by the application within this simulation - state_attrs = None - action_attrs = None - for cfg in task[1][self.configuration_label]: - if ( - cfg['configuration'].block_start.configuration.sub_type - == sight_pb2.ConfigurationStart.ST_DECISION_CONFIGURATION - ): - if state_attrs: - raise ValueError( - 'Multiple decision configurations present in run %s' % task[0] - ) - decision_configuration = cfg[ - 'configuration' - ].block_start.configuration.decision_configuration - state_attrs = decision_configuration.state_attrs - action_attrs = decision_configuration.action_attrs - - if state_attrs is None: - raise ValueError('No decision configuration present in run %s' % task[0]) - - log = [ - x[1] - for x in sorted( - named_value_and_object + decision_point + decision_outcome, - key=lambda x: x[0], - ) - ] - - state = {} - last_decision_point: sight_pb2.DecisionPoint = None - accumulated_outcome = 0 - logging.info('state_attrs=%s', state_attrs) - dataset: Dict[str, List[Tuple[Dict[str, Any], float]]] = {} - for obj in log: - logging.info('obj=%s', obj) - if 'object' in obj: - if obj['object'][0] in state_attrs: - state[obj['object'][0]] = obj['object'][1] - logging.info('updated state=%s', state) - elif 'decision_point' in obj: - if last_decision_point: - observation = last_decision_point_state.copy() - for ( - param_name, - param_value, - ) in last_decision_point.choice_params.items(): - observation['chosen_param_' + param_name] = float(param_value) - if last_decision_point.choice_label not in dataset: - dataset[last_decision_point.choice_label] = [] - dataset[last_decision_point.choice_label].append( - (observation, accumulated_outcome) - ) - logging.info( - 'observation=%s, accumulated_outcome=%s, last_decision_point=%s', - observation, - accumulated_outcome, - last_decision_point, - ) - - last_decision_point = obj['decision_point'].decision_point - last_decision_point_state = state.copy() - logging.info('last_decision_point_state=%s', last_decision_point_state) + named_value_and_object = [ + (x['named_value'].location, x) + for x in task[1][self.named_value_and_object_label] + ] + decision_point = [(x['decision_point'].location, x) + for x in task[1][self.decision_point_label]] + decision_outcome = [(x['decision_outcome'].location, x) + for x in task[1][self.decision_outcome_label]] + + # Get the attributes used by the application within this simulation + state_attrs = None + action_attrs = None + for cfg in task[1][self.configuration_label]: + if (cfg['configuration'].block_start.configuration.sub_type == + sight_pb2.ConfigurationStart.ST_DECISION_CONFIGURATION): + if state_attrs: + raise ValueError( + 'Multiple decision configurations present in run %s' % + task[0]) + decision_configuration = cfg[ + 'configuration'].block_start.configuration.decision_configuration + state_attrs = decision_configuration.state_attrs + action_attrs = decision_configuration.action_attrs + + if state_attrs is None: + raise ValueError('No decision configuration present in run %s' % + task[0]) + + log = [ + x[1] for x in sorted( + named_value_and_object + decision_point + decision_outcome, + key=lambda x: x[0], + ) + ] + + state = {} + 
last_decision_point: sight_pb2.DecisionPoint = None accumulated_outcome = 0 - elif 'decision_outcome' in obj: - accumulated_outcome += float( - obj['decision_outcome'].decision_outcome.outcome_value - ) - logging.info( - 'outcome=%s', obj['decision_outcome'].decision_outcome.outcome_value - ) - - if last_decision_point: - observation = last_decision_point_state.copy() - for param_name, param_value in last_decision_point.choice_params.items(): - observation['chosen_param_' + param_name] = float(param_value) - if last_decision_point.choice_label not in dataset: - dataset[last_decision_point.choice_label] = [] - dataset[last_decision_point.choice_label].append( - (observation, accumulated_outcome) - ) - state = {} - - for choice_label, obs_data in dataset.items(): - yield ( - choice_label, - ( - obs_data, - state_attrs, - action_attrs, - ), - ) + logging.info('state_attrs=%s', state_attrs) + dataset: Dict[str, List[Tuple[Dict[str, Any], float]]] = {} + for obj in log: + logging.info('obj=%s', obj) + if 'object' in obj: + if obj['object'][0] in state_attrs: + state[obj['object'][0]] = obj['object'][1] + logging.info('updated state=%s', state) + elif 'decision_point' in obj: + if last_decision_point: + observation = last_decision_point_state.copy() + for ( + param_name, + param_value, + ) in last_decision_point.choice_params.items(): + observation['chosen_param_' + + param_name] = float(param_value) + if last_decision_point.choice_label not in dataset: + dataset[last_decision_point.choice_label] = [] + dataset[last_decision_point.choice_label].append( + (observation, accumulated_outcome)) + logging.info( + 'observation=%s, accumulated_outcome=%s, last_decision_point=%s', + observation, + accumulated_outcome, + last_decision_point, + ) + + last_decision_point = obj['decision_point'].decision_point + last_decision_point_state = state.copy() + logging.info('last_decision_point_state=%s', + last_decision_point_state) + accumulated_outcome = 0 + elif 'decision_outcome' in obj: + accumulated_outcome += float( + obj['decision_outcome'].decision_outcome.outcome_value) + logging.info( + 'outcome=%s', + obj['decision_outcome'].decision_outcome.outcome_value) + + if last_decision_point: + observation = last_decision_point_state.copy() + for param_name, param_value in last_decision_point.choice_params.items( + ): + observation['chosen_param_' + param_name] = float(param_value) + if last_decision_point.choice_label not in dataset: + dataset[last_decision_point.choice_label] = [] + dataset[last_decision_point.choice_label].append( + (observation, accumulated_outcome)) + state = {} + + for choice_label, obs_data in dataset.items(): + yield ( + choice_label, + ( + obs_data, + state_attrs, + action_attrs, + ), + ) class TrainOutcomePrediction(beam.DoFn): - """Trains a model that predicts decision outcome values from decisions.""" - - def process( - self, - task: Tuple[ - str, - Iterable[ - Tuple[ - List[Tuple[Dict[str, Any], float]], - Dict[str, sight_pb2.DecisionConfigurationStart.StateProps], - Dict[str, sight_pb2.DecisionConfigurationStart.StateProps], - ] - ], - ], - ) -> None: - choice_label = task[0] - columns = None - - state_attrs = None - action_attrs = None - for dataset in task[1]: - if state_attrs is None: - _, state_attrs, action_attrs = dataset - else: - if state_attrs != dataset[1] or action_attrs != dataset[2]: - raise ValueError('Inconsistent state/action attributes across runs.') - - input_data = [] - output_data = [] - for dataset in task[1]: - for obs in dataset[0]: - if not columns: - 
columns = obs[0].keys() - row = [] - for c in columns: - row.append(obs[0][c]) - input_data.append(row) - output_data.append(obs[1]) - - num_total_rows = len(input_data) - num_train_rows = int(num_total_rows * 0.8) - input_array = PolynomialFeatures(2).fit_transform(np.array(input_data)) - output_array = np.array(output_data) - - indices = np.random.permutation(num_total_rows) - train_idx, eval_idx = indices[:num_train_rows], indices[num_train_rows:] - train_input_data = input_array[train_idx, :] - train_output_data = output_array[train_idx] - eval_input_data = input_array[eval_idx, :] - eval_output_data = output_array[eval_idx] - - np.set_printoptions(threshold=sys.maxsize) - - with gfile.Open( - '/tmp/decision_outcomes.' + choice_label + '.csv', 'w' - ) as f: - pd.DataFrame( - np.concatenate( - ( - input_array, - np.reshape(output_array, (output_array.shape[0], 1)), - ), - axis=1, - ) - ).to_csv(f) - - lowest_error = 1e100 - best_model = None - for learner in [ - AdaBoostRegressor(), - GradientBoostingRegressor(), - RandomForestRegressor(), - LinearRegression(), - ]: - model = learner.fit(train_input_data, train_output_data) - - predicted_array = model.predict(eval_input_data) - - logging.info( - 'eval_input_data%s=\n%s', eval_input_data.shape, eval_input_data - ) - logging.info( - 'eval_output_data%s=\n%s', eval_output_data.shape, eval_output_data - ) - logging.info( - 'predicted_array%s=%s', predicted_array.shape, predicted_array - ) - mae = metrics.mean_absolute_error(eval_output_data, predicted_array) - logging.info( - '%s: mae=%s, rmse=%s', - task[0], - mae / abs(np.mean(eval_output_data)), - math.sqrt( - metrics.mean_squared_error(eval_output_data, predicted_array) - ) - / abs(np.mean(eval_output_data)), - ) - if lowest_error > mae: - lowest_error = mae - best_model = model - - with io.BytesIO() as model_bytes: - joblib.dump(best_model, model_bytes) - - with Sight( - sight_pb2.Params( - label='Decision Outcomes', - log_owner='bronevet@google.com', - capacitor_output=True, - ) - ) as sight: - scikit_learn_algorithm = ( - sight_pb2.DecisionConfigurationStart.ScikitLearnAlgorithm() - ) - scikit_learn_algorithm.model_encoding = model_bytes.getvalue() - scikit_learn_algorithm.input_fields.extend(list(columns)) - - choice_algorithm = ( - sight_pb2.DecisionConfigurationStart.ChoiceAlgorithm() - ) - choice_algorithm.scikit_learn.CopyFrom(scikit_learn_algorithm) - - decision_configuration = sight_pb2.DecisionConfigurationStart() - for attr_name, props in state_attrs.items(): - decision_configuration.state_attrs[attr_name].CopyFrom(props) - for attr_name, props in action_attrs.items(): - decision_configuration.action_attrs[attr_name].CopyFrom(props) - decision_configuration.choice_algorithm[choice_label].CopyFrom( - choice_algorithm - ) - - sight.enter_block( - 'Decision Configuration', - sight_pb2.Object( - block_start=sight_pb2.BlockStart( - sub_type=sight_pb2.BlockStart.ST_CONFIGURATION, - configuration=sight_pb2.ConfigurationStart( - sub_type=sight_pb2.ConfigurationStart.ST_DECISION_CONFIGURATION, - decision_configuration=decision_configuration, + """Trains a model that predicts decision outcome values from decisions.""" + + def process( + self, + task: Tuple[ + str, + Iterable[Tuple[ + List[Tuple[Dict[str, Any], float]], + Dict[str, sight_pb2.DecisionConfigurationStart.StateProps], + Dict[str, sight_pb2.DecisionConfigurationStart.StateProps], + ]], + ], + ) -> None: + choice_label = task[0] + columns = None + + state_attrs = None + action_attrs = None + for dataset in task[1]: + if 
state_attrs is None: + _, state_attrs, action_attrs = dataset + else: + if state_attrs != dataset[1] or action_attrs != dataset[2]: + raise ValueError( + 'Inconsistent state/action attributes across runs.') + + input_data = [] + output_data = [] + for dataset in task[1]: + for obs in dataset[0]: + if not columns: + columns = obs[0].keys() + row = [] + for c in columns: + row.append(obs[0][c]) + input_data.append(row) + output_data.append(obs[1]) + + num_total_rows = len(input_data) + num_train_rows = int(num_total_rows * 0.8) + input_array = PolynomialFeatures(2).fit_transform(np.array(input_data)) + output_array = np.array(output_data) + + indices = np.random.permutation(num_total_rows) + train_idx, eval_idx = indices[:num_train_rows], indices[ + num_train_rows:] + train_input_data = input_array[train_idx, :] + train_output_data = output_array[train_idx] + eval_input_data = input_array[eval_idx, :] + eval_output_data = output_array[eval_idx] + + np.set_printoptions(threshold=sys.maxsize) + + with gfile.Open('/tmp/decision_outcomes.' + choice_label + '.csv', + 'w') as f: + pd.DataFrame( + np.concatenate( + ( + input_array, + np.reshape(output_array, (output_array.shape[0], 1)), ), + axis=1, + )).to_csv(f) + + lowest_error = 1e100 + best_model = None + for learner in [ + AdaBoostRegressor(), + GradientBoostingRegressor(), + RandomForestRegressor(), + LinearRegression(), + ]: + model = learner.fit(train_input_data, train_output_data) + + predicted_array = model.predict(eval_input_data) + + logging.info('eval_input_data%s=\n%s', eval_input_data.shape, + eval_input_data) + logging.info('eval_output_data%s=\n%s', eval_output_data.shape, + eval_output_data) + logging.info('predicted_array%s=%s', predicted_array.shape, + predicted_array) + mae = metrics.mean_absolute_error(eval_output_data, + predicted_array) + logging.info( + '%s: mae=%s, rmse=%s', + task[0], + mae / abs(np.mean(eval_output_data)), + math.sqrt( + metrics.mean_squared_error(eval_output_data, + predicted_array)) / + abs(np.mean(eval_output_data)), + ) + if lowest_error > mae: + lowest_error = mae + best_model = model + + with io.BytesIO() as model_bytes: + joblib.dump(best_model, model_bytes) + + with Sight( + sight_pb2.Params( + label='Decision Outcomes', + log_owner='bronevet@google.com', + capacitor_output=True, + )) as sight: + scikit_learn_algorithm = (sight_pb2.DecisionConfigurationStart. + ScikitLearnAlgorithm()) + scikit_learn_algorithm.model_encoding = model_bytes.getvalue() + scikit_learn_algorithm.input_fields.extend(list(columns)) + + choice_algorithm = ( + sight_pb2.DecisionConfigurationStart.ChoiceAlgorithm()) + choice_algorithm.scikit_learn.CopyFrom(scikit_learn_algorithm) + + decision_configuration = sight_pb2.DecisionConfigurationStart() + for attr_name, props in state_attrs.items(): + decision_configuration.state_attrs[attr_name].CopyFrom( + props) + for attr_name, props in action_attrs.items(): + decision_configuration.action_attrs[attr_name].CopyFrom( + props) + decision_configuration.choice_algorithm[choice_label].CopyFrom( + choice_algorithm) + + sight.enter_block( + 'Decision Configuration', + sight_pb2.Object(block_start=sight_pb2.BlockStart( + sub_type=sight_pb2.BlockStart.ST_CONFIGURATION, + configuration=sight_pb2.ConfigurationStart( + sub_type=sight_pb2.ConfigurationStart. 
+ ST_DECISION_CONFIGURATION, + decision_configuration=decision_configuration, + ), + )), ) - ), - ) - sight.exit_block('Decision Configuration', sight_pb2.Object()) + sight.exit_block('Decision Configuration', sight_pb2.Object()) def main(argv): - if len(argv) > 1: - raise app.UsageError('Too many command-line arguments.') - - root = beam.Pipeline( - runner=runner.FlumeRunner() - ) # beam.runners.DirectRunner()) - reads = [] - for file_path in _IN_LOG_FILE.value: - reads.append( - root - | f'Read {file_path}' - >> capacitorio.ReadFromCapacitor( - file_path, ['*'], ProtoCoder(sight_pb2.Object) - ) + if len(argv) > 1: + raise app.UsageError('Too many command-line arguments.') + + root = beam.Pipeline( + runner=runner.FlumeRunner()) # beam.runners.DirectRunner()) + reads = [] + for file_path in _IN_LOG_FILE.value: + reads.append(root + | f'Read {file_path}' >> capacitorio.ReadFromCapacitor( + file_path, ['*'], ProtoCoder(sight_pb2.Object))) + + log: beam.pvalue.PCollection[sight_pb2.Object] = reads | beam.Flatten() + + objects_with_ancestors = log | beam.ParDo( + analysis_utils.ExtractAncestorBlockStartLocations()) + + named_value = analysis_utils.block_start_objects_key_self( + log, sight_pb2.BlockStart.ST_NAMED_VALUE, 'named_value') + decision_point = analysis_utils.single_objects_key_log_uid( + log, sight_pb2.Object.ST_DECISION_POINT, 'decision_point') + decision_outcome = analysis_utils.single_objects_key_log_uid( + log, sight_pb2.Object.ST_DECISION_OUTCOME, 'decision_outcome') + configuration = analysis_utils.block_start_objects_key_log_uid( + log, sight_pb2.BlockStart.ST_CONFIGURATION, 'configuration') + + _ = decision_point | 'decision_point' >> beam.io.WriteToText( + str(_OUT_FILE.value) + '.decision_point') + _ = decision_outcome | 'decision_outcome' >> beam.io.WriteToText( + str(_OUT_FILE.value) + '.decision_outcome') + + named_value_and_object = analysis_utils.create_log_uid_key( + 'named_values_to_objects log_uid_key', + 'named_value', + analysis_utils.named_values_to_objects( + 'named_value', + named_value, + 'objects', + objects_with_ancestors, + ), ) - - log: beam.pvalue.PCollection[sight_pb2.Object] = reads | beam.Flatten() - - objects_with_ancestors = log | beam.ParDo( - analysis_utils.ExtractAncestorBlockStartLocations() - ) - - named_value = analysis_utils.block_start_objects_key_self( - log, sight_pb2.BlockStart.ST_NAMED_VALUE, 'named_value' - ) - decision_point = analysis_utils.single_objects_key_log_uid( - log, sight_pb2.Object.ST_DECISION_POINT, 'decision_point' - ) - decision_outcome = analysis_utils.single_objects_key_log_uid( - log, sight_pb2.Object.ST_DECISION_OUTCOME, 'decision_outcome' - ) - configuration = analysis_utils.block_start_objects_key_log_uid( - log, sight_pb2.BlockStart.ST_CONFIGURATION, 'configuration' - ) - - _ = decision_point | 'decision_point' >> beam.io.WriteToText( - str(_OUT_FILE.value) + '.decision_point' - ) - _ = decision_outcome | 'decision_outcome' >> beam.io.WriteToText( - str(_OUT_FILE.value) + '.decision_outcome' - ) - - named_value_and_object = analysis_utils.create_log_uid_key( - 'named_values_to_objects log_uid_key', - 'named_value', - analysis_utils.named_values_to_objects( - 'named_value', - named_value, - 'objects', - objects_with_ancestors, - ), - ) - _ = named_value_and_object | 'named_value_and_object' >> beam.io.WriteToText( - str(_OUT_FILE.value) + '.named_value_and_object' - ) - - analyzed = ( - { - 'named_value_and_object': named_value_and_object, - 'decision_point': decision_point, - 'decision_outcome': decision_outcome, - 
'configuration': configuration, - } - | 'named_value_and_object decision_point decision_outcome configuration CoGroupByKey' - >> beam.CoGroupByKey() - | 'named_value_and_object decision_point decision_outcome configuration AnalyzeSequence' - >> beam.ParDo( - AnalyzeSequence( - 'named_value_and_object', - 'decision_point', - 'decision_outcome', - 'configuration', - ) - ) - ) - - _ = analyzed | 'analyzed' >> beam.io.WriteToText( - str(_OUT_FILE.value) + '.analyzed' - ) - - _ = ( - analyzed - | 'TrainOutcomePrediction GroupByKey' >> beam.GroupByKey() - | 'TrainOutcomePrediction' >> beam.ParDo(TrainOutcomePrediction()) - ) - - results = root.run() - results.wait_until_finish() + _ = named_value_and_object | 'named_value_and_object' >> beam.io.WriteToText( + str(_OUT_FILE.value) + '.named_value_and_object') + + analyzed = ( + { + 'named_value_and_object': named_value_and_object, + 'decision_point': decision_point, + 'decision_outcome': decision_outcome, + 'configuration': configuration, + } + | + 'named_value_and_object decision_point decision_outcome configuration CoGroupByKey' + >> beam.CoGroupByKey() + | + 'named_value_and_object decision_point decision_outcome configuration AnalyzeSequence' + >> beam.ParDo( + AnalyzeSequence( + 'named_value_and_object', + 'decision_point', + 'decision_outcome', + 'configuration', + ))) + + _ = analyzed | 'analyzed' >> beam.io.WriteToText( + str(_OUT_FILE.value) + '.analyzed') + + _ = (analyzed + | 'TrainOutcomePrediction GroupByKey' >> beam.GroupByKey() + | 'TrainOutcomePrediction' >> beam.ParDo(TrainOutcomePrediction())) + + results = root.run() + results.wait_until_finish() if __name__ == '__main__': - app.run(main) + app.run(main) diff --git a/py/sight/widgets/decision/converse.py b/py/sight/widgets/decision/converse.py index c54d82e..eed0a2c 100644 --- a/py/sight/widgets/decision/converse.py +++ b/py/sight/widgets/decision/converse.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Binary that checks the current status of a given Sight optimization run.""" import inspect @@ -22,7 +21,7 @@ from absl import app from absl import flags -from absl import logging +from helpers.logs.logs_handler import logger as logging import grpc from sight_service.proto import service_pb2 from sight_service.proto import service_pb2_grpc @@ -32,44 +31,40 @@ from sight.service_utils import generate_metadata _LOG_ID = flags.DEFINE_string( - 'log_id', None, 'ID of the Sight log that tracks this execution.' -) + 'log_id', None, 'ID of the Sight log that tracks this execution.') _DEPLOYMENT_MODE = flags.DEFINE_enum( 'deployment_mode', None, ['distributed', 'dsub_local', 'docker_local', 'local', 'worker_mode'], - ( - 'The procedure to use when training a model to drive applications that ' - 'use the Decision API.' 
- ),
+    ('The procedure to use when training a model to drive applications that '
+     'use the Decision API.'),
 )


-def main(argv: Sequence[str]) -> None:
-  if len(argv) > 1:
-    raise app.UsageError("Too many command-line arguments.")
-  while True:
-    message = input('# ')
-    # print ('message=', message)
-    req = service_pb2.TellRequest()
-    req.client_id = _LOG_ID.value
-    req.message_str = message
-    response = service.call(
-        lambda s, meta: s.Tell(req, 300, metadata=meta)
-    )
-    print('$ '+response.response_str)
-
-    while True:
-      req = service_pb2.ListenRequest()
-      req.client_id = _LOG_ID.value
-      response = service.call(
-          lambda s, meta: s.Listen(req, 300, metadata=meta)
-      )
-      if response.response_ready:
-        print(response.response_str)
-        break
-      time.sleep(5)
+def main(argv: Sequence[str]) -> None:
+    if len(argv) > 1:
+        raise app.UsageError("Too many command-line arguments.")
+
+    while True:
+        message = input('# ')
+        # print ('message=', message)
+        req = service_pb2.TellRequest()
+        req.client_id = _LOG_ID.value
+        req.message_str = message
+        response = service.call(
+            lambda s, meta: s.Tell(req, 300, metadata=meta))
+        print('$ ' + response.response_str)
+
+        # The Listen loop stays nested inside the Tell loop so that each
+        # message is answered before the next prompt is shown.
+        while True:
+            req = service_pb2.ListenRequest()
+            req.client_id = _LOG_ID.value
+            response = service.call(
+                lambda s, meta: s.Listen(req, 300, metadata=meta))
+            if response.response_ready:
+                print(response.response_str)
+                break
+            time.sleep(5)


 if __name__ == "__main__":
-  app.run(main)
+    app.run(main)
diff --git a/py/sight/widgets/decision/current_status.py b/py/sight/widgets/decision/current_status.py
index f00b971..8139ce7 100644
--- a/py/sight/widgets/decision/current_status.py
+++ b/py/sight/widgets/decision/current_status.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Binary that checks the current status of a given Sight optimization run."""

 import inspect
@@ -22,7 +21,7 @@

 from absl import app
 from absl import flags
-from absl import logging
+from helpers.logs.logs_handler import logger as logging
 import grpc
 from sight_service.proto import service_pb2
 from sight_service.proto import service_pb2_grpc
@@ -32,42 +31,38 @@

 from sight.service_utils import generate_metadata

 _LOG_ID = flags.DEFINE_string(
-    "log_id", None, "ID of the Sight log that tracks this execution."
-)
+    "log_id", None, "ID of the Sight log that tracks this execution.")
 _DEPLOYMENT_MODE = flags.DEFINE_enum(
-    'deployment_mode',
-    None,
-    ['distributed', 'dsub_local', 'docker_local', 'local', 'worker_mode'],
-    (
-        'The procedure to use when training a model to drive applications that '
-        'use the Decision API.'
- ), + 'deployment_mode', + None, + ['distributed', 'dsub_local', 'docker_local', 'local', 'worker_mode'], + ('The procedure to use when training a model to drive applications that ' + 'use the Decision API.'), ) -def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError("Too many command-line arguments.") - req = service_pb2.CurrentStatusRequest() - req.client_id = _LOG_ID.value - response = service.call( - lambda s, meta: s.CurrentStatus(req, 300, metadata=meta) - ) +def main(argv: Sequence[str]) -> None: + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") - # print('response :', response.response_str) + req = service_pb2.CurrentStatusRequest() + req.client_id = _LOG_ID.value + response = service.call( + lambda s, meta: s.CurrentStatus(req, 300, metadata=meta)) + # print('response :', response.response_str) - if response.status == service_pb2.CurrentStatusResponse.Status.DEFAULT : - print('Experiment is in Default state') - elif response.status == service_pb2.CurrentStatusResponse.Status.IN_PROGRESS : - print('Experiment is in-progress state') - elif response.status == service_pb2.CurrentStatusResponse.Status.SUCCESS : - print('Experiment is in Success state') - elif response.status == service_pb2.CurrentStatusResponse.Status.FAILURE : - print('Experiment is in Failure state') - else: - print('response.status = ', response.status) + if response.status == service_pb2.CurrentStatusResponse.Status.DEFAULT: + print('Experiment is in Default state') + elif response.status == service_pb2.CurrentStatusResponse.Status.IN_PROGRESS: + print('Experiment is in-progress state') + elif response.status == service_pb2.CurrentStatusResponse.Status.SUCCESS: + print('Experiment is in Success state') + elif response.status == service_pb2.CurrentStatusResponse.Status.FAILURE: + print('Experiment is in Failure state') + else: + print('response.status = ', response.status) if __name__ == "__main__": - app.run(main) + app.run(main) diff --git a/py/sight/widgets/decision/decision.py b/py/sight/widgets/decision/decision.py index 56b0d02..da4d304 100644 --- a/py/sight/widgets/decision/decision.py +++ b/py/sight/widgets/decision/decision.py @@ -27,7 +27,8 @@ from absl import flags # from absl import logging -import logging +from helpers.logs.logs_handler import logger as logging + from sight_service.proto import service_pb2 from sight import service_utils as service from sight.proto import sight_pb2 @@ -178,12 +179,15 @@ def configure( logging.debug("<<<< Out %s of %s", method_name, _file_name) + def init_sight_polling_thread(sight_id): # print - status_update_thread = threading.Thread(target=poll_network_batch_outcome,args=(sight_id,)) + status_update_thread = threading.Thread(target=poll_network_batch_outcome, + args=(sight_id, )) print('*************** starting thread ************') status_update_thread.start() + def attr_dict_to_proto( attrs: Dict[str, sight_pb2.DecisionConfigurationStart.AttrProps], attrs_proto: Any, @@ -372,8 +376,8 @@ def run( decision_configuration = sight_pb2.DecisionConfigurationStart() decision_configuration.optimizer_type = optimizer.obj.optimizer_type() - if(_NUM_TRIALS.value): - decision_configuration.num_trials = _NUM_TRIALS.value + if (_NUM_TRIALS.value): + decision_configuration.num_trials = _NUM_TRIALS.value # if FLAGS.deployment_mode == 'worker_mode': # decision_configuration.num_trials = int(os.environ['num_samples']) # else: @@ -475,9 +479,10 @@ def run( print('_DECISON_MODE.value : ', _DECISON_MODE.value) if 
FLAGS.deployment_mode in ['distributed', 'vm']: if (_OPTIMIZER_TYPE.value == 'exhaustive_search' - and possible_actions < _NUM_TRIALS.value): - raise ValueError( - f"max possible value for num_trials is : {possible_actions}") + and possible_actions < _NUM_TRIALS.value): + raise ValueError( + f"max possible value for num_trials is : {possible_actions}" + ) # logging.info('FLAGS.deployment_mode == distributed') if (not _DOCKER_IMAGE.value): raise ValueError( @@ -563,65 +568,70 @@ def run( # ] # unique_action_ids = propose_actions(sight, actions_list) - if FLAGS.deployment_mode == 'local': + if FLAGS.deployment_mode == 'local': client_id = str(sight.id) worker_location = '0' - elif (FLAGS.deployment_mode == 'worker_mode' - # or FLAGS.deployment_mode == 'docker_mode' - ): + elif (FLAGS.deployment_mode == 'worker_mode' + # or FLAGS.deployment_mode == 'docker_mode' + ): client_id = os.environ['PARENT_LOG_ID'] worker_location = os.environ['worker_location'] - # for _ in range(num_samples_to_run): - # if(FLAGS.optimizer_type == "worklist_scheduler"): - # if (FLAGS.deployment_mode == 'worker_mode'): - while(True): - # #? new rpc just to check move forward or not? - req = service_pb2.WorkerAliveRequest( - client_id=client_id, - worker_id=f'client_{client_id}_worker_{worker_location}' - ) - response = service.call( - lambda s, meta: s.WorkerAlive(req, 300, metadata=meta)) - - logging.info("response from workAlive rpc is : %s", response.status_type) - if(response.status_type == service_pb2.WorkerAliveResponse.StatusType.ST_DONE): - break - elif(response.status_type == service_pb2.WorkerAliveResponse.StatusType.ST_RETRY): - logging.info('sleeping for 5 seconds......') - time.sleep(5) - elif(response.status_type == service_pb2.WorkerAliveResponse.StatusType.ST_ACT): - sight.enter_block('Decision Sample', sight_pb2.Object()) - if 'constant_action' in sight.widget_decision_state: - del sight.widget_decision_state['constant_action'] - sight.widget_decision_state['discount'] = 0 - sight.widget_decision_state['last_reward'] = None - - if env: - driver_fn(env, sight) - else: - driver_fn(sight) - - finalize_episode(sight) - sight.exit_block('Decision Sample', sight_pb2.Object()) - else: - raise ValueError("invalid response from server") - logging.info('exiting from the loop.....') - # else: - # for _ in range(num_samples_to_run): - # sight.enter_block('Decision Sample', sight_pb2.Object()) - # if 'constant_action' in sight.widget_decision_state: - # del sight.widget_decision_state['constant_action'] - # sight.widget_decision_state['discount'] = 0 - # sight.widget_decision_state['last_reward'] = None - - # if env: - # driver_fn(env, sight) - # else: - # driver_fn(sight) - - # finalize_episode(sight) - # sight.exit_block('Decision Sample', sight_pb2.Object()) + # for _ in range(num_samples_to_run): + # if(FLAGS.optimizer_type == "worklist_scheduler"): + # if (FLAGS.deployment_mode == 'worker_mode'): + while (True): + # #? new rpc just to check move forward or not? + req = service_pb2.WorkerAliveRequest( + client_id=client_id, + worker_id=f'client_{client_id}_worker_{worker_location}' + ) + response = service.call( + lambda s, meta: s.WorkerAlive(req, 300, metadata=meta)) + + logging.info("response from workAlive rpc is : %s", + response.status_type) + if (response.status_type == service_pb2. 
+ WorkerAliveResponse.StatusType.ST_DONE): + break + elif (response.status_type == + service_pb2.WorkerAliveResponse.StatusType.ST_RETRY): + logging.info('sleeping for 5 seconds......') + time.sleep(5) + elif (response.status_type == + service_pb2.WorkerAliveResponse.StatusType.ST_ACT): + sight.enter_block('Decision Sample', + sight_pb2.Object()) + if 'constant_action' in sight.widget_decision_state: + del sight.widget_decision_state['constant_action'] + sight.widget_decision_state['discount'] = 0 + sight.widget_decision_state['last_reward'] = None + + if env: + driver_fn(env, sight) + else: + driver_fn(sight) + + finalize_episode(sight) + sight.exit_block('Decision Sample', sight_pb2.Object()) + else: + raise ValueError("invalid response from server") + logging.info('exiting from the loop.....') + # else: + # for _ in range(num_samples_to_run): + # sight.enter_block('Decision Sample', sight_pb2.Object()) + # if 'constant_action' in sight.widget_decision_state: + # del sight.widget_decision_state['constant_action'] + # sight.widget_decision_state['discount'] = 0 + # sight.widget_decision_state['last_reward'] = None + + # if env: + # driver_fn(env, sight) + # else: + # driver_fn(sight) + + # finalize_episode(sight) + # sight.exit_block('Decision Sample', sight_pb2.Object()) # req = service_pb2.TestRequest(client_id=str(sight.id)) # response = service.call( @@ -676,17 +686,15 @@ def get_decision_outcome_proto(outcome_label: str, double_value=val, ) else: - if(isinstance(val, dict)): + if (isinstance(val, dict)): json_value = json.dumps(val) - elif(isinstance(val, pd.Series)): + elif (isinstance(val, pd.Series)): json_value = json.dumps(val.to_dict()) else: raise TypeError("value needs to be dict type") - value = sight_pb2.Value( - sub_type=sight_pb2.Value.ST_JSON, - json_value=json_value - ) + value = sight_pb2.Value(sub_type=sight_pb2.Value.ST_JSON, + json_value=json_value) outcome_params.append( sight_pb2.DecisionParam( @@ -825,11 +833,10 @@ def decision_point( else: raise ValueError("unsupported type!!") - choice_params.append( - sight_pb2.DecisionParam( - key=attr, - value=val, - )) + choice_params.append(sight_pb2.DecisionParam( + key=attr, + value=val, + )) # pytype: disable=attribute-error obj = sight_pb2.Object( @@ -1018,8 +1025,8 @@ def finalize_episode(sight): # , optimizer_obj sight_pb2.DecisionConfigurationStart.OptimizerType. 
OT_WORKLIST_SCHEDULER, sight) req.decision_outcome.CopyFrom( - # get_fvs_outcome_proto('outcome', sight)) - # whole output of key "fvs_outcome" is stringified, not individual key-value + # get_fvs_outcome_proto('outcome', sight)) + # whole output of key "fvs_outcome" is stringified, not individual key-value get_decision_outcome_proto('outcome', sight)) # print('request : ', req) optimizer_obj = optimizer.get_instance() diff --git a/py/sight/widgets/decision/decision_episode_fn.py b/py/sight/widgets/decision/decision_episode_fn.py index 85e3d50..509e570 100644 --- a/py/sight/widgets/decision/decision_episode_fn.py +++ b/py/sight/widgets/decision/decision_episode_fn.py @@ -16,7 +16,7 @@ from dataclasses import dataclass from typing import Any, Callable, Dict, List, Tuple -from absl import logging +from helpers.logs.logs_handler import logger as logging import numpy as np # import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import @@ -133,8 +133,7 @@ def __init__( self.step_size = { attr: attr_val.step_size - for attr, attr_val in action_attrs.items() - if attr_val.step_size + for attr, attr_val in action_attrs.items() if attr_val.step_size } # for action, attributes in action_attrs.items(): diff --git a/py/sight/widgets/decision/env_driver.py b/py/sight/widgets/decision/env_driver.py index 1d6c8be..bb7478f 100644 --- a/py/sight/widgets/decision/env_driver.py +++ b/py/sight/widgets/decision/env_driver.py @@ -11,10 +11,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Default Driver function to be used while training within the Sight log.""" -import logging +from helpers.logs.logs_handler import logger as logging + import numpy as np from sight import data_structures # from sight.sight import Sight @@ -22,33 +22,35 @@ _file_name = "driver.py" + def driver_fn(env, sight) -> None: - """Executes the logic of searching for a value. + """Executes the logic of searching for a value. Args: env: The dm_env type env obcject used to call the reset and step methods. sight: The Sight logger object used to drive decisions. 
""" - method_name = 'driver_fn' - logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name) + method_name = 'driver_fn' + logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name) - timestep = env.reset() + timestep = env.reset() - state_attrs = decision.get_state_attrs(sight) - for i in range(len(state_attrs)): - data_structures.log_var(state_attrs[i], timestep.observation[i], sight) + state_attrs = decision.get_state_attrs(sight) + for i in range(len(state_attrs)): + data_structures.log_var(state_attrs[i], timestep.observation[i], sight) - while not timestep.last(): - chosen_action = decision.decision_point("DP_label", sight) + while not timestep.last(): + chosen_action = decision.decision_point("DP_label", sight) - timestep = env.step(chosen_action) + timestep = env.step(chosen_action) - for i in range(len(state_attrs)): - data_structures.log_var(state_attrs[i], timestep.observation[i], sight) - - decision.decision_outcome( - "DO_label", - timestep.reward, - sight, - ) - logging.debug("<<<< Out %s of %s", method_name, _file_name) + for i in range(len(state_attrs)): + data_structures.log_var(state_attrs[i], timestep.observation[i], + sight) + + decision.decision_outcome( + "DO_label", + timestep.reward, + sight, + ) + logging.debug("<<<< Out %s of %s", method_name, _file_name) diff --git a/py/sight/widgets/decision/get_outcome.py b/py/sight/widgets/decision/get_outcome.py index 6bf90c8..40bc43e 100644 --- a/py/sight/widgets/decision/get_outcome.py +++ b/py/sight/widgets/decision/get_outcome.py @@ -11,14 +11,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Binary that checks the current status of a given Sight optimization run.""" from typing import Any, Callable, Dict, Optional, Sequence, Text, Tuple from absl import app from absl import flags -from absl import logging +from helpers.logs.logs_handler import logger as logging from sight_service.proto import service_pb2 from sight_service.proto import service_pb2_grpc from sight import service_utils as service @@ -26,41 +25,38 @@ FLAGS = flags.FLAGS _LOG_ID = flags.DEFINE_string( - "log_id", None, "ID of the Sight log that tracks this execution." -) + "log_id", None, "ID of the Sight log that tracks this execution.") _DEPLOYMENT_MODE = flags.DEFINE_enum( 'deployment_mode', None, ['distributed', 'dsub_local', 'docker_local', 'local', 'worker_mode'], - ( - 'The procedure to use when training a model to drive applications that ' - 'use the Decision API.' 
- ), + ('The procedure to use when training a model to drive applications that ' + 'use the Decision API.'), ) + def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError("Too many command-line arguments.") + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") - request = service_pb2.GetOutcomeRequest() - request.client_id = str(FLAGS.log_id) - # request.unique_ids.append(1) - response = service.call( - lambda s, meta: s.GetOutcome(request, 300, metadata=meta) - ) + request = service_pb2.GetOutcomeRequest() + request.client_id = str(FLAGS.log_id) + # request.unique_ids.append(1) + response = service.call( + lambda s, meta: s.GetOutcome(request, 300, metadata=meta)) - if(response.response_str): - return response.response_str + if (response.response_str): + return response.response_str - outcome_list = [] - for outcome in response.outcome: - outcome_dict = {} - outcome_dict['reward'] = outcome.reward - outcome_dict['action'] = dict(outcome.action_attrs) - outcome_dict['outcome'] = dict(outcome.outcome_attrs) - outcome_list.append(outcome_dict) - return outcome_list + outcome_list = [] + for outcome in response.outcome: + outcome_dict = {} + outcome_dict['reward'] = outcome.reward + outcome_dict['action'] = dict(outcome.action_attrs) + outcome_dict['outcome'] = dict(outcome.outcome_attrs) + outcome_list.append(outcome_dict) + return outcome_list if __name__ == "__main__": - app.run(main) + app.run(main) diff --git a/py/sight/widgets/decision/listen.py b/py/sight/widgets/decision/listen.py index db2c71b..3f22362 100644 --- a/py/sight/widgets/decision/listen.py +++ b/py/sight/widgets/decision/listen.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Binary that checks the current status of a given Sight optimization run.""" import inspect @@ -22,7 +21,7 @@ from absl import app from absl import flags -from absl import logging +from helpers.logs.logs_handler import logger as logging import grpc from sight_service.proto import service_pb2 from sight_service.proto import service_pb2_grpc @@ -32,34 +31,31 @@ import time _LOG_ID = flags.DEFINE_string( - 'log_id', None, 'ID of the Sight log that tracks this execution.' -) + 'log_id', None, 'ID of the Sight log that tracks this execution.') _DEPLOYMENT_MODE = flags.DEFINE_enum( 'deployment_mode', None, ['distributed', 'dsub_local', 'docker_local', 'local', 'worker_mode'], - ( - 'The procedure to use when training a model to drive applications that ' - 'use the Decision API.' 
- ),
+    ('The procedure to use when training a model to drive applications that '
+     'use the Decision API.'),
 )

+
 def main(argv: Sequence[str]) -> None:
-  if len(argv) > 1:
-    raise app.UsageError("Too many command-line arguments.")
+    if len(argv) > 1:
+        raise app.UsageError("Too many command-line arguments.")

-  while True:
-    req = service_pb2.ListenRequest()
-    req.client_id = _LOG_ID.value
-    response = service.call(
-        lambda s, meta: s.Listen(req, 300, metadata=meta)
-    )
-    print('response=',response)
-    if response.response_ready:
-      print(response.response_str)
-      break
-    time.sleep(5)
+    while True:
+        req = service_pb2.ListenRequest()
+        req.client_id = _LOG_ID.value
+        response = service.call(
+            lambda s, meta: s.Listen(req, 300, metadata=meta))
+        print('response=', response)
+        if response.response_ready:
+            print(response.response_str)
+            break
+        time.sleep(5)


 if __name__ == "__main__":
-  app.run(main)
+    app.run(main)
diff --git a/py/sight/widgets/decision/llm_optimizer_client.py b/py/sight/widgets/decision/llm_optimizer_client.py
index 4d5a427..583cd80 100644
--- a/py/sight/widgets/decision/llm_optimizer_client.py
+++ b/py/sight/widgets/decision/llm_optimizer_client.py
@@ -16,8 +16,8 @@

 from typing import Optional, Sequence, Tuple

-from absl import logging
-from sight_service.proto import service_pb2
+from helpers.logs.logs_handler import logger as logging
+from sight_service.proto import service_pb2
 from sight import service_utils as service
 from sight.proto import sight_pb2
 from sight.widgets.decision.optimizer_client import OptimizerClient
diff --git a/py/sight/widgets/decision/shower_env_driver.py b/py/sight/widgets/decision/shower_env_driver.py
index efaa08b..062a548 100644
--- a/py/sight/widgets/decision/shower_env_driver.py
+++ b/py/sight/widgets/decision/shower_env_driver.py
@@ -11,10 +11,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Demo of Drivier function to be used in case Sight used without any environment."""

-import logging
+from helpers.logs.logs_handler import logger as logging
 import random
 import numpy as np
 from sight import data_structures
@@ -23,43 +22,45 @@

 _file_name = "shower_env_driver.py"

+
 def driver_fn(sight: Sight) -> None:
-  """Executes the logic of searching for a value.
+    """Executes the logic of searching for a value.

     Args:
       sight: The Sight logger object used to drive decisions.
     """
-  method_name = 'driver_fn'
-  logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name)
-
-  logging.info('sight.widget_decision_state : %s', sight.widget_decision_state)
+    method_name = 'driver_fn'
+    logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name)
+    logging.info('sight.widget_decision_state : %s',
+                 sight.widget_decision_state)

-  temperature = 38 + random.randint(-3, 3)
-  shower_length = 60
-  data_structures.log_var("Temperature", temperature, sight)
+    temperature = 38 + random.randint(-3, 3)
+    shower_length = 60
+    data_structures.log_var("Temperature", temperature, sight)

-  for _ in range(shower_length):
-    # Ask Sight's optimizer for the action to perform.
-    chosen_action = decision.decision_point("DP_label", sight)
-    # direction = np.array(chosen_action["Direction"], dtype=np.int64)
+    for _ in range(shower_length):
+        # Ask Sight's optimizer for the action to perform.
+        chosen_action = decision.decision_point("DP_label", sight)
+        # direction = np.array(chosen_action["Direction"], dtype=np.int64)

-    # Change temperature based on the Sight-recommended direction.
-    temperature += chosen_action["Direction"]
-    logging.info('temperature=%s, direction=%s', temperature, chosen_action["Direction"])
-    data_structures.log_var("Temperature", temperature, sight)
+        # Change temperature based on the Sight-recommended direction.
+        temperature += chosen_action["Direction"]
+        logging.info('temperature=%s, direction=%s', temperature,
+                     chosen_action["Direction"])
+        data_structures.log_var("Temperature", temperature, sight)

-    # Calculate reward based on whether the temperature target has
-    # been achieved.
-    if temperature >= 37 and temperature <= 39:
-      current_reward = 1
-    else:
-      current_reward = -abs(temperature - 38)
+        # Calculate reward based on whether the temperature target has
+        # been achieved.
+        if temperature >= 37 and temperature <= 39:
+            current_reward = 1
+        else:
+            current_reward = -abs(temperature - 38)

-    # Inform Sight of the outcome of the recommended action.
-    decision.decision_outcome(
-        "DO_label",
-        current_reward,
-        sight,
-    )
-  logging.debug("<<<< Out %s of %s", method_name, _file_name)
+        # Inform Sight of the outcome of the recommended action.
+        decision.decision_outcome(
+            "DO_label",
+            current_reward,
+            sight,
+        )
+    logging.debug("<<<< Out %s of %s", method_name, _file_name)
diff --git a/py/sight/widgets/decision/single_action_optimizer_client.py b/py/sight/widgets/decision/single_action_optimizer_client.py
index dedc93e..f981fde 100644
--- a/py/sight/widgets/decision/single_action_optimizer_client.py
+++ b/py/sight/widgets/decision/single_action_optimizer_client.py
@@ -13,8 +13,8 @@
 # limitations under the License.
 """Client for optimizers that are called once per episode to communicate with server."""

-from absl import logging
-from typing import Optional, Sequence, Tuple
+from helpers.logs.logs_handler import logger as logging
+from typing import Optional, Sequence, Tuple
 from sight_service.proto import service_pb2
 from sight import service_utils as service
 from sight.proto import sight_pb2
diff --git a/py/sight/widgets/decision/tell.py b/py/sight/widgets/decision/tell.py
index 4565f71..ab21023 100644
--- a/py/sight/widgets/decision/tell.py
+++ b/py/sight/widgets/decision/tell.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Binary that checks the current status of a given Sight optimization run."""

 import inspect
@@ -22,7 +21,7 @@

 from absl import app
 from absl import flags
-from absl import logging
+from helpers.logs.logs_handler import logger as logging
 import grpc
 from sight_service.proto import service_pb2
 from sight_service.proto import service_pb2_grpc
@@ -32,33 +31,27 @@

 from sight.service_utils import generate_metadata

 _LOG_ID = flags.DEFINE_string(
-    'log_id', None, 'ID of the Sight log that tracks this execution.'
-)
+    'log_id', None, 'ID of the Sight log that tracks this execution.')
 _DEPLOYMENT_MODE = flags.DEFINE_enum(
     'deployment_mode',
     None,
     ['distributed', 'dsub_local', 'docker_local', 'local', 'worker_mode'],
-    (
-        'The procedure to use when training a model to drive applications that '
-        'use the Decision API.'
- ), -) -_TELL = flags.DEFINE_string( - 'tell', '', 'Text to tell the Sight service' + ('The procedure to use when training a model to drive applications that ' + 'use the Decision API.'), ) +_TELL = flags.DEFINE_string('tell', '', 'Text to tell the Sight service') + def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError("Too many command-line arguments.") + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") - req = service_pb2.TellRequest() - req.client_id = _LOG_ID.value - req.message_str = _TELL.value - response = service.call( - lambda s, meta: s.Tell(req, 300, metadata=meta) - ) - print(response.response_str) + req = service_pb2.TellRequest() + req.client_id = _LOG_ID.value + req.message_str = _TELL.value + response = service.call(lambda s, meta: s.Tell(req, 300, metadata=meta)) + print(response.response_str) if __name__ == "__main__": - app.run(main) + app.run(main) diff --git a/py/sight/widgets/decision/trials.py b/py/sight/widgets/decision/trials.py index 8c340e4..f85a1ad 100644 --- a/py/sight/widgets/decision/trials.py +++ b/py/sight/widgets/decision/trials.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Functionality for using Vizier to drive decisions.""" from datetime import datetime @@ -24,7 +23,7 @@ from typing import Any, Dict, Optional from absl import flags -import logging +from helpers.logs.logs_handler import logger as logging import grpc from sight_service.proto import service_pb2 @@ -46,15 +45,12 @@ # _PROJECT_ID = flags.DEFINE_string( # 'project_id', None, 'Id of cloud project' # ) -_PROJECT_ID = flags.DEFINE_string( - 'project_id', os.environ['PROJECT_ID'], 'Id of cloud project' -) -_PROJECT_REGION = flags.DEFINE_string( - 'project_region', 'us-central1', 'location to store project-data' -) -_DSUB_MACHINE_TYPE = flags.DEFINE_string( - 'dsub_machine_type', 'e2-standard-2', '' -) +_PROJECT_ID = flags.DEFINE_string('project_id', os.environ['PROJECT_ID'], + 'Id of cloud project') +_PROJECT_REGION = flags.DEFINE_string('project_region', 'us-central1', + 'location to store project-data') +_DSUB_MACHINE_TYPE = flags.DEFINE_string('dsub_machine_type', 'e2-standard-2', + '') # _DSUB_LOGGING = flags.DEFINE_string( # 'log_path', # # 'tmp/logs', @@ -75,17 +71,12 @@ def _get_experiment_name(sight: Any) -> str: - if _EXPERIMENT_NAME.value: - return _EXPERIMENT_NAME.value - else: - return ( - 'Sight_Decision_Study_' - + sight.params.label.replace(' ', '_') - + '_' - + str(sight.id) - + '_' - + datetime.now().strftime('%Y%m%d_%H%M%S') - ) + if _EXPERIMENT_NAME.value: + return _EXPERIMENT_NAME.value + else: + return ('Sight_Decision_Study_' + + sight.params.label.replace(' ', '_') + '_' + str(sight.id) + + '_' + datetime.now().strftime('%Y%m%d_%H%M%S')) def launch( @@ -98,7 +89,7 @@ def launch( num_train_workers: int, sight: Any, ): - """Launches the experiment with the service. + """Launches the experiment with the service. Args: optimizer_type: Type of optimizer we are using. @@ -108,30 +99,33 @@ def launch( num_train_workers: numbers of workers to be spawned sight: The Sight object to be used for logging. 
""" - method_name = 'launch' - logging.debug('>>>>>>>>> In %s method of %s file.', method_name, _file_name) + method_name = 'launch' + logging.debug('>>>>>>>>> In %s method of %s file.', method_name, + _file_name) - req = service_pb2.LaunchRequest() + req = service_pb2.LaunchRequest() - # config_param = sight_pb2.DecisionConfigurationStart() - # for key, attr in action_attrs.items(): - # config_param.action_attrs[key].CopyFrom(attr) - # for key, attr in state_attrs.items(): - # config_param.state_attrs[key].CopyFrom(attr) - req.decision_config_params.CopyFrom(decision_configuration) + # config_param = sight_pb2.DecisionConfigurationStart() + # for key, attr in action_attrs.items(): + # config_param.action_attrs[key].CopyFrom(attr) + # for key, attr in state_attrs.items(): + # config_param.state_attrs[key].CopyFrom(attr) + req.decision_config_params.CopyFrom(decision_configuration) - req.label = sight.params.label - req.client_id = str(sight.id) + req.label = sight.params.label + req.client_id = str(sight.id) + response = service.call(lambda s, meta: s.Launch(req, 300, metadata=meta)) + # start polling thread, fetching outcome from server for proposed actions + if (decision_configuration.optimizer_type == sight_pb2. + DecisionConfigurationStart.OptimizerType.OT_WORKLIST_SCHEDULER + and response.display_string == "Worklist Scheduler SUCCESS!"): + decision.init_sight_polling_thread(sight.id) + logging.info('##### Launch response=%s #####', response) - response = service.call(lambda s, meta: s.Launch(req, 300, metadata=meta)) - # start polling thread, fetching outcome from server for proposed actions - if(decision_configuration.optimizer_type == sight_pb2.DecisionConfigurationStart.OptimizerType.OT_WORKLIST_SCHEDULER - and response.display_string == "Worklist Scheduler SUCCESS!"): - decision.init_sight_polling_thread(sight.id) - logging.info('##### Launch response=%s #####', response) + logging.debug('<<<<<<<<< Out %s method of %s file.', method_name, + _file_name) - logging.debug('<<<<<<<<< Out %s method of %s file.', method_name, _file_name) def append_ist_time_to_logging_path_12hr(): # Define IST timezone @@ -141,6 +135,7 @@ def append_ist_time_to_logging_path_12hr(): formatted_time = current_time.strftime('%Y-%m-%d-%I-%M-%S') return formatted_time + def start_job_in_docker( num_trials: int, binary_path: Optional[str], @@ -152,7 +147,7 @@ def start_job_in_docker( decision_params: str, sight: Any, ): - """Starts a single worker in a docker container. + """Starts a single worker in a docker container. Args: num_trials: The number of times the experiment will be run during training. @@ -165,68 +160,69 @@ def start_job_in_docker( decision_params: add sight: The Sight object to be used for logging. """ - method_name = 'start_job_in_docker' - logging.debug('>>>>>>>>> In %s method of %s file.', method_name, _file_name) - - sight.enter_block('Worker Spawning', sight_pb2.Object()) - # Write the script that will execute the binary within the docker container. 
- decision_params_arg = ( - f' --decision_params={decision_params}' if decision_params else '' - ) - os.makedirs('/tmp/sight_script', exist_ok=True) - with open('/tmp/sight_script/sight_decision_command.sh', 'w') as f: - f.write('#!/bin/bash\n') - f.write('echo "$PYTHONPATH"') - f.write( - '/usr/bin/python3' - f' /project/{binary_path.split("/")[-1]} --decision_mode={decision_mode} --deployment_mode={deployment_mode}' - f' --worker_mode={worker_mode} --optimizer_type={optimizer_type} --num_trials={num_trials} ' - ) - if FLAGS.service_account: - f.write(f' --service_account={FLAGS.service_account}') - f.write(f' {decision_params_arg}\n ') - os.chmod('/tmp/sight_script/sight_decision_command.sh', 0o755) - subprocess.run(['cp', binary_path, '/tmp'], check=True) - - args = [ - 'docker', - 'run', - '-v', - f'/tmp/{binary_path.split("/")[-1]}:/project/{binary_path.split("/")[-1]}:ro', - '-v', - '/tmp/sight_script:/project/sight_script:ro', - '-v', - # f'{os.path.expanduser("~")}/.config/gcloud:/project/.config/gcloud:ro', - f'{FLAGS.gcloud_dir_path}:/project/.config/gcloud:ro', - '--env', - 'GOOGLE_APPLICATION_CREDENTIALS=/project/.config/gcloud/application_default_credentials.json', - # '--env', - # 'PYTHONPATH=/project', - '--env', - f'GOOGLE_CLOUD_PROJECT={_PROJECT_ID.value}', - '--env', - f'PARENT_LOG_ID={sight.id}', - '--env', - f'SIGHT_SERVICE_ID={service._SERVICE_ID}', - # '--env', - # f'SIGHT_SERVICE_ACCOUNT={_SERVICE_ACCOUNT.value}', - '--env', - f'worker_location={sight.location.get()}', - '--env', - f'num_samples={num_trials}', - '--net=host', - '-t', - '-i', - '--rm', - docker_image, - '/project/sight_script/sight_decision_command.sh', - # 'bash', - ] - logging.info('DOCKER CONTAINER SPAWNING =%s', ' '.join(args)) - subprocess.run(args, check=True) - - sight.exit_block('Worker Spawning', sight_pb2.Object()) - logging.debug('<<<<<<<<< Out %s method of %s file.', method_name, _file_name) + method_name = 'start_job_in_docker' + logging.debug('>>>>>>>>> In %s method of %s file.', method_name, + _file_name) + + sight.enter_block('Worker Spawning', sight_pb2.Object()) + # Write the script that will execute the binary within the docker container. 
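+    # The script, the binary and the caller's gcloud credentials are all
+    # bind-mounted into the container (see the `args` list below), and the
+    # script is then run as the container's command.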
+    decision_params_arg = (f' --decision_params={decision_params}'
+                           if decision_params else '')
+    os.makedirs('/tmp/sight_script', exist_ok=True)
+    with open('/tmp/sight_script/sight_decision_command.sh', 'w') as f:
+        f.write('#!/bin/bash\n')
+        f.write('echo "$PYTHONPATH"\n')
+        f.write(
+            '/usr/bin/python3'
+            f' /project/{binary_path.split("/")[-1]} --decision_mode={decision_mode} --deployment_mode={deployment_mode}'
+            f' --worker_mode={worker_mode} --optimizer_type={optimizer_type} --num_trials={num_trials} '
+        )
+        if FLAGS.service_account:
+            f.write(f' --service_account={FLAGS.service_account}')
+        f.write(f' {decision_params_arg}\n ')
+    os.chmod('/tmp/sight_script/sight_decision_command.sh', 0o755)
+    subprocess.run(['cp', binary_path, '/tmp'], check=True)
+
+    args = [
+        'docker',
+        'run',
+        '-v',
+        f'/tmp/{binary_path.split("/")[-1]}:/project/{binary_path.split("/")[-1]}:ro',
+        '-v',
+        '/tmp/sight_script:/project/sight_script:ro',
+        '-v',
+        # f'{os.path.expanduser("~")}/.config/gcloud:/project/.config/gcloud:ro',
+        f'{FLAGS.gcloud_dir_path}:/project/.config/gcloud:ro',
+        '--env',
+        'GOOGLE_APPLICATION_CREDENTIALS=/project/.config/gcloud/application_default_credentials.json',
+        # '--env',
+        # 'PYTHONPATH=/project',
+        '--env',
+        f'GOOGLE_CLOUD_PROJECT={_PROJECT_ID.value}',
+        '--env',
+        f'PARENT_LOG_ID={sight.id}',
+        '--env',
+        f'SIGHT_SERVICE_ID={service._SERVICE_ID}',
+        # '--env',
+        # f'SIGHT_SERVICE_ACCOUNT={_SERVICE_ACCOUNT.value}',
+        '--env',
+        f'worker_location={sight.location.get()}',
+        '--env',
+        f'num_samples={num_trials}',
+        '--net=host',
+        '-t',
+        '-i',
+        '--rm',
+        docker_image,
+        '/project/sight_script/sight_decision_command.sh',
+        # 'bash',
+    ]
+    logging.info('DOCKER CONTAINER SPAWNING =%s', ' '.join(args))
+    subprocess.run(args, check=True)
+
+    sight.exit_block('Worker Spawning', sight_pb2.Object())
+    logging.debug('<<<<<<<<< Out %s method of %s file.', method_name,
+                  _file_name)


 def start_jobs(
@@ -240,7 +236,7 @@ def start_jobs(
     worker_mode: str,
     sight: Any,
 ):
-  """Starts the dsub workers that will run the optimization.
+    """Starts the dsub workers that will run the optimization.

   Args:
     num_train_workers: Number of workers to use in a training run.
@@ -253,104 +249,105 @@
     worker_mode: add
     sight: The Sight object to be used for logging.
   """
-  method_name = 'start_jobs'
-  logging.debug('>>>>>>>>> In %s method of %s file.', method_name, _file_name)
-
-  sight.enter_block('Worker Spawning', sight_pb2.Object())
-  with open('/tmp/optimization_tasks.tsv', 'w') as outf:
-    outf.write('--env worker_id\t--env worker_location\n')
-    # num_tasks_per_worker = math.floor(num_trials / num_train_workers)
-    for worker_id in range(num_train_workers):
-      # tasks_for_cur_worker = num_tasks_per_worker
-      # # If _NUM_TRIALS is not evenly divisible by num_train_workers, add
-      # # the extra extra tasks to the first few workers.
-      # if worker_id < num_trials % num_train_workers:
-      #   tasks_for_cur_worker += 1
-      outf.write(f'{worker_id}\t{sight.location.get()}\n')
-      sight.location.get().next()
-
-
-  remote_script = (
-      # 'gs://dsub_cameltrain/cameltrain/' + binary_path.split('/')[-1]
-      f'gs://{os.environ["PROJECT_ID"]}-sight/d-sub/binary/{str(sight.id)}/' + binary_path.split('/')[-1]
-  )
-  print(f'Uploading {binary_path}...')
-  subprocess.run(['gsutil', 'cp', '-c', binary_path, remote_script], check=True)
-
-  if not FLAGS.service_account:
-    raise ValueError(
-        'flag --service_account required for worker_mode as dsub_cloud_worker.'
-    )
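+    # Each row written to /tmp/optimization_tasks.tsv below becomes one dsub
+    # task: dsub spawns one worker per row and passes that row's worker_id
+    # and worker_location columns to the worker as environment variables.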
+ method_name = 'start_jobs' + logging.debug('>>>>>>>>> In %s method of %s file.', method_name, + _file_name) + + sight.enter_block('Worker Spawning', sight_pb2.Object()) + with open('/tmp/optimization_tasks.tsv', 'w') as outf: + outf.write('--env worker_id\t--env worker_location\n') + # num_tasks_per_worker = math.floor(num_trials / num_train_workers) + for worker_id in range(num_train_workers): + # tasks_for_cur_worker = num_tasks_per_worker + # # If _NUM_TRIALS is not evenly divisible by num_train_workers, add + # # the extra extra tasks to the first few workers. + # if worker_id < num_trials % num_train_workers: + # tasks_for_cur_worker += 1 + outf.write(f'{worker_id}\t{sight.location.get()}\n') + sight.location.get().next() + + remote_script = ( + # 'gs://dsub_cameltrain/cameltrain/' + binary_path.split('/')[-1] + f'gs://{os.environ["PROJECT_ID"]}-sight/d-sub/binary/{str(sight.id)}/' + + binary_path.split('/')[-1]) + print(f'Uploading {binary_path}...') + subprocess.run(['gsutil', 'cp', '-c', binary_path, remote_script], + check=True) + + if not FLAGS.service_account: + raise ValueError( + 'flag --service_account required for worker_mode as dsub_cloud_worker.' + ) + + # provider = 'local' if deployment_mode == 'local' else 'google-cls-v2' + + # cd /x-sight && + command = ( + 'ls -l && echo "${SCRIPT}" && echo "${PYTHONPATH}" && python3 "${SCRIPT}"' + + f' --decision_mode={decision_mode}' + + f' --deployment_mode={deployment_mode}' + + f' --worker_mode={worker_mode}' + f' --optimizer_type={optimizer_type}' + # + f' --project_id={os.environ["PROJECT_ID"]}' ) - - # provider = 'local' if deployment_mode == 'local' else 'google-cls-v2' - - # cd /x-sight && - command = ( - 'ls -l && echo "${SCRIPT}" && echo "${PYTHONPATH}" && python3 "${SCRIPT}"' - + f' --decision_mode={decision_mode}' - + f' --deployment_mode={deployment_mode}' - + f' --worker_mode={worker_mode}' - + f' --optimizer_type={optimizer_type}' - # + f' --project_id={os.environ["PROJECT_ID"]}' - ) - if FLAGS.env_name: - command += f' --env_name={FLAGS.env_name}' - - logging_path = f'gs://{os.environ["PROJECT_ID"]}-sight/d-sub/logs/{sight.params.label}/{append_ist_time_to_logging_path_12hr()}/' - if(FLAGS.parent_id): - logging_path += f'{FLAGS.parent_id}/' - logging_path += str(sight.id) - - env_vars = [ - '--env', f'PARENT_LOG_ID={sight.id}', - '--env', f'PORT={service.get_port_number()}' - ] - - print("FLAGS.deployment_mode : ", FLAGS.deployment_mode) - if FLAGS.deployment_mode == 'vm': - if FLAGS.ip_addr == 'localhost': - raise ValueError("ip_address must be provided for workers") - env_vars += ['--env', f'IP_ADDR={FLAGS.ip_addr}'] - elif FLAGS.deployment_mode == 'distributed': - env_vars += ['--env', f'SIGHT_SERVICE_ID={service._SERVICE_ID}'] - - - print('sight.id=%s' % sight.id) - args = [ - 'dsub', - '--provider=google-cls-v2', - f'--regions={_PROJECT_REGION.value}', - '--use-private-address', - # f'--location={_PROJECT_REGION.value}', - f'--image={docker_image}', - f'--machine-type={_DSUB_MACHINE_TYPE.value}', - f'--project={_PROJECT_ID.value}', - # f'--logging=gs://{os.environ["PROJECT_ID"]}-sight/d-sub/logs/{service._SERVICE_ID}/{sight.id}', - f'--logging={logging_path}', - # '--env', - # f'PARENT_LOG_ID={sight.id}', - # '--env', - # f'SIGHT_SERVICE_ID={service._SERVICE_ID}', - # '--env', - # f'PORT_NUMBER={service.get_port_number()}', - *env_vars, - '--input', - f'SCRIPT={remote_script}', - f'--command={command}', - f'--service-account={FLAGS.service_account}@{os.environ["PROJECT_ID"]}.iam.gserviceaccount.com', - 
f'--boot-disk-size={_DSUB_BOOT_DISK_SIZE.value}', - '--tasks', - '/tmp/optimization_tasks.tsv', - '--name', - _get_experiment_name(sight)[:63], - ] - - logging.info('CLI=%s', ' '.join(args)) - subprocess.run(args, check=True) - - sight.exit_block('Worker Spawning', sight_pb2.Object()) - logging.info('worker logs available at : %s', f'gs://{os.environ["PROJECT_ID"]}/d-sub/logs/default') - logging.debug('<<<<<<<<< Out %s method of %s file.', method_name, _file_name) + if FLAGS.env_name: + command += f' --env_name={FLAGS.env_name}' + + logging_path = f'gs://{os.environ["PROJECT_ID"]}-sight/d-sub/logs/{sight.params.label}/{append_ist_time_to_logging_path_12hr()}/' + if (FLAGS.parent_id): + logging_path += f'{FLAGS.parent_id}/' + logging_path += str(sight.id) + + env_vars = [ + '--env', f'PARENT_LOG_ID={sight.id}', '--env', + f'PORT={service.get_port_number()}' + ] + + print("FLAGS.deployment_mode : ", FLAGS.deployment_mode) + if FLAGS.deployment_mode == 'vm': + if FLAGS.ip_addr == 'localhost': + raise ValueError("ip_address must be provided for workers") + env_vars += ['--env', f'IP_ADDR={FLAGS.ip_addr}'] + elif FLAGS.deployment_mode == 'distributed': + env_vars += ['--env', f'SIGHT_SERVICE_ID={service._SERVICE_ID}'] + + print('sight.id=%s' % sight.id) + args = [ + 'dsub', + '--provider=google-cls-v2', + f'--regions={_PROJECT_REGION.value}', + '--use-private-address', + # f'--location={_PROJECT_REGION.value}', + f'--image={docker_image}', + f'--machine-type={_DSUB_MACHINE_TYPE.value}', + f'--project={_PROJECT_ID.value}', + # f'--logging=gs://{os.environ["PROJECT_ID"]}-sight/d-sub/logs/{service._SERVICE_ID}/{sight.id}', + f'--logging={logging_path}', + # '--env', + # f'PARENT_LOG_ID={sight.id}', + # '--env', + # f'SIGHT_SERVICE_ID={service._SERVICE_ID}', + # '--env', + # f'PORT_NUMBER={service.get_port_number()}', + *env_vars, + '--input', + f'SCRIPT={remote_script}', + f'--command={command}', + f'--service-account={FLAGS.service_account}@{os.environ["PROJECT_ID"]}.iam.gserviceaccount.com', + f'--boot-disk-size={_DSUB_BOOT_DISK_SIZE.value}', + '--tasks', + '/tmp/optimization_tasks.tsv', + '--name', + _get_experiment_name(sight)[:63], + ] + + logging.info('CLI=%s', ' '.join(args)) + subprocess.run(args, check=True) + + sight.exit_block('Worker Spawning', sight_pb2.Object()) + logging.info('worker logs available at : %s', + f'gs://{os.environ["PROJECT_ID"]}/d-sub/logs/default') + logging.debug('<<<<<<<<< Out %s method of %s file.', method_name, + _file_name) def start_job_in_dsub_local( @@ -364,7 +361,7 @@ def start_job_in_dsub_local( worker_mode: str, sight: Any, ): - """Starts the dsub workers that will run the optimization. + """Starts the dsub workers that will run the optimization. Args: num_train_workers: Number of workers to use in a training run. @@ -377,70 +374,74 @@ def start_job_in_dsub_local( worker_mode: add sight: The Sight object to be used for logging. """ - method_name = 'start_job_in_dsub_local' - logging.debug('>>>>>>>>> In %s method of %s file.', method_name, _file_name) - - sight.enter_block('Worker Spawning locally', sight_pb2.Object()) - with open('/tmp/optimization_tasks.tsv', 'w') as outf: - outf.write('--env worker_id\t--env num_samples\t--env worker_location\n') - num_tasks_per_worker = math.floor(num_trials / num_train_workers) - for worker_id in range(num_train_workers): - tasks_for_cur_worker = num_tasks_per_worker - # If _NUM_TRIALS is not evenly divisible by num_train_workers, add - # the extra extra tasks to the first few workers. 
- if worker_id < num_trials % num_train_workers: - tasks_for_cur_worker += 1 - outf.write(f'{worker_id}\t{tasks_for_cur_worker}\t{sight.location.get()}\n') - sight.location.get().next() - - remote_script = ( - f'gs://{os.environ["PROJECT_ID"]}/sight/d-sub/binary/' + binary_path.split('/')[-1] - ) - print(f'Uploading {binary_path}...') - subprocess.run(['gsutil', 'cp', '-c', binary_path, remote_script], check=True) - - # provider = 'google-cls-v2' if deployment_mode == 'distributed' else 'local' - - script_args = ( - f'--decision_mode={decision_mode} --deployment_mode={deployment_mode} --worker_mode={worker_mode} --optimizer_type={optimizer_type} ' - ) - if FLAGS.service_account: + method_name = 'start_job_in_dsub_local' + logging.debug('>>>>>>>>> In %s method of %s file.', method_name, + _file_name) + + sight.enter_block('Worker Spawning locally', sight_pb2.Object()) + with open('/tmp/optimization_tasks.tsv', 'w') as outf: + outf.write( + '--env worker_id\t--env num_samples\t--env worker_location\n') + num_tasks_per_worker = math.floor(num_trials / num_train_workers) + for worker_id in range(num_train_workers): + tasks_for_cur_worker = num_tasks_per_worker + # If _NUM_TRIALS is not evenly divisible by num_train_workers, add + # the extra extra tasks to the first few workers. + if worker_id < num_trials % num_train_workers: + tasks_for_cur_worker += 1 + outf.write( + f'{worker_id}\t{tasks_for_cur_worker}\t{sight.location.get()}\n' + ) + sight.location.get().next() + + remote_script = (f'gs://{os.environ["PROJECT_ID"]}/sight/d-sub/binary/' + + binary_path.split('/')[-1]) + print(f'Uploading {binary_path}...') + subprocess.run(['gsutil', 'cp', '-c', binary_path, remote_script], + check=True) + + # provider = 'google-cls-v2' if deployment_mode == 'distributed' else 'local' + script_args = ( - script_args + f'--service_account={FLAGS.service_account}' + f'--decision_mode={decision_mode} --deployment_mode={deployment_mode} --worker_mode={worker_mode} --optimizer_type={optimizer_type} ' ) - - print('sight.id=%s' % sight.id) - args = [ - 'dsub', - '--provider=local', - f'--image={docker_image}', - f'--project={_PROJECT_ID.value}', - f'--logging=gs://{os.environ["PROJECT_ID"]}/d-sub/logs/local/{sight.id}', - '--env', - f'GOOGLE_CLOUD_PROJECT={os.environ["PROJECT_ID"]}', - '--env', - 'GOOGLE_APPLICATION_CREDENTIALS=/mnt/data/mount/file' - + f'{FLAGS.gcloud_dir_path}/application_default_credentials.json', - '--env', - f'PARENT_LOG_ID={sight.id}', - # '--env', - # 'PYTHONPATH=/project', - '--env', - f'SIGHT_SERVICE_ID={service._SERVICE_ID}', - '--input', - f'SCRIPT={remote_script}', - f'--command=cd /x-sight && python3 "${{SCRIPT}}" {script_args}', - # + f'--optimizer_type={optimizer_type}', - '--mount', - 'RESOURCES=file:/' + f'{FLAGS.gcloud_dir_path}', - # + f'{os.path.expanduser("~")}/.config/gcloud', - '--tasks', - '/tmp/optimization_tasks.tsv', - '--name', - _get_experiment_name(sight)[:63], - ] - logging.info('CLI=%s', ' '.join(args)) - subprocess.run(args, check=True) - - sight.exit_block('Worker Spawning', sight_pb2.Object()) - logging.debug('<<<<<<<<< Out %s method of %s file.', method_name, _file_name) + if FLAGS.service_account: + script_args = (script_args + + f'--service_account={FLAGS.service_account}') + + print('sight.id=%s' % sight.id) + args = [ + 'dsub', + '--provider=local', + f'--image={docker_image}', + f'--project={_PROJECT_ID.value}', + f'--logging=gs://{os.environ["PROJECT_ID"]}/d-sub/logs/local/{sight.id}', + '--env', + f'GOOGLE_CLOUD_PROJECT={os.environ["PROJECT_ID"]}', + 
'--env', + 'GOOGLE_APPLICATION_CREDENTIALS=/mnt/data/mount/file' + + f'{FLAGS.gcloud_dir_path}/application_default_credentials.json', + '--env', + f'PARENT_LOG_ID={sight.id}', + # '--env', + # 'PYTHONPATH=/project', + '--env', + f'SIGHT_SERVICE_ID={service._SERVICE_ID}', + '--input', + f'SCRIPT={remote_script}', + f'--command=cd /x-sight && python3 "${{SCRIPT}}" {script_args}', + # + f'--optimizer_type={optimizer_type}', + '--mount', + 'RESOURCES=file:/' + f'{FLAGS.gcloud_dir_path}', + # + f'{os.path.expanduser("~")}/.config/gcloud', + '--tasks', + '/tmp/optimization_tasks.tsv', + '--name', + _get_experiment_name(sight)[:63], + ] + logging.info('CLI=%s', ' '.join(args)) + subprocess.run(args, check=True) + + sight.exit_block('Worker Spawning', sight_pb2.Object()) + logging.debug('<<<<<<<<< Out %s method of %s file.', method_name, + _file_name) diff --git a/py/sight/widgets/numpy_sight/numpy_sight.py b/py/sight/widgets/numpy_sight/numpy_sight.py index 275db95..76543fb 100644 --- a/py/sight/widgets/numpy_sight/numpy_sight.py +++ b/py/sight/widgets/numpy_sight/numpy_sight.py @@ -11,14 +11,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Documentation of numpy events and data in the Sight log.""" import dataclasses import inspect from typing import Any, List, Optional, Union -from absl import logging +from helpers.logs.logs_handler import logger as logging import numpy as np from sight.proto import sight_pb2 @@ -27,16 +26,16 @@ @dataclasses.dataclass class LabeledNpArray: - """A variant on np.ndarrays where the dimensions are labeled.""" + """A variant on np.ndarrays where the dimensions are labeled.""" - array: np.ndarray + array: np.ndarray - # The labels of all the array dimensions. - dim_label: List[str] + # The labels of all the array dimensions. + dim_label: List[str] - # For each dimension of array contains the string labels of each slice - # in that dimension. - dim_axis_values: List[List[str]] + # For each dimension of array contains the string labels of each slice + # in that dimension. + dim_axis_values: List[List[str]] def log( @@ -45,7 +44,7 @@ def log( sight: Any, frame: Optional[Any] = None, ) -> Optional[Location]: - """Documents numpy object in the Sight log if Sight is being used. + """Documents numpy object in the Sight log if Sight is being used. Args: label: The label that identifies this object. @@ -57,88 +56,82 @@ def log( Returns: The location of this object within the log. 
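+
+    Example (an illustrative sketch added for orientation, not part of the
+    widget's tests; assumes `sight` is an already-initialized Sight instance
+    and the labels are made up)::
+
+        import numpy as np
+        from sight.widgets.numpy_sight import numpy_sight
+
+        # np.int64/np.float64/bool scalars are logged as ST_VALUE objects.
+        numpy_sight.log('step_count', np.int64(42), sight)
+
+        # Bare ndarrays become ST_TENSOR objects with auto-generated
+        # dimension labels ('dim0', 'dim1', ...).
+        numpy_sight.log('state', np.zeros((2, 3)), sight)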
""" - if sight is None: - return None - - if not sight.is_logging_enabled(): - return None + if sight is None: + return None + + if not sight.is_logging_enabled(): + return None + + obj = sight_pb2.Object() + + if frame is None: + # pytype: disable=attribute-error + frame = inspect.currentframe().f_back + # pytype: enable=attribute-error + sight.set_object_code_loc(obj, frame) + + # obj_to_log is a scalar + if isinstance(obj_to_log, np.int64): + obj.sub_type = sight_pb2.Object.SubType.ST_VALUE + obj.value.sub_type = sight_pb2.Value.ST_INT64 + obj.value.int64_value = int(obj_to_log) + return sight.log_object(obj, True) + + if isinstance(obj_to_log, np.float64): + obj.sub_type = sight_pb2.Object.SubType.ST_VALUE + obj.value.sub_type = sight_pb2.Value.ST_DOUBLE + obj.value.double_value = int(obj_to_log) + return sight.log_object(obj, True) + + if isinstance(obj_to_log, bool): + obj.sub_type = sight_pb2.Object.SubType.ST_VALUE + obj.value.sub_type = sight_pb2.Value.ST_BOOL + obj.value.bool_value = int(obj_to_log) + return sight.log_object(obj, True) + + # obj_to_log is an array + if isinstance(obj_to_log, np.ndarray): + labeled_array = LabeledNpArray( + obj_to_log, + [f'dim{i}' for i in range(len(obj_to_log.shape))], + [[f'v{v}' for v in range(obj_to_log.shape[i])] + for i in range(len(obj_to_log.shape))], + ) + elif isinstance(obj_to_log, LabeledNpArray): + labeled_array = obj_to_log + else: + logging.error('Invalid type for array: %s', obj_to_log) + return None + + obj.sub_type = sight_pb2.Object.SubType.ST_TENSOR + obj.tensor.label = label + obj.tensor.shape.extend(labeled_array.array.shape) + # print('labeled_array=%s' % labeled_array) + # print('labeled_array.array.dtype=%s' % labeled_array.array.dtype) + if (labeled_array.array.dtype == float + or labeled_array.array.dtype == np.float32 + or labeled_array.array.dtype == np.float64): + obj.tensor.sub_type = sight_pb2.Tensor.ST_DOUBLE + obj.tensor.double_values.value.extend( + labeled_array.array.reshape(labeled_array.array.size).tolist()) + elif ( + # labeled_array.array.dtype == np.int + # or + labeled_array.array.dtype == np.int32 + or labeled_array.array.dtype == np.int64): + obj.tensor.sub_type = sight_pb2.Tensor.ST_INT64 + obj.tensor.int64_values.value.extend( + labeled_array.array.reshape(labeled_array.array.size).tolist()) + obj.tensor.dim_label.extend(labeled_array.dim_label) + for dav in labeled_array.dim_axis_values: + obj.tensor.dim_axis_values.append( + sight_pb2.Tensor.StringValues(value=dav)) - obj = sight_pb2.Object() - - if frame is None: - # pytype: disable=attribute-error - frame = inspect.currentframe().f_back - # pytype: enable=attribute-error - sight.set_object_code_loc(obj, frame) - - # obj_to_log is a scalar - if isinstance(obj_to_log, np.int64): - obj.sub_type = sight_pb2.Object.SubType.ST_VALUE - obj.value.sub_type = sight_pb2.Value.ST_INT64 - obj.value.int64_value = int(obj_to_log) return sight.log_object(obj, True) - if isinstance(obj_to_log, np.float64): - obj.sub_type = sight_pb2.Object.SubType.ST_VALUE - obj.value.sub_type = sight_pb2.Value.ST_DOUBLE - obj.value.double_value = int(obj_to_log) - return sight.log_object(obj, True) - - if isinstance(obj_to_log, bool): - obj.sub_type = sight_pb2.Object.SubType.ST_VALUE - obj.value.sub_type = sight_pb2.Value.ST_BOOL - obj.value.bool_value = int(obj_to_log) - return sight.log_object(obj, True) - - # obj_to_log is an array - if isinstance(obj_to_log, np.ndarray): - labeled_array = LabeledNpArray( - obj_to_log, - [f'dim{i}' for i in range(len(obj_to_log.shape))], - [ 
- [f'v{v}' for v in range(obj_to_log.shape[i])] - for i in range(len(obj_to_log.shape)) - ], - ) - elif isinstance(obj_to_log, LabeledNpArray): - labeled_array = obj_to_log - else: - logging.error('Invalid type for array: %s', obj_to_log) - return None - - obj.sub_type = sight_pb2.Object.SubType.ST_TENSOR - obj.tensor.label = label - obj.tensor.shape.extend(labeled_array.array.shape) - # print('labeled_array=%s' % labeled_array) - # print('labeled_array.array.dtype=%s' % labeled_array.array.dtype) - if ( - labeled_array.array.dtype == float - or labeled_array.array.dtype == np.float32 - or labeled_array.array.dtype == np.float64 - ): - obj.tensor.sub_type = sight_pb2.Tensor.ST_DOUBLE - obj.tensor.double_values.value.extend( - labeled_array.array.reshape(labeled_array.array.size).tolist() - ) - elif ( - # labeled_array.array.dtype == np.int - # or - labeled_array.array.dtype == np.int32 - or labeled_array.array.dtype == np.int64 - ): - obj.tensor.sub_type = sight_pb2.Tensor.ST_INT64 - obj.tensor.int64_values.value.extend( - labeled_array.array.reshape(labeled_array.array.size).tolist() - ) - obj.tensor.dim_label.extend(labeled_array.dim_label) - for dav in labeled_array.dim_axis_values: - obj.tensor.dim_axis_values.append(sight_pb2.Tensor.StringValues(value=dav)) - - return sight.log_object(obj, True) - def from_log(sub_log: List[sight_pb2.Object]) -> Optional[np.ndarray]: - """Loads a numpy array from a log sub-sequence. + """Loads a numpy array from a log sub-sequence. Args: sub_log: The sub-sequence of log objects to load from. @@ -146,15 +139,17 @@ def from_log(sub_log: List[sight_pb2.Object]) -> Optional[np.ndarray]: Returns: The loaded numpy array. """ - obj = sub_log[0] + obj = sub_log[0] - if obj.sub_type != sight_pb2.Object.ST_TENSOR: - return None + if obj.sub_type != sight_pb2.Object.ST_TENSOR: + return None - # No case for int64 since it is treated as a Python int for now - if obj.tensor.sub_type == sight_pb2.Tensor.ST_DOUBLE: - return np.array(obj.tensor.double_values.value).reshape(obj.tensor.shape) - if obj.tensor.sub_type == sight_pb2.Tensor.ST_INT64: - return np.array(obj.tensor.int64_values.value).reshape(obj.tensor.shape) + # No case for int64 since it is treated as a Python int for now + if obj.tensor.sub_type == sight_pb2.Tensor.ST_DOUBLE: + return np.array(obj.tensor.double_values.value).reshape( + obj.tensor.shape) + if obj.tensor.sub_type == sight_pb2.Tensor.ST_INT64: + return np.array(obj.tensor.int64_values.value).reshape( + obj.tensor.shape) - return None + return None diff --git a/py/sight/widgets/pandas_sight/pandas_sight.py b/py/sight/widgets/pandas_sight/pandas_sight.py index 30f6da2..e9c4121 100644 --- a/py/sight/widgets/pandas_sight/pandas_sight.py +++ b/py/sight/widgets/pandas_sight/pandas_sight.py @@ -11,46 +11,48 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-
 """Documentation of pandas events and data in the Sight log."""

 import dataclasses
 import inspect
 from typing import Any, List, Optional, Union

-from absl import logging
+from helpers.logs.logs_handler import logger as logging
 import numpy as np
 import pandas as pd

 from sight.proto import sight_pb2
 from sight.location import Location

+
 def _df_start(
-    label: str,
-    sight: Any,
-    frame: Any,
+    label: str,
+    sight: Any,
+    frame: Any,
 ) -> None:
-  start_obj = sight_pb2.Object()
-  start_obj.sub_type = sight_pb2.Object.SubType.ST_BLOCK_START
-  start_obj.block_start.sub_type = sight_pb2.BlockStart.ST_LIST
-  start_obj.block_start.list.sub_type = sight_pb2.ListStart.ST_HETEROGENEOUS
-  sight.enter_block(label, start_obj, frame)
+    start_obj = sight_pb2.Object()
+    start_obj.sub_type = sight_pb2.Object.SubType.ST_BLOCK_START
+    start_obj.block_start.sub_type = sight_pb2.BlockStart.ST_LIST
+    start_obj.block_start.list.sub_type = sight_pb2.ListStart.ST_HETEROGENEOUS
+    sight.enter_block(label, start_obj, frame)
+

 def _df_end(
-    label: str,
-    sight: Any,
-    frame: Any,
+    label: str,
+    sight: Any,
+    frame: Any,
 ) -> None:
-  end_obj = sight_pb2.Object(sub_type=sight_pb2.Object.SubType.ST_BLOCK_END)
-  sight.exit_block(label, end_obj)
+    end_obj = sight_pb2.Object(sub_type=sight_pb2.Object.SubType.ST_BLOCK_END)
+    sight.exit_block(label, end_obj)
+

 def log(
-    label: str,
-    df: pd.DataFrame,
-    sight: Any,
-    frame: Optional[Any] = None,
+    label: str,
+    df: pd.DataFrame,
+    sight: Any,
+    frame: Optional[Any] = None,
 ) -> Optional[Location]:
-  """Documents pandas DataFrame object in the Sight log if Sight is being used.
+    """Documents pandas DataFrame object in the Sight log if Sight is being used.

   Args:
     label: The label that identifies this object.
     df: The pandas DataFrame to be logged.
     sight:  The Sight object via which logging is to be done.
     frame: The call stack frame that contains the calling context information.

   Returns:
     The location of this object within the log.
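+
+    Example (an illustrative sketch; assumes `sight` is an already-initialized
+    Sight instance)::
+
+        import pandas as pd
+        from sight.widgets.pandas_sight import pandas_sight
+
+        df = pd.DataFrame({'height': [1.0, 2.5], 'count': [3, 4]})
+        # Logs one heterogeneous-list block with a named ST_TENSOR per
+        # column ('height' as doubles, 'count' as int64s).
+        pandas_sight.log('measurements', df, sight)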
""" - if sight is None: - return None - - if not sight.is_logging_enabled(): - return None - - - if frame is None: - # pytype: disable=attribute-error - frame = inspect.currentframe().f_back - # pytype: enable=attribute-error - - _df_start(label, sight, frame) - - for i in range(df.shape[1]): - nv_start_obj = sight_pb2.Object() - nv_start_obj.sub_type = sight_pb2.Object.SubType.ST_BLOCK_START - nv_start_obj.block_start.sub_type = sight_pb2.BlockStart.ST_NAMED_VALUE - sight.enter_block(str(df.columns[i]), nv_start_obj, frame) - - obj = sight_pb2.Object() - sight.set_object_code_loc(obj, frame) - - obj.sub_type = sight_pb2.Object.SubType.ST_TENSOR - obj.tensor.label = str(df.columns[i]) - obj.tensor.shape.append(df.shape[0]) - if ( - df.dtypes[df.columns[i]] == float - or df.dtypes[df.columns[i]] == np.float32 - or df.dtypes[df.columns[i]] == np.float64 - ): - obj.tensor.sub_type = sight_pb2.Tensor.ST_DOUBLE - obj.tensor.double_values.value.extend(df[df.columns[i]].tolist()) - elif ( - # df.dtypes[df.columns[i]] == np.int - # or - df.dtypes[df.columns[i]] == np.int32 - or df.dtypes[df.columns[i]] == np.int64 - ): - obj.tensor.sub_type = sight_pb2.Tensor.ST_INT64 - obj.tensor.int64_values.value.extend(df[df.columns[i]].tolist()) - else: - obj.tensor.sub_type = sight_pb2.Tensor.ST_STRING - obj.tensor.string_values.value.extend( - [str(v) for v in df[df.columns[i]].tolist()]) - obj.tensor.dim_label.append(str(df.columns[i])) - - sight.log_object(obj, True) - - nv_end_obj = sight_pb2.Object() - nv_end_obj.sub_type = sight_pb2.Object.SubType.ST_BLOCK_END - sight.exit_block(label, nv_end_obj) - - _df_end(label, sight, frame) - + if sight is None: + return None + + if not sight.is_logging_enabled(): + return None + + if frame is None: + # pytype: disable=attribute-error + frame = inspect.currentframe().f_back + # pytype: enable=attribute-error + + _df_start(label, sight, frame) + + for i in range(df.shape[1]): + nv_start_obj = sight_pb2.Object() + nv_start_obj.sub_type = sight_pb2.Object.SubType.ST_BLOCK_START + nv_start_obj.block_start.sub_type = sight_pb2.BlockStart.ST_NAMED_VALUE + sight.enter_block(str(df.columns[i]), nv_start_obj, frame) + + obj = sight_pb2.Object() + sight.set_object_code_loc(obj, frame) + + obj.sub_type = sight_pb2.Object.SubType.ST_TENSOR + obj.tensor.label = str(df.columns[i]) + obj.tensor.shape.append(df.shape[0]) + if (df.dtypes[df.columns[i]] == float + or df.dtypes[df.columns[i]] == np.float32 + or df.dtypes[df.columns[i]] == np.float64): + obj.tensor.sub_type = sight_pb2.Tensor.ST_DOUBLE + obj.tensor.double_values.value.extend(df[df.columns[i]].tolist()) + elif ( + # df.dtypes[df.columns[i]] == np.int + # or + df.dtypes[df.columns[i]] == np.int32 + or df.dtypes[df.columns[i]] == np.int64): + obj.tensor.sub_type = sight_pb2.Tensor.ST_INT64 + obj.tensor.int64_values.value.extend(df[df.columns[i]].tolist()) + else: + obj.tensor.sub_type = sight_pb2.Tensor.ST_STRING + obj.tensor.string_values.value.extend( + [str(v) for v in df[df.columns[i]].tolist()]) + obj.tensor.dim_label.append(str(df.columns[i])) + + sight.log_object(obj, True) + + nv_end_obj = sight_pb2.Object() + nv_end_obj.sub_type = sight_pb2.Object.SubType.ST_BLOCK_END + sight.exit_block(label, nv_end_obj) + + _df_end(label, sight, frame) def from_log(sub_log: List[sight_pb2.Object]) -> Optional[np.ndarray]: - """Loads a numpy array from a log sub-sequence. + """Loads a numpy array from a log sub-sequence. Args: sub_log: The sub-sequence of log objects to load from. Returns: The loaded numpy array. 
""" - obj = sub_log[0] + obj = sub_log[0] - if obj.sub_type != sight_pb2.Object.ST_TENSOR: - return None + if obj.sub_type != sight_pb2.Object.ST_TENSOR: + return None - # No case for int64 since it is treated as a Python int for now - if obj.tensor.sub_type == sight_pb2.Tensor.ST_DOUBLE: - return np.array(obj.tensor.double_values.value).reshape(obj.tensor.shape) - if obj.tensor.sub_type == sight_pb2.Tensor.ST_INT64: - return np.array(obj.tensor.int64_values.value).reshape(obj.tensor.shape) + # No case for int64 since it is treated as a Python int for now + if obj.tensor.sub_type == sight_pb2.Tensor.ST_DOUBLE: + return np.array(obj.tensor.double_values.value).reshape( + obj.tensor.shape) + if obj.tensor.sub_type == sight_pb2.Tensor.ST_INT64: + return np.array(obj.tensor.int64_values.value).reshape( + obj.tensor.shape) - return None + return None diff --git a/py/sight/widgets/simulation/analysis_utils.py b/py/sight/widgets/simulation/analysis_utils.py index 919ce9c..c872323 100644 --- a/py/sight/widgets/simulation/analysis_utils.py +++ b/py/sight/widgets/simulation/analysis_utils.py @@ -11,13 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Utilities for analyzing Sight logs that document simulation runs.""" from typing import Any, Dict, Iterable, Iterator, List, Optional, Sequence, Tuple -from absl import logging -import apache_beam as beam +from helpers.logs.logs_handler import logger as logging +from helpers.logs.logs_handler import logger as logging import numpy as np from sight import data_structures @@ -29,27 +28,23 @@ KeyedObjMap = Tuple[str, Dict[str, Any]] -def single_objects_filter( - obj: sight_pb2.Object, sub_type: sight_pb2.Object.SubType -) -> bool: - return obj.sub_type == sub_type +def single_objects_filter(obj: sight_pb2.Object, + sub_type: sight_pb2.Object.SubType) -> bool: + return obj.sub_type == sub_type -def start_objects_filter( - obj: sight_pb2.Object, block_sub_type: sight_pb2.BlockStart.SubType -) -> bool: - return ( - obj.sub_type == sight_pb2.Object.ST_BLOCK_START - and obj.block_start.sub_type == block_sub_type - ) +def start_objects_filter(obj: sight_pb2.Object, + block_sub_type: sight_pb2.BlockStart.SubType) -> bool: + return (obj.sub_type == sight_pb2.Object.ST_BLOCK_START + and obj.block_start.sub_type == block_sub_type) def log_uid(obj: sight_pb2.Object) -> str: - for a in obj.attribute: - if a.key != 'log_uid': - continue - return a.value - return '' + for a in obj.attribute: + if a.key != 'log_uid': + continue + return a.value + return '' def single_objects_key_parent( @@ -57,18 +52,15 @@ def single_objects_key_parent( sub_type: sight_pb2.Object.SubType, label: str, ) -> beam.pvalue.PCollection[KeyedObjMap]: - return ( - object_col - | 'single_objects_key_parent Filter ' + label - >> beam.Filter(lambda obj: single_objects_filter(obj, sub_type)) - | 'single_objects_key_parent Map ' + label - >> beam.Map( - lambda x: ( - f'{x.ancestor_start_location[-2]} - {log_uid(x)}', - {label: x}, - ) - ) - ) + return (object_col + | 'single_objects_key_parent Filter ' + label >> + beam.Filter(lambda obj: single_objects_filter(obj, sub_type)) + | 'single_objects_key_parent Map ' + label >> beam.Map(lambda x: ( + f'{x.ancestor_start_location[-2]} - {log_uid(x)}', + { + label: x + }, + ))) def single_objects_key_log_uid( @@ -76,13 +68,13 @@ def single_objects_key_log_uid( sub_type: sight_pb2.Object.SubType, label: str, ) -> 
beam.pvalue.PCollection[KeyedObjMap]: - return ( - object_col - | 'single_objects_key_log_uid Filter ' + label - >> beam.Filter(lambda obj: single_objects_filter(obj, sub_type)) - | 'single_objects_key_log_uid Map ' + label - >> beam.Map(lambda x: (log_uid(x), {label: x})) - ) + return (object_col + | 'single_objects_key_log_uid Filter ' + label >> + beam.Filter(lambda obj: single_objects_filter(obj, sub_type)) + | 'single_objects_key_log_uid Map ' + label >> + beam.Map(lambda x: (log_uid(x), { + label: x + }))) def block_start_objects( @@ -90,12 +82,12 @@ def block_start_objects( block_sub_type: sight_pb2.BlockStart.SubType, label: str, ) -> beam.pvalue.PCollection[ObjMap]: - return ( - object_col - | 'objects Filter ' + label - >> beam.Filter(lambda obj: start_objects_filter(obj, block_sub_type)) - | 'objects Map ' + label >> beam.Map(lambda x: ({label: x})) - ) + return (object_col + | 'objects Filter ' + label >> + beam.Filter(lambda obj: start_objects_filter(obj, block_sub_type)) + | 'objects Map ' + label >> beam.Map(lambda x: ({ + label: x + }))) def block_start_objects_key_self( @@ -103,13 +95,13 @@ def block_start_objects_key_self( block_sub_type: sight_pb2.BlockStart.SubType, label: str, ) -> beam.pvalue.PCollection[KeyedObjMap]: - return ( - object_col - | 'objects_key_self Filter ' + label - >> beam.Filter(lambda obj: start_objects_filter(obj, block_sub_type)) - | 'objects_key_self Map ' + label - >> beam.Map(lambda x: (f'{x.location} - {log_uid(x)}', {label: x})) - ) + return (object_col + | 'objects_key_self Filter ' + label >> + beam.Filter(lambda obj: start_objects_filter(obj, block_sub_type)) + | 'objects_key_self Map ' + label >> + beam.Map(lambda x: (f'{x.location} - {log_uid(x)}', { + label: x + }))) def block_start_objects_key_parent( @@ -117,18 +109,15 @@ def block_start_objects_key_parent( block_sub_type: sight_pb2.BlockStart.SubType, label: str, ) -> beam.pvalue.PCollection[KeyedObjMap]: - return ( - object_col - | 'objects_key_parent Filter ' + label - >> beam.Filter(lambda obj: start_objects_filter(obj, block_sub_type)) - | 'objects_key_parent Map ' + label - >> beam.Map( - lambda x: ( - f'{x.ancestor_start_location[-2]} - {log_uid(x)}', - {label: x}, - ) - ) - ) + return (object_col + | 'objects_key_parent Filter ' + label >> + beam.Filter(lambda obj: start_objects_filter(obj, block_sub_type)) + | 'objects_key_parent Map ' + label >> beam.Map(lambda x: ( + f'{x.ancestor_start_location[-2]} - {log_uid(x)}', + { + label: x + }, + ))) def block_start_objects_key_log_uid( @@ -136,131 +125,108 @@ def block_start_objects_key_log_uid( block_sub_type: sight_pb2.BlockStart.SubType, label: str, ) -> beam.pvalue.PCollection[KeyedObjMap]: - return ( - object_col - | 'objects_key_log_uid Filter ' + label - >> beam.Filter(lambda obj: start_objects_filter(obj, block_sub_type)) - | 'objects_key_log_uid Map ' + label - >> beam.Map(lambda x: (log_uid(x), {label: x})) - ) + return ( + object_col + | 'objects_key_log_uid Filter ' + label >> + beam.Filter(lambda obj: start_objects_filter(obj, block_sub_type)) + | + 'objects_key_log_uid Map ' + label >> beam.Map(lambda x: (log_uid(x), { + label: x + }))) def create_constant_key( pcol_label: str, pcol: beam.pvalue.PCollection[ObjMap] ) -> beam.pvalue.PCollection[KeyedObjMap]: - return pcol | pcol_label + ' create_constant_key' >> beam.Map( - lambda x: ('', x) - ) + return pcol | pcol_label + ' create_constant_key' >> beam.Map(lambda x: + ('', x)) def create_log_uid_key( pcol_label: str, new_key_label: str, pcol: 
beam.pvalue.PCollection[ObjMap] ) -> beam.pvalue.PCollection[KeyedObjMap]: - return ( - pcol - | pcol_label + ' ' + new_key_label + ' create_log_uid_key' - >> beam.Map(lambda x: (log_uid(x[new_key_label]), x)) - ) + return (pcol + | pcol_label + ' ' + new_key_label + ' create_log_uid_key' >> + beam.Map(lambda x: (log_uid(x[new_key_label]), x))) def create_loc_log_uid_key( pcol_label: str, new_key_label: str, pcol: beam.pvalue.PCollection[ObjMap] ) -> beam.pvalue.PCollection[KeyedObjMap]: - return ( - pcol - | pcol_label + ' ' + new_key_label + ' create_loc_log_uid_key' - >> beam.Map( - lambda x: ( - f'{x[new_key_label].location} - {log_uid(x[new_key_label])}', - x, - ) - ) - ) + return (pcol + | pcol_label + ' ' + new_key_label + ' create_loc_log_uid_key' >> + beam.Map(lambda x: ( + f'{x[new_key_label].location} - {log_uid(x[new_key_label])}', + x, + ))) def create_named_value_label_log_uid_key( pcol_label: str, pcol: beam.pvalue.PCollection[ObjMap] ) -> beam.pvalue.PCollection[KeyedObjMap]: - return ( - pcol - | pcol_label + ' create_named_value_label_log_uid_key' - >> beam.Map( - lambda x: ( - ( - f'{x["named_value"].block_start.label} -' - f' {log_uid(x["named_value"])}' - ), - x, - ) - ) - ) + return (pcol + | pcol_label + ' create_named_value_label_log_uid_key' >> + beam.Map(lambda x: ( + (f'{x["named_value"].block_start.label} -' + f' {log_uid(x["named_value"])}'), + x, + ))) def create_var_key( pcol_label: str, pcol: beam.pvalue.PCollection[ObjMap] ) -> beam.pvalue.PCollection[KeyedObjMap]: - return pcol | pcol_label + ' create_var_key' >> beam.Map( - lambda x: (x['variable'], x) - ) + return pcol | pcol_label + ' create_var_key' >> beam.Map( + lambda x: (x['variable'], x)) def create_sim_ts_index_key( pcol_label: str, pcol: beam.pvalue.PCollection[ObjMap] ) -> beam.pvalue.PCollection[KeyedObjMap]: - return pcol | pcol_label + ' create_sim_ts_index_key' >> beam.Map( - lambda x: ( - ( - f'{log_uid(x["simulation"])}-{x["simulation"].location} {x["simulation_time_step"].block_start.simulation_time_step_start.time_step_index[0]}' - ), - x, - ) - ) + return pcol | pcol_label + ' create_sim_ts_index_key' >> beam.Map(lambda x: ( + (f'{log_uid(x["simulation"])}-{x["simulation"].location} {x["simulation_time_step"].block_start.simulation_time_step_start.time_step_index[0]}' + ), + x, + )) def adjust_sim_ts_to_next_index_key( pcol_label: str, pcol: beam.pvalue.PCollection[Dict[str, sight_pb2.Object]] ) -> beam.pvalue.PCollection[KeyedObjMap]: - return pcol | pcol_label + ' adjust_sim_ts_to_next_index_key' >> beam.Map( - lambda x: (f'{x[0].split()[0]} {int(x[0].split()[1]) + 1}', x[1]) - ) + return pcol | pcol_label + ' adjust_sim_ts_to_next_index_key' >> beam.Map( + lambda x: (f'{x[0].split()[0]} {int(x[0].split()[1]) + 1}', x[1])) def remove_key( pcol_label: str, pcol: beam.pvalue.PCollection[KeyedObjMap] ) -> beam.pvalue.PCollection[ObjMap]: - return pcol | pcol_label + ' remove_key' >> beam.Map(lambda x: x[1]) + return pcol | pcol_label + ' remove_key' >> beam.Map(lambda x: x[1]) def change_key_to_self( pcol_label: str, obj_label: str, pcol: beam.pvalue.PCollection[KeyedObjMap] ) -> beam.pvalue.PCollection[ObjMap]: - return pcol | pcol_label + ' change_key_to_self' >> beam.Map( - lambda x: ( - f'{x[1][obj_label].location} - {log_uid(x[1][obj_label])}', - x[1], - ) - ) + return pcol | pcol_label + ' change_key_to_self' >> beam.Map(lambda x: ( + f'{x[1][obj_label].location} - {log_uid(x[1][obj_label])}', + x[1], + )) def change_key_to_parent( pcol_label: str, obj_label: str, pcol: 
beam.pvalue.PCollection[KeyedObjMap]
 ) -> beam.pvalue.PCollection[ObjMap]:
-  return pcol | pcol_label + ' change_key_to_parent' >> beam.Map(
-      lambda x: (
-          (
-              f'{x[1][obj_label].ancestor_start_location[-2]} -'
-              f' {log_uid(x[1][obj_label])}'
-          ),
-          x[1],
-      )
-  )
+    return pcol | pcol_label + ' change_key_to_parent' >> beam.Map(lambda x: (
+        (f'{x[1][obj_label].ancestor_start_location[-2]} -'
+         f' {log_uid(x[1][obj_label])}'),
+        x[1],
+    ))


 class ExtractAncestorBlockStartLocations(beam.DoFn):
-  """Beam stage that extracts each object's ancestor context locations."""
+    """Beam stage that extracts each object's ancestor context locations."""

-  def process(self, obj: sight_pb2.Object) -> Iterator[KeyedObjMap]:
-    """Extracts each object's ancestor context locations.
+    def process(self, obj: sight_pb2.Object) -> Iterator[KeyedObjMap]:
+        """Extracts each object's ancestor context locations.

     Includes the starting points of the blocks containing the object and,
     if the object ends a block, the starting point of that block.
@@ -272,24 +238,28 @@ def process(self, obj: sight_pb2.Object) -> Iterator[KeyedObjMap]:
       Pairs with the starting point of each object's ancestral context block
       and the object itself.
     """
-    for ancestor_start_location in obj.ancestor_start_location:
-      yield (f'{ancestor_start_location} - {log_uid(obj)}', {'object': obj})
+        for ancestor_start_location in obj.ancestor_start_location:
+            yield (f'{ancestor_start_location} - {log_uid(obj)}', {
+                'object': obj
+            })

-    if obj.sub_type == sight_pb2.Object.ST_BLOCK_END:
-      yield (
-          f'{obj.block_end.location_of_block_start} - {log_uid(obj)}',
-          {'object': obj},
-      )
+        if obj.sub_type == sight_pb2.Object.ST_BLOCK_END:
+            yield (
+                f'{obj.block_end.location_of_block_start} - {log_uid(obj)}',
+                {
+                    'object': obj
+                },
+            )


 class AddAncestorKeysToObjs(beam.DoFn):
-  """Beam stage that extracts each object's ancestor context locations."""
+    """Beam stage that attaches ancestor-context keys to each object map."""

-  def __init__(self, anchor_obj_label: str):
-    self.anchor_obj_label = anchor_obj_label
+    def __init__(self, anchor_obj_label: str):
+        self.anchor_obj_label = anchor_obj_label

-  def process(self, task: ObjMap) -> Iterator[KeyedObjMap]:
-    """Attaches the ancestor locations of each object under .anchor_obj_label.
+    def process(self, task: ObjMap) -> Iterator[KeyedObjMap]:
+        """Attaches the ancestor locations of each object under .anchor_obj_label.

     Includes the starting points of the blocks containing the object and,
     if the object ends a block, the starting point of that block.
@@ -301,39 +271,40 @@ def process(self, task: ObjMap) -> Iterator[KeyedObjMap]:
       Pairs with the starting point of each object's ancestral context block
       and the map itself.
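+
+      Example (illustrative values): for an object whose
+      ancestor_start_location is ['0.1', '0.1.2'] and whose log_uid
+      attribute is 'log1', this yields ('0.1 - log1', task) and
+      ('0.1.2 - log1', task); a block-end object additionally yields the
+      same pair keyed by its block's start location.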
""" - obj = task[self.anchor_obj_label] - for ancestor_start_location in obj.ancestor_start_location: - yield (f'{ancestor_start_location} - {log_uid(obj)}', task) + obj = task[self.anchor_obj_label] + for ancestor_start_location in obj.ancestor_start_location: + yield (f'{ancestor_start_location} - {log_uid(obj)}', task) - if obj.sub_type == sight_pb2.Object.ST_BLOCK_END: - yield (f'{obj.block_end.location_of_block_start} - {log_uid(obj)}', task) + if obj.sub_type == sight_pb2.Object.ST_BLOCK_END: + yield (f'{obj.block_end.location_of_block_start} - {log_uid(obj)}', + task) def objs_with_ancestor_keys( - objects_map: KeyedObjMap, anchor_obj_label: str -) -> beam.pvalue.PCollection[KeyedObjMap]: - return remove_key( - 'objs_with_ancestor_keys ' + anchor_obj_label, objects_map - ) | 'objs_with_ancestor_keys ' + anchor_obj_label >> beam.ParDo( - AddAncestorKeysToObjs(anchor_obj_label) - ) + objects_map: KeyedObjMap, + anchor_obj_label: str) -> beam.pvalue.PCollection[KeyedObjMap]: + return remove_key( + 'objs_with_ancestor_keys ' + anchor_obj_label, objects_map + ) | 'objs_with_ancestor_keys ' + anchor_obj_label >> beam.ParDo( + AddAncestorKeysToObjs(anchor_obj_label)) class CombineRecords(beam.DoFn): - """Combines CoGroupByKey-joined dicts from two sources.""" - - def __init__( - self, - source1_label: str, - source2_label: str, - ): - self.source1_label = source1_label - self.source2_label = source2_label - - def process( - self, task: Tuple[Any, Dict[str, Sequence[Dict[str, sight_pb2.Object]]]] - ) -> Iterator[ObjMap]: - """Combines CoGroupByKey-joined dicts from two sources. + """Combines CoGroupByKey-joined dicts from two sources.""" + + def __init__( + self, + source1_label: str, + source2_label: str, + ): + self.source1_label = source1_label + self.source2_label = source2_label + + def process( + self, task: Tuple[Any, Dict[str, Sequence[Dict[str, + sight_pb2.Object]]]] + ) -> Iterator[ObjMap]: + """Combines CoGroupByKey-joined dicts from two sources. Args: task: Length <=1 sequences of dicts from two sources, indexed at labels @@ -345,36 +316,36 @@ def process( If the length of a given source is 0, its key-value pairs are not included in the output dict. """ - x: Dict[str, Sequence[ObjMap]] = task[1] - source1: Sequence[ObjMap] = x[self.source1_label] - if len(source1) > 1: - logging.error( - 'Source 1 (%s) has %d entries, which is >1.', - self.source1_label, - len(source1), - ) - return - source2: List[ObjMap] = list(task[1][self.source2_label]) - if len(source2) > 1: - logging.error( - 'Source 2 (%s) has %d entries, which is >1.', - self.source2_label, - len(source2), - ) - return - - result: ObjMap = {} - if source1: - for key, val in source1[0].items(): - result[key] = val - if source2: - for key, val in source2[0].items(): - result[key] = val - yield result + x: Dict[str, Sequence[ObjMap]] = task[1] + source1: Sequence[ObjMap] = x[self.source1_label] + if len(source1) > 1: + logging.error( + 'Source 1 (%s) has %d entries, which is >1.', + self.source1_label, + len(source1), + ) + return + source2: List[ObjMap] = list(task[1][self.source2_label]) + if len(source2) > 1: + logging.error( + 'Source 2 (%s) has %d entries, which is >1.', + self.source2_label, + len(source2), + ) + return + + result: ObjMap = {} + if source1: + for key, val in source1[0].items(): + result[key] = val + if source2: + for key, val in source2[0].items(): + result[key] = val + yield result class ParentChildPairs(beam.DoFn): - """Given a parent and a list of children, emits parent-child pairs. 
+ """Given a parent and a list of children, emits parent-child pairs. The key of these pairs is the location of the child object. @@ -385,20 +356,21 @@ class ParentChildPairs(beam.DoFn): the location of the parent or the child object. """ - def __init__( - self, - ancestors: str, - child: str, - index_by_parent: bool, - ): - self.ancestors = ancestors - self.child = child - self.index_by_parent = index_by_parent - - def process( - self, task: Tuple[str, Dict[str, Sequence[Dict[str, sight_pb2.Object]]]] - ) -> Iterator[KeyedObjMap]: - """Combines objects and their ancestors. + def __init__( + self, + ancestors: str, + child: str, + index_by_parent: bool, + ): + self.ancestors = ancestors + self.child = child + self.index_by_parent = index_by_parent + + def process( + self, task: Tuple[str, Dict[str, Sequence[Dict[str, + sight_pb2.Object]]]] + ) -> Iterator[KeyedObjMap]: + """Combines objects and their ancestors. Args: task: A pair of a key and - a sequence of ancestor log objects (assumed to @@ -410,58 +382,55 @@ def process( second is a dictionary that contains all the ancestors and the child object. """ - ancestors_objs = task[1][self.ancestors] - child_objs = task[1][self.child] - - # Skip named values that are not directly contained by ancestors. - if not ancestors_objs: - return - - if len(ancestors_objs) != 1: - logging.error( - ( - 'Child objects cannot be contained within multiple ancestors!.' - ' task=%s' - ), - task, - ) - return - - for child_obj in child_objs: - cur = ancestors_objs[0].copy() - for key in child_obj: - if key not in cur: - cur[key] = child_obj[key] - if self.index_by_parent: - location_idx = task[0] - else: - location_idx = ( - f'{child_obj[self.child].location} -' - f' {log_uid(child_obj[self.child])}' - ) - yield (location_idx, cur) + ancestors_objs = task[1][self.ancestors] + child_objs = task[1][self.child] + + # Skip named values that are not directly contained by ancestors. + if not ancestors_objs: + return + + if len(ancestors_objs) != 1: + logging.error( + ('Child objects cannot be contained within multiple ancestors!.' + ' task=%s'), + task, + ) + return + + for child_obj in child_objs: + cur = ancestors_objs[0].copy() + for key in child_obj: + if key not in cur: + cur[key] = child_obj[key] + if self.index_by_parent: + location_idx = task[0] + else: + location_idx = (f'{child_obj[self.child].location} -' + f' {log_uid(child_obj[self.child])}') + yield (location_idx, cur) class SimulationStateNamedValuesToObjects(beam.DoFn): - """Converts named value sub-logs within simulation containers into objects. + """Converts named value sub-logs within simulation containers into objects. Attributes: ancestors: Key of the ancestors object within the task dicts. value_objects: Key of the value_objects within the task dicts. """ - def __init__( - self, - ancestors: str, - value_objects: str, - ): - self.ancestors = ancestors - self.value_objects = value_objects + def __init__( + self, + ancestors: str, + value_objects: str, + ): + self.ancestors = ancestors + self.value_objects = value_objects - def process( - self, task: Tuple[str, Dict[str, Sequence[Dict[str, sight_pb2.Object]]]] - ) -> Iterator[KeyedObjMap]: - """Converts named value sub-logs within simulation containers into values. + def process( + self, task: Tuple[str, Dict[str, Sequence[Dict[str, + sight_pb2.Object]]]] + ) -> Iterator[KeyedObjMap]: + """Converts named value sub-logs within simulation containers into values. 
Args: task: A simulation container and the start of a named object, paired with @@ -471,44 +440,38 @@ def process( Tuples where the first element is the location of the container object and the second maps the container and the value object. """ - # Skip named values that are not directly contained by a simulation - # block (parameters or state). - if not task[1][self.ancestors]: - return - - if len(task[1][self.ancestors]) != 1: - logging.error( - ( - 'Named values sub-logs cannot be contained within multiple named' - ' values or containers!. task=%s' - ), - task, - ) - return - - if isinstance(task[1][self.ancestors][0], dict): - log_and_obj: ObjMap = task[1][self.ancestors][0].copy() - else: - log_and_obj: ObjMap = {} - log_and_obj['object'] = data_structures.from_log( - [o['object'] for o in task[1][self.value_objects]] - ) - yield ( - ( - f'{log_and_obj["named_value"].location} -' - f' {log_uid(log_and_obj["named_value"])}' - ), - log_and_obj, - ) + # Skip named values that are not directly contained by a simulation + # block (parameters or state). + if not task[1][self.ancestors]: + return + + if len(task[1][self.ancestors]) != 1: + logging.error( + ('Named values sub-logs cannot be contained within multiple named' + ' values or containers!. task=%s'), + task, + ) + return + + if isinstance(task[1][self.ancestors][0], dict): + log_and_obj: ObjMap = task[1][self.ancestors][0].copy() + else: + log_and_obj: ObjMap = {} + log_and_obj['object'] = data_structures.from_log( + [o['object'] for o in task[1][self.value_objects]]) + yield ( + (f'{log_and_obj["named_value"].location} -' + f' {log_uid(log_and_obj["named_value"])}'), + log_and_obj, + ) class NamedObjectsToSequence(beam.DoFn): - """Converts sets of named value objects to time-ordered sequences.""" + """Converts sets of named value objects to time-ordered sequences.""" - def process( - self, task: Tuple[Any, Iterable[AnyObjMap]] - ) -> Iterator[AnyObjMap]: - """Time-orders the sequence of objects for a given simulation attribute. + def process(self, task: Tuple[Any, + Iterable[AnyObjMap]]) -> Iterator[AnyObjMap]: + """Time-orders the sequence of objects for a given simulation attribute. Args: task: A sequence of objects that describe the state of some simulation @@ -517,55 +480,45 @@ def process( Yields: A time-ordered version of the input sequence. """ - ordered_seq = sorted( - task[1], - key=lambda x: list( - x[ - 'simulation_time_step' - ].block_start.simulation_time_step_start.time_step_index - ), - ) - ts_indexes = np.array( - [ - x[ - 'simulation_time_step' - ].block_start.simulation_time_step_start.time_step_index - for x in ordered_seq - ], - ) - time_steps = np.array( - [ - x[ - 'simulation_time_step' - ].block_start.simulation_time_step_start.time_step - for x in ordered_seq - ], - ) - values = np.array([x['object'][1] for x in ordered_seq]) - - yield { - 'simulation': ordered_seq[0]['simulation'], - 'cluster_id': ordered_seq[0].get('cluster_id'), - 'variable': ordered_seq[0]['named_value'].block_start.label, - 'values': values, - 'ts_indexes': ts_indexes, - 'time_steps': time_steps, - } + ordered_seq = sorted( + task[1], + key=lambda x: list(x['simulation_time_step'].block_start. + simulation_time_step_start.time_step_index), + ) + ts_indexes = np.array([ + x['simulation_time_step'].block_start.simulation_time_step_start. + time_step_index for x in ordered_seq + ], ) + time_steps = np.array([ + x['simulation_time_step'].block_start.simulation_time_step_start. 
+ time_step for x in ordered_seq + ], ) + values = np.array([x['object'][1] for x in ordered_seq]) + + yield { + 'simulation': ordered_seq[0]['simulation'], + 'cluster_id': ordered_seq[0].get('cluster_id'), + 'variable': ordered_seq[0]['named_value'].block_start.label, + 'values': values, + 'ts_indexes': ts_indexes, + 'time_steps': time_steps, + } class CombineParametersAndTimeSeries(beam.DoFn): - """Combines the parameters and variable state time series of a simulation.""" + """Combines the parameters and variable state time series of a simulation.""" - def __init__( - self, - params_label: str, - variables_label: str, - ): - self.params_label = params_label - self.variables_label = variables_label + def __init__( + self, + params_label: str, + variables_label: str, + ): + self.params_label = params_label + self.variables_label = variables_label - def process(self, task: Tuple[Any, Dict[str, List[ObjMap]]]) -> Iterator[Log]: - """Combines the parameters and variable state time series of a simulation. + def process(self, task: Tuple[Any, Dict[str, + List[ObjMap]]]) -> Iterator[Log]: + """Combines the parameters and variable state time series of a simulation. Args: task: A sequence of objects that describe the state of some simulation @@ -574,15 +527,15 @@ def process(self, task: Tuple[Any, Dict[str, List[ObjMap]]]) -> Iterator[Log]: Yields: A time-ordered version of the input sequence. """ - parameters = list(task[1][self.params_label]) - variables = list(task[1][self.variables_label]) + parameters = list(task[1][self.params_label]) + variables = list(task[1][self.variables_label]) - all_parameters = [p['object'] for p in parameters] + all_parameters = [p['object'] for p in parameters] - for v in variables: - res = v.copy() - res['parameters'] = all_parameters - yield res + for v in variables: + res = v.copy() + res['parameters'] = all_parameters + yield res def combine_parent_and_child( @@ -592,7 +545,7 @@ def combine_parent_and_child( child_pcol: beam.pvalue.PCollection[KeyedObjMap], index_by_parent: bool, ) -> beam.pvalue.PCollection[KeyedObjMap]: - """Joins a parent Objects to child Objects. + """Joins a parent Objects to child Objects. Args: parent_label: identifies the parent PCollection. @@ -609,18 +562,15 @@ def combine_parent_and_child( (the parent Object's location and log_uid). This collection is keyed by the location and log_uid of the child Object. """ - return ( - { - parent_label: parent_pcol, - child_label: child_pcol, - } - | parent_label + ' ' + child_label + ' CoGroupByKey' - >> beam.CoGroupByKey() - | parent_label + ' ' + child_label + ' ParentChildPairs' - >> beam.ParDo( - ParentChildPairs(parent_label, child_label, index_by_parent) - ) - ) + return ( + { + parent_label: parent_pcol, + child_label: child_pcol, + } + | parent_label + ' ' + child_label + ' CoGroupByKey' >> + beam.CoGroupByKey() + | parent_label + ' ' + child_label + ' ParentChildPairs' >> beam.ParDo( + ParentChildPairs(parent_label, child_label, index_by_parent))) def named_values_to_objects( @@ -629,7 +579,7 @@ def named_values_to_objects( child_label: str, objects_with_ancestors: beam.pvalue.PCollection[KeyedObjMap], ) -> beam.pvalue.PCollection[KeyedObjMap]: - """Converts named value log regions into their corresponding Python objects. + """Converts named value log regions into their corresponding Python objects. 
Args: parent_label: Unique label (among pipeline stages) for the collection of @@ -644,21 +594,16 @@ def named_values_to_objects( Maps that contain the ST_NAMED_VALUES and their corresponding Python value objects and with the key of the ST_NAMED_VALUES Object. """ - return ( - { - parent_label: parent_pcol, - child_label: objects_with_ancestors, - } - | parent_label + ' ' + child_label + ' CoGroupByKey' - >> beam.CoGroupByKey() - | parent_label - + ' ' - + child_label - + ' SimulationStateNamedValuesToObjects' - >> beam.ParDo( - SimulationStateNamedValuesToObjects(parent_label, child_label) - ) - ) + return ( + { + parent_label: parent_pcol, + child_label: objects_with_ancestors, + } + | parent_label + ' ' + child_label + ' CoGroupByKey' >> + beam.CoGroupByKey() + | parent_label + ' ' + child_label + + ' SimulationStateNamedValuesToObjects' >> beam.ParDo( + SimulationStateNamedValuesToObjects(parent_label, child_label))) def create_simulation_and_parameter_objects( @@ -669,7 +614,7 @@ def create_simulation_and_parameter_objects( named_value: beam.pvalue.PCollection[KeyedObjMap], log_file_path_prefix: Optional[str], ) -> beam.pvalue.PCollection[AnyObjMap]: - """Combines simulations and their parameter values. + """Combines simulations and their parameter values. Args: log: All log objects. @@ -685,33 +630,32 @@ def create_simulation_and_parameter_objects( AnyObjMaps that contain simulation objects, their contained simulation parameter objects, and the named values of those parameters. """ - simulations_and_parameters = combine_parent_and_child( - 'simulation', - simulation, - 'simulation_parameters', - simulation_parameters, - index_by_parent=False, - ) - - simulation_parameters_and_named_values_key_named_value = ( - combine_parent_and_child( - 'simulations_and_parameters', - simulations_and_parameters, - 'named_value', - named_value, - index_by_parent=False, - ) - ) - - return remove_key( - 'simulation_and_parameter_objects', - named_values_to_objects( - 'simulation_parameters_and_named_values_key_named_value_objects', - simulation_parameters_and_named_values_key_named_value, - 'objects', - objects_with_ancestors, - ), - ) + simulations_and_parameters = combine_parent_and_child( + 'simulation', + simulation, + 'simulation_parameters', + simulation_parameters, + index_by_parent=False, + ) + + simulation_parameters_and_named_values_key_named_value = ( + combine_parent_and_child( + 'simulations_and_parameters', + simulations_and_parameters, + 'named_value', + named_value, + index_by_parent=False, + )) + + return remove_key( + 'simulation_and_parameter_objects', + named_values_to_objects( + 'simulation_parameters_and_named_values_key_named_value_objects', + simulation_parameters_and_named_values_key_named_value, + 'objects', + objects_with_ancestors, + ), + ) def create_simulation_states_params_and_named_value_objects( @@ -720,10 +664,9 @@ def create_simulation_states_params_and_named_value_objects( simulation_parameters: beam.pvalue.PCollection[KeyedObjMap], named_value: beam.pvalue.PCollection[KeyedObjMap], log_file_path_prefix: Optional[str], -) -> Tuple[ - beam.pvalue.PCollection[AnyObjMap], beam.pvalue.PCollection[AnyObjMap] -]: - """Combines simulation states and the named values within them. +) -> Tuple[beam.pvalue.PCollection[AnyObjMap], + beam.pvalue.PCollection[AnyObjMap]]: + """Combines simulation states and the named values within them. 
Args: objects_with_ancestors: Objects, keyed by the start locations of any blocks @@ -738,37 +681,37 @@ def create_simulation_states_params_and_named_value_objects( AnyObjMaps that contain simulation state objects and their associated named values. """ - named_value_objects = named_values_to_objects( - 'named_value', - change_key_to_self('named_value_to_key_self', 'named_value', named_value), - 'objects', - objects_with_ancestors, - ) - named_value_objects_to_key_parent = change_key_to_parent( - 'named_value_objects_to_key_parent', 'named_value', named_value_objects - ) - - sim_state_named_values_key_state = combine_parent_and_child( - 'simulation_state', - change_key_to_self( - 'simulation_state_to_key_self', 'simulation_state', simulation_state - ), - 'named_value', - named_value_objects_to_key_parent, - index_by_parent=True, - ) - sim_params_named_values_key_params = combine_parent_and_child( - 'simulation_parameters', - change_key_to_self( - 'simulation_parameters_to_key_self', - 'simulation_parameters', - simulation_parameters, - ), - 'named_value', - named_value_objects_to_key_parent, - index_by_parent=True, - ) - return sim_state_named_values_key_state, sim_params_named_values_key_params + named_value_objects = named_values_to_objects( + 'named_value', + change_key_to_self('named_value_to_key_self', 'named_value', + named_value), + 'objects', + objects_with_ancestors, + ) + named_value_objects_to_key_parent = change_key_to_parent( + 'named_value_objects_to_key_parent', 'named_value', + named_value_objects) + + sim_state_named_values_key_state = combine_parent_and_child( + 'simulation_state', + change_key_to_self('simulation_state_to_key_self', 'simulation_state', + simulation_state), + 'named_value', + named_value_objects_to_key_parent, + index_by_parent=True, + ) + sim_params_named_values_key_params = combine_parent_and_child( + 'simulation_parameters', + change_key_to_self( + 'simulation_parameters_to_key_self', + 'simulation_parameters', + simulation_parameters, + ), + 'named_value', + named_value_objects_to_key_parent, + index_by_parent=True, + ) + return sim_state_named_values_key_state, sim_params_named_values_key_params def create_simulation_time_step_state_objects( @@ -779,7 +722,7 @@ def create_simulation_time_step_state_objects( named_value: beam.pvalue.PCollection[KeyedObjMap], log_file_path_prefix: Optional[str], ) -> beam.pvalue.PCollection[AnyObjMap]: - """Combines simulations and their time step values. + """Combines simulations and their time step values. Args: objects_with_ancestors: Objects, keyed by the start locations of any blocks @@ -796,59 +739,59 @@ def create_simulation_time_step_state_objects( time step objects, the simulation state objects within those and their associated named values. """ - named_value_objects = named_values_to_objects( - 'named_value', - change_key_to_self('named_value_to_key_self', 'named_value', named_value), - 'objects', - objects_with_ancestors, - ) - - # Connect simulation states to the named values logged within them. - sim_state_named_values_key_state = combine_parent_and_child( - 'simulation_state', - change_key_to_self( - 'simulation_state_to_key_self', 'simulation_state', simulation_state - ), - 'named_value', - change_key_to_parent( - 'named_value_objects_to_key_parent', - 'named_value', - named_value_objects, - ), - index_by_parent=True, - ) - - # Connect simulation time steps to their logged states and their named values. 
- sim_ts_state_named_values_key_state = combine_parent_and_child( - 'simulation_time_step', - change_key_to_self( - 'simulation_time_step_to_key_self', - 'simulation_time_step', - simulation_time_step, - ), - 'sim_state_named_values_key_state', - change_key_to_parent( - 'sim_state_named_values_key_state_to_key_parent', - 'simulation_state', - sim_state_named_values_key_state, - ), - index_by_parent=True, - ) - - # Connect simulations to their timesteps and logged states. - sim_simul_ts_state_named_values_key_state = combine_parent_and_child( - 'simulation', - change_key_to_self('simulation_to_key_self', 'simulation', simulation), - 'simulation_time_step', - change_key_to_parent( - 'sim_ts_state_named_values_key_state_to_key_parent', - 'simulation_time_step', - sim_ts_state_named_values_key_state, - ), - index_by_parent=True, - ) - - return remove_key( - 'sim_simul_ts_state_named_values_key_state', - sim_simul_ts_state_named_values_key_state, - ) + named_value_objects = named_values_to_objects( + 'named_value', + change_key_to_self('named_value_to_key_self', 'named_value', + named_value), + 'objects', + objects_with_ancestors, + ) + + # Connect simulation states to the named values logged within them. + sim_state_named_values_key_state = combine_parent_and_child( + 'simulation_state', + change_key_to_self('simulation_state_to_key_self', 'simulation_state', + simulation_state), + 'named_value', + change_key_to_parent( + 'named_value_objects_to_key_parent', + 'named_value', + named_value_objects, + ), + index_by_parent=True, + ) + + # Connect simulation time steps to their logged states and their named values. + sim_ts_state_named_values_key_state = combine_parent_and_child( + 'simulation_time_step', + change_key_to_self( + 'simulation_time_step_to_key_self', + 'simulation_time_step', + simulation_time_step, + ), + 'sim_state_named_values_key_state', + change_key_to_parent( + 'sim_state_named_values_key_state_to_key_parent', + 'simulation_state', + sim_state_named_values_key_state, + ), + index_by_parent=True, + ) + + # Connect simulations to their timesteps and logged states. 
+ sim_simul_ts_state_named_values_key_state = combine_parent_and_child( + 'simulation', + change_key_to_self('simulation_to_key_self', 'simulation', simulation), + 'simulation_time_step', + change_key_to_parent( + 'sim_ts_state_named_values_key_state_to_key_parent', + 'simulation_time_step', + sim_ts_state_named_values_key_state, + ), + index_by_parent=True, + ) + + return remove_key( + 'sim_simul_ts_state_named_values_key_state', + sim_simul_ts_state_named_values_key_state, + ) diff --git a/py/sight/widgets/simulation/bulk_inference.py b/py/sight/widgets/simulation/bulk_inference.py index 687cab3..b4258cd 100644 --- a/py/sight/widgets/simulation/bulk_inference.py +++ b/py/sight/widgets/simulation/bulk_inference.py @@ -2,7 +2,7 @@ from absl import app from absl import flags -from absl import logging +from helpers.logs.logs_handler import logger as logging from datetime import datetime import json import os @@ -60,17 +60,16 @@ ) - def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError('Too many command-line arguments.') + if len(argv) > 1: + raise app.UsageError('Too many command-line arguments.') - with open(_INPUT_META_PATH.value) as f: - meta = json.load(f) - max_input_len = meta['max_input_len'] - max_pred_len = meta['max_pred_len'] + with open(_INPUT_META_PATH.value) as f: + meta = json.load(f) + max_input_len = meta['max_input_len'] + max_pred_len = meta['max_pred_len'] - cmd = [ + cmd = [ '/google/bin/releases/tunelab/public/bulk_inference_jax_on_beam', f'--input_spec=arrayrecord:/cns/oj-d/home/bronevet/kokua/experiments/bronevet/dataset/simulation_transformer_{_LOG_ID.value}/simulation_transformer_{_LOG_ID.value}/validation/simulation_transformer_{_LOG_ID.value}.array_record-00000-of-00001', f'--output_spec=arrayrecord:/cns/oj-d/home/bronevet/kokua/experiments/bronevet/dataset/simulation_transformer_{_LOG_ID.value}/simulation_transformer_{_LOG_ID.value}/validation/predictions/model_output.recordio@*', @@ -90,18 +89,19 @@ def main(argv: Sequence[str]) -> None: f'--platform={_PLATFORM_NAME.value}', f'--topology={_PLATFORM_MESH.value}', f'--ici_mesh_shape="{_MESH.value}"', - ] - if _CHECKPOINT_PATH.value: - cmd.append(f'--model_checkpoint={_CHECKPOINT_PATH.value}') - elif _TRAINER_XID.value: - cmd.append(f'--trainer_xid={_TRAINER_XID.value}') - print(' '.join(cmd)) - out = subprocess.run(cmd, - capture_output=True, - check=True, - ) - print(out) + ] + if _CHECKPOINT_PATH.value: + cmd.append(f'--model_checkpoint={_CHECKPOINT_PATH.value}') + elif _TRAINER_XID.value: + cmd.append(f'--trainer_xid={_TRAINER_XID.value}') + print(' '.join(cmd)) + out = subprocess.run( + cmd, + capture_output=True, + check=True, + ) + print(out) if __name__ == '__main__': - app.run(main) + app.run(main) diff --git a/py/sight/widgets/simulation/fine_tune_gemini.py b/py/sight/widgets/simulation/fine_tune_gemini.py index 0499129..b55a2d3 100644 --- a/py/sight/widgets/simulation/fine_tune_gemini.py +++ b/py/sight/widgets/simulation/fine_tune_gemini.py @@ -2,7 +2,7 @@ from absl import app from absl import flags -from absl import logging +from helpers.logs.logs_handler import logger as logging from datetime import datetime import json import os @@ -92,20 +92,20 @@ def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError('Too many command-line arguments.') + if len(argv) > 1: + raise app.UsageError('Too many command-line arguments.') - date = datetime.today().strftime('%Y-%m-%d.%H:%M:%S') - dataset_id = f'simulation_transformer_{_LOG_ID.value}' - 
output_path=f'{_BASE_OUTPUT_PATH.value}/experiments/{os.environ["USER"]}/{dataset_id}/{_VARIANT.value}/{date}' + date = datetime.today().strftime('%Y-%m-%d.%H:%M:%S') + dataset_id = f'simulation_transformer_{_LOG_ID.value}' + output_path = f'{_BASE_OUTPUT_PATH.value}/experiments/{os.environ["USER"]}/{dataset_id}/{_VARIANT.value}/{date}' - with open(_INPUT_META_PATH.value) as f: - meta = json.load(f) - max_input_len = meta['max_input_len'] - max_pred_len = meta['max_pred_len'] + with open(_INPUT_META_PATH.value) as f: + meta = json.load(f) + max_input_len = meta['max_input_len'] + max_pred_len = meta['max_pred_len'] - with open('/tmp/mixtures.textproto', 'w') as f: - f.write(f""" + with open('/tmp/mixtures.textproto', 'w') as f: + f.write(f""" # proto-file: google3/learning/language/tunelab/tunekit/api/common/proto/task.proto # proto-message: Task @@ -125,8 +125,8 @@ def main(argv: Sequence[str]) -> None: label_key: "pred" }} """) - cmd = [ - '/google/bin/releases/tunelab/public/finetune', + cmd = [ + '/google/bin/releases/tunelab/public/finetune', f'--family={_FAMILY.value}', f'--variant={_VARIANT.value}', f'--task_proto_data_path=/tmp/mixtures.textproto', @@ -147,16 +147,17 @@ def main(argv: Sequence[str]) -> None: f'--cell={_CELL.value}', f'--platform={_PLATFORM_NAME.value}_{_PLATFORM_MESH.value}', f'--mesh={_MESH.value}', - ] - if _CHECKPOINT_PATH.value: - cmd.append(f'--checkpoint_path={_CHECKPOINT_PATH.value}') - print(' '.join(cmd)) - out = subprocess.run(cmd, - capture_output=True, - # check=True, - ) - print(out) + ] + if _CHECKPOINT_PATH.value: + cmd.append(f'--checkpoint_path={_CHECKPOINT_PATH.value}') + print(' '.join(cmd)) + out = subprocess.run( + cmd, + capture_output=True, + # check=True, + ) + print(out) if __name__ == '__main__': - app.run(main) + app.run(main) diff --git a/py/sight/widgets/simulation/generate_log_trans_dataset.py b/py/sight/widgets/simulation/generate_log_trans_dataset.py index f7779dd..5eaf9cd 100644 --- a/py/sight/widgets/simulation/generate_log_trans_dataset.py +++ b/py/sight/widgets/simulation/generate_log_trans_dataset.py @@ -2,7 +2,7 @@ from absl import app from absl import flags -from absl import logging +from helpers.logs.logs_handler import logger as logging import csv from dataclasses import dataclass import json @@ -54,167 +54,174 @@ @dataclass class Dataset: - train_df: pd.DataFrame - train_data_df: pd.DataFrame - validate_df: pd.DataFrame - validate_data_df: pd.DataFrame - max_input_len: int - max_pred_len: int + train_df: pd.DataFrame + train_data_df: pd.DataFrame + validate_df: pd.DataFrame + validate_data_df: pd.DataFrame + max_input_len: int + max_pred_len: int + def generate_prediction(row: np.ndarray, columns: Sequence[str]) -> str: - """Returns the representation of row to use as the string the transformer will predict. - + """Returns the representation of row to use as the string the transformer will predict. + Arguments: row: The data row, containing data for all the columns. - columns: The names of the columns, one for each row element. Their names include the + columns: The names of the columns, one for each row element. Their names include the prefix 'autoreg:', 'boundary:' or 'initial:' to indicate their role in the simulation. 
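+
+    Example (illustrative values): with
+    columns = ['initial:x', 'boundary:y', 'autoreg:z'] and
+    row = ['1', '2', '3'], only the 'autoreg:' column is kept, so the
+    returned prediction string is '3'.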
""" - data = [] - for i, c in enumerate(columns): - if c.startswith('autoreg:'): - data.append(str(row[i])) - return ' '.join(data) - -def generate_input(rows: Sequence[np.ndarray], next_row: np.ndarray, columns: Sequence[str]) -> str: - """Returns the representation of rows to use as the string the transformer will take as input. - + data = [] + for i, c in enumerate(columns): + if c.startswith('autoreg:'): + data.append(str(row[i])) + return ' '.join(data) + + +def generate_input(rows: Sequence[np.ndarray], next_row: np.ndarray, + columns: Sequence[str]) -> str: + """Returns the representation of rows to use as the string the transformer will take as input. + Arguments: rows: The data rows, containing data for all columns. - columns: The names of the columns, one for each row element. Their names include the + columns: The names of the columns, one for each row element. Their names include the prefix 'autoreg:', 'boundary:' or 'initial:' to indicate their role in the simulation. """ - # print('rows: ', type(rows)) - # print('columns: ', type(columns)) - out = '' - for row_idx, row in enumerate(rows): - if row_idx==0: - out += 'initial:' - # print('row=', row) - # print('columns=', columns) - for i, c in enumerate(columns): - # print(i,': ', i, ' row[i]=', row[i], ' c=', c) - if c.startswith('initial:'): - out +=' ' + str(row[i]) - out += ', ' - else: - out += '| ' - out += 'boundary:' + # print('rows: ', type(rows)) + # print('columns: ', type(columns)) + out = '' + for row_idx, row in enumerate(rows): + if row_idx == 0: + out += 'initial:' + # print('row=', row) + # print('columns=', columns) + for i, c in enumerate(columns): + # print(i,': ', i, ' row[i]=', row[i], ' c=', c) + if c.startswith('initial:'): + out += ' ' + str(row[i]) + out += ', ' + else: + out += '| ' + out += 'boundary:' + for i, c in enumerate(columns): + if c.startswith('boundary:'): + out += ' ' + str(row[i]) + out += ', autoreg:' + for i, c in enumerate(columns): + if c.startswith('autoreg:'): + out += ' ' + str(row[i]) + out += '| boundary:' for i, c in enumerate(columns): - if c.startswith('boundary:'): - out +=' ' + str(row[i]) - out += ', autoreg:' - for i, c in enumerate(columns): - if c.startswith('autoreg:'): - out +=' ' + str(row[i]) - out += '| boundary:' - for i, c in enumerate(columns): - if c.startswith('boundary:'): - out +=' ' + str(next_row[i]) - - return out - - -def build_dataset(sim_log: pd.DataFrame, hist_len: int, train_frac:float) -> Dataset: - """Loads the simulation log dataset and splits it into a training and a validation set. - + if c.startswith('boundary:'): + out += ' ' + str(next_row[i]) + + return out + + +def build_dataset(sim_log: pd.DataFrame, hist_len: int, + train_frac: float) -> Dataset: + """Loads the simulation log dataset and splits it into a training and a validation set. + Arguments: sim_log: The full log that contains the time series of all simiulation runs. - hist_len: the number of time steps to use as input for each model + hist_len: the number of time steps to use as input for each model prediction. train_frac: the fraction of the dataset to use for training. - + Returns: The training and validation datasets, each of which has columns input and target. 
""" - simulations = sim_log.groupby(['sim_location']) - - train_inputs = [] - train_preds = [] - train_data = [] - validate_inputs = [] - validate_preds = [] - validate_data = [] - max_input_len = 0 - max_pred_len = 0 - - for _, sim_log in simulations: - if rn.random() < train_frac: - inputs = train_inputs - preds = train_preds - data = train_data - else: - inputs = validate_inputs - preds = validate_preds - data = validate_data - - hist = [] - data_columns = list(sim_log.columns[3:]) - for idx in range(sim_log.shape[0]): - cur_row = sim_log.iloc[idx].values.astype(str) - data.append(cur_row) - # logging.info('inputs(#%d)=%s', len(cur_row), cur_row) - if len(hist) == hist_len: - # next_input = ' '.join(hist) - input = generate_input(hist, cur_row[3:], data_columns) - prediction = generate_prediction(cur_row[3:], data_columns) - - max_input_len = max(max_input_len, len(input)) - inputs.append(input) - - max_pred_len = max(max_pred_len, len(prediction)) - preds.append(prediction) - - hist.pop(0) - hist.append(cur_row[3:]) - # logging.info('inputs(#%d)=%s', len(inputs), inputs) - # logging.info('preds(#%d)=%s', len(preds), preds) - - - train_df = pd.DataFrame( - { - 'input': train_inputs, - 'pred': train_preds, - } - ) - - validate_df = pd.DataFrame( - { - 'input': validate_inputs, - 'pred': validate_preds, - } - ) - - return Dataset( - train_df, - pd.DataFrame(train_data), - validate_df, - pd.DataFrame(validate_data), - max_input_len, - max_pred_len, - ) + simulations = sim_log.groupby(['sim_location']) + + train_inputs = [] + train_preds = [] + train_data = [] + validate_inputs = [] + validate_preds = [] + validate_data = [] + max_input_len = 0 + max_pred_len = 0 + + for _, sim_log in simulations: + if rn.random() < train_frac: + inputs = train_inputs + preds = train_preds + data = train_data + else: + inputs = validate_inputs + preds = validate_preds + data = validate_data + + hist = [] + data_columns = list(sim_log.columns[3:]) + for idx in range(sim_log.shape[0]): + cur_row = sim_log.iloc[idx].values.astype(str) + data.append(cur_row) + # logging.info('inputs(#%d)=%s', len(cur_row), cur_row) + if len(hist) == hist_len: + # next_input = ' '.join(hist) + input = generate_input(hist, cur_row[3:], data_columns) + prediction = generate_prediction(cur_row[3:], data_columns) + + max_input_len = max(max_input_len, len(input)) + inputs.append(input) + + max_pred_len = max(max_pred_len, len(prediction)) + preds.append(prediction) + + hist.pop(0) + hist.append(cur_row[3:]) + # logging.info('inputs(#%d)=%s', len(inputs), inputs) + # logging.info('preds(#%d)=%s', len(preds), preds) + + train_df = pd.DataFrame({ + 'input': train_inputs, + 'pred': train_preds, + }) + + validate_df = pd.DataFrame({ + 'input': validate_inputs, + 'pred': validate_preds, + }) + + return Dataset( + train_df, + pd.DataFrame(train_data), + validate_df, + pd.DataFrame(validate_data), + max_input_len, + max_pred_len, + ) def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError('Too many command-line arguments.') - - dataset = build_dataset( - sim_log = pd.read_csv(_INPUT_PATH.value), - hist_len = _HIST_LEN.value, - train_frac = _TRAIN_FRAC.value, - - ) - dataset.train_df.to_csv(_OUTPUT_PATH_TRAIN.value, index=False, quoting=csv.QUOTE_ALL) - dataset.train_data_df.to_csv(_OUTPUT_PATH_TRAIN_DATA.value, index=False, quoting=csv.QUOTE_ALL) - dataset.validate_df.to_csv(_OUTPUT_PATH_VAL.value, index=False, quoting=csv.QUOTE_ALL) - dataset.train_data_df.to_csv(_OUTPUT_PATH_VAL_DATA.value, index=False, 
quoting=csv.QUOTE_ALL) - - with open(_OUTPUT_META_PATH.value, 'w') as f: - json.dump({ - 'max_input_len': dataset.max_input_len, - 'max_pred_len': dataset.max_pred_len, - }, f) + if len(argv) > 1: + raise app.UsageError('Too many command-line arguments.') + + dataset = build_dataset( + sim_log=pd.read_csv(_INPUT_PATH.value), + hist_len=_HIST_LEN.value, + train_frac=_TRAIN_FRAC.value, + ) + dataset.train_df.to_csv(_OUTPUT_PATH_TRAIN.value, + index=False, + quoting=csv.QUOTE_ALL) + dataset.train_data_df.to_csv(_OUTPUT_PATH_TRAIN_DATA.value, + index=False, + quoting=csv.QUOTE_ALL) + dataset.validate_df.to_csv(_OUTPUT_PATH_VAL.value, + index=False, + quoting=csv.QUOTE_ALL) + dataset.train_data_df.to_csv(_OUTPUT_PATH_VAL_DATA.value, + index=False, + quoting=csv.QUOTE_ALL) + + with open(_OUTPUT_META_PATH.value, 'w') as f: + json.dump( + { + 'max_input_len': dataset.max_input_len, + 'max_pred_len': dataset.max_pred_len, + }, f) if __name__ == '__main__': - app.run(main) + app.run(main) diff --git a/py/sight/widgets/simulation/run_trace.py b/py/sight/widgets/simulation/run_trace.py index 35107c8..8e389b2 100644 --- a/py/sight/widgets/simulation/run_trace.py +++ b/py/sight/widgets/simulation/run_trace.py @@ -11,14 +11,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Extract and log the sequences for each value in Sight logs.""" from typing import Any, Dict, Iterable, Iterator, List, Sequence, Tuple from absl import app from absl import flags -from absl import logging +from helpers.logs.logs_handler import logger as logging import apache_beam as beam import numpy as np @@ -37,24 +36,19 @@ _IN_CLUSTERS_FILE = gpath_flag.DEFINE_path( 'in_clusters_file', None, - ( - 'Input file that contains the Sight log that documents how the ' - 'simulation runs are clustered.' - ), + ('Input file that contains the Sight log that documents how the ' + 'simulation runs are clustered.'), required=False, ) _NUM_CLUSTERS = flags.DEFINE_integer( - 'num_clusters', 0, 'The number of clusters to divide the runs into' -) + 'num_clusters', 0, 'The number of clusters to divide the runs into') _IN_LOG_FILE = flags.DEFINE_list( 'in_log_file', None, - ( - 'Input file(s) that contain the Sight log that documents the simulation' - ' run.' - ), + ('Input file(s) that contain the Sight log that documents the simulation' + ' run.'), required=True, ) @@ -69,18 +63,18 @@ class LogVarSequence(beam.DoFn): - """Converts sets of named value objects to time-ordered sequences.""" + """Converts sets of named value objects to time-ordered sequences.""" - def __init__( - self, - file_name_prefix: str, - ): - self.file_name_prefix = file_name_prefix + def __init__( + self, + file_name_prefix: str, + ): + self.file_name_prefix = file_name_prefix - def process( - self, task: Tuple[Any, Iterable[Any]] - ) -> Iterator[Tuple[str, Tuple[str, List[numpy_sight.LabeledNpArray]]]]: - """Time-orders the sequence of objects for a given simulation attribute. + def process( + self, task: Tuple[Any, Iterable[Any]] + ) -> Iterator[Tuple[str, Tuple[str, List[numpy_sight.LabeledNpArray]]]]: + """Time-orders the sequence of objects for a given simulation attribute. Args: task: A sequence of objects that describe the state of some simulation @@ -89,132 +83,128 @@ def process( Yields: A time-ordered version of the input sequence. 
""" - variables = list(task[1]) - logging.info('LogVarSequence task=%s', task) - logging.info('LogVarSequence variables=%s', list(task[1])) - - # Group variables by their cluster - cluster_to_variables = {} - for v in variables: - cluster_id = data_structures.from_ordered_log([v['cluster_id']]) - if cluster_id not in cluster_to_variables: - cluster_to_variables[cluster_id] = [] - cluster_to_variables[cluster_id].append(v) - - logging.info('LogVarSequence cluster_to_variables=%s', cluster_to_variables) - cluster_vars = [] - for cluster_id in sorted(cluster_to_variables): - cluster_variables = cluster_to_variables[cluster_id] - all_parameter_values = {} - for v in cluster_variables: - for p in v['parameters']: - if p[0] not in all_parameter_values: - all_parameter_values[p[0]] = set() - all_parameter_values[p[0]].add(p[1]) - logging.info('all_parameter_values=%s', all_parameter_values) - - varied_parameters = set() - for param_name, values in all_parameter_values.items(): - if len(values) > 1: - varied_parameters.add(param_name) - logging.info('varied_parameters=%s', varied_parameters) - ordered_varied_parameters = sorted(varied_parameters) - - all_time_steps = {} - for v in cluster_variables: - logging.info('v["time_steps"]=%s', v['time_steps']) - for ts in v['time_steps']: - if ts not in all_time_steps: - all_time_steps[ts] = 0 - all_time_steps[ts] += 1 - logging.info('all_time_steps=%s', all_time_steps) - - for v in cluster_variables: - logging.info( - '%s: v["values"](%s)=%s', - v['parameters'], - v['values'].shape, - v['values'], - ) - - all_values = np.array([v['values'] for v in cluster_variables]) - logging.info('all_values(%s)=%s', all_values.shape, all_values) - - var_params = [] - for v in cluster_variables: - var_params.append( - ', '.join( - [ + variables = list(task[1]) + logging.info('LogVarSequence task=%s', task) + logging.info('LogVarSequence variables=%s', list(task[1])) + + # Group variables by their cluster + cluster_to_variables = {} + for v in variables: + cluster_id = data_structures.from_ordered_log([v['cluster_id']]) + if cluster_id not in cluster_to_variables: + cluster_to_variables[cluster_id] = [] + cluster_to_variables[cluster_id].append(v) + + logging.info('LogVarSequence cluster_to_variables=%s', + cluster_to_variables) + cluster_vars = [] + for cluster_id in sorted(cluster_to_variables): + cluster_variables = cluster_to_variables[cluster_id] + all_parameter_values = {} + for v in cluster_variables: + for p in v['parameters']: + if p[0] not in all_parameter_values: + all_parameter_values[p[0]] = set() + all_parameter_values[p[0]].add(p[1]) + logging.info('all_parameter_values=%s', all_parameter_values) + + varied_parameters = set() + for param_name, values in all_parameter_values.items(): + if len(values) > 1: + varied_parameters.add(param_name) + logging.info('varied_parameters=%s', varied_parameters) + ordered_varied_parameters = sorted(varied_parameters) + + all_time_steps = {} + for v in cluster_variables: + logging.info('v["time_steps"]=%s', v['time_steps']) + for ts in v['time_steps']: + if ts not in all_time_steps: + all_time_steps[ts] = 0 + all_time_steps[ts] += 1 + logging.info('all_time_steps=%s', all_time_steps) + + for v in cluster_variables: + logging.info( + '%s: v["values"](%s)=%s', + v['parameters'], + v['values'].shape, + v['values'], + ) + + all_values = np.array([v['values'] for v in cluster_variables]) + logging.info('all_values(%s)=%s', all_values.shape, all_values) + + var_params = [] + for v in cluster_variables: + var_params.append(', 
'.join([ f'{p}={dict(v["parameters"])[p]}' for p in ordered_varied_parameters - ] - ) - ) + ])) - cluster_vars.append( - numpy_sight.LabeledNpArray( - all_values, - ['params', 'time_steps'], - [var_params, [str(i) for i in all_time_steps.keys()]], - ) - ) + cluster_vars.append( + numpy_sight.LabeledNpArray( + all_values, + ['params', 'time_steps'], + [var_params, [str(i) for i in all_time_steps.keys()]], + )) - yield ('', (task[0], cluster_vars)) + yield ('', (task[0], cluster_vars)) class AggregateLogs(beam.DoFn): - """Collects the logs for multiple variables into a single log.""" + """Collects the logs for multiple variables into a single log.""" - def process( - self, - task: Tuple[str, Iterable[Tuple[str, List[numpy_sight.LabeledNpArray]]]], - ) -> None: - """Time-orders the sequence of objects for a given simulation attribute. + def process( + self, + task: Tuple[str, Iterable[Tuple[str, + List[numpy_sight.LabeledNpArray]]]], + ) -> None: + """Time-orders the sequence of objects for a given simulation attribute. Args: task: A sequence of objects that describe the state of some simulation attribute over time. """ - with Sight( - sight_pb2.Params( - label='Simulation', - log_owner='bronevet@google.com', - capacitor_output=True, - log_dir_path='/tmp/', - ) - ) as sight: - for var in task[1]: - logging.info('AggregateLogs: var=%s', var) - with Attribute('variable', var[0], sight): - var_clusters = var[1] - for i in range(len(var_clusters)): - with Attribute('cluster', str(i), sight): - data_structures.log_var(var[0], var_clusters[i], sight) - - -def read_capacitor_file( - filename: str, fields: Sequence[str] = ('*',), timeout: float = 60.0 -) -> Iterator[Any]: - """Yields all records from a capacitor file. + with Sight( + sight_pb2.Params( + label='Simulation', + log_owner='bronevet@google.com', + capacitor_output=True, + log_dir_path='/tmp/', + )) as sight: + for var in task[1]: + logging.info('AggregateLogs: var=%s', var) + with Attribute('variable', var[0], sight): + var_clusters = var[1] + for i in range(len(var_clusters)): + with Attribute('cluster', str(i), sight): + data_structures.log_var(var[0], var_clusters[i], + sight) + + +def read_capacitor_file(filename: str, + fields: Sequence[str] = ('*', ), + timeout: float = 60.0) -> Iterator[Any]: + """Yields all records from a capacitor file. Args: filename: May be single file, or pattern. fields: Subset of fields to read. Default is to read all fields. timeout: I/O timeout. """ - filenames = gfile.Glob(filename) - if not filenames: - raise ValueError(f'No such file: {filename}') - for filename in filenames: - reader = pywrap_record_reader.RecordReader.CreateFromPath( - filename, fields, timeout - ) - for r in reader.IterRecords(): - yield r + filenames = gfile.Glob(filename) + if not filenames: + raise ValueError(f'No such file: {filename}') + for filename in filenames: + reader = pywrap_record_reader.RecordReader.CreateFromPath( + filename, fields, timeout) + for r in reader.IterRecords(): + yield r def sight_encode_value(val: int) -> sight_pb2.Object: - """Encodes a value as a Sight object. + """Encodes a value as a Sight object. This is done to ensure type consistency among the many data structures being used to describe simulation behavior. @@ -225,15 +215,16 @@ def sight_encode_value(val: int) -> sight_pb2.Object: Returns: The single Sight log object that encodes val. 
""" - with Sight(sight_pb2.Params(capacitor_output=True, in_memory=True)) as sight: - data_structures.log(val, sight) - return sight.get_in_memory_log().obj[0] + with Sight(sight_pb2.Params(capacitor_output=True, + in_memory=True)) as sight: + data_structures.log(val, sight) + return sight.get_in_memory_log().obj[0] def load_log_uid_clusters( root: beam.Pipeline, simulation_log_uid: beam.pvalue.PCollection ) -> beam.pvalue.PCollection[Tuple[str, Dict[str, sight_pb2.Object]]]: - """Loads clusters of logs into a PCollection. + """Loads clusters of logs into a PCollection. Args: root: The Beam pipeline. @@ -245,238 +236,194 @@ def load_log_uid_clusters( cluster that log was assigned to. """ - if _IN_CLUSTERS_FILE.value: - cluster_assignment = {} - for clusters_fname in gfile.Glob(_IN_CLUSTERS_FILE.value): - for message in read_capacitor_file( - clusters_fname, - [ - '*', - ], - 60, - ): - cluster_assignment_log = sight_pb2.Log() - cluster_assignment_log.ParseFromString(message.SerializeToString()) - cluster_assignment = data_structures.from_log( - list(cluster_assignment_log.obj) - ) - if cluster_assignment['num_clusters'] == _NUM_CLUSTERS.value: - for key, value in data_structures.from_log( - list(cluster_assignment_log.obj) - ).items(): - cluster_assignment[key] = value - break - if not cluster_assignment: - logging.error( - 'Failed to find a clustering with %d clusters.', _NUM_CLUSTERS.value - ) - return - logging.info('cluster_assignment=%s', cluster_assignment) - - log_to_cluster_id = [] - for log_uid, cluster_id in cluster_assignment['cluster_assignment'].items(): - log_to_cluster_id.append( - (log_uid, {'cluster_id': sight_encode_value(cluster_id)}) - ) - return root | beam.Create(log_to_cluster_id) - else: - return simulation_log_uid | beam.Map( - lambda x: (x[0], {'cluster_id': sight_encode_value(0)}) - ) + if _IN_CLUSTERS_FILE.value: + cluster_assignment = {} + for clusters_fname in gfile.Glob(_IN_CLUSTERS_FILE.value): + for message in read_capacitor_file( + clusters_fname, + [ + '*', + ], + 60, + ): + cluster_assignment_log = sight_pb2.Log() + cluster_assignment_log.ParseFromString( + message.SerializeToString()) + cluster_assignment = data_structures.from_log( + list(cluster_assignment_log.obj)) + if cluster_assignment['num_clusters'] == _NUM_CLUSTERS.value: + for key, value in data_structures.from_log( + list(cluster_assignment_log.obj)).items(): + cluster_assignment[key] = value + break + if not cluster_assignment: + logging.error('Failed to find a clustering with %d clusters.', + _NUM_CLUSTERS.value) + return + logging.info('cluster_assignment=%s', cluster_assignment) + + log_to_cluster_id = [] + for log_uid, cluster_id in cluster_assignment[ + 'cluster_assignment'].items(): + log_to_cluster_id.append((log_uid, { + 'cluster_id': + sight_encode_value(cluster_id) + })) + return root | beam.Create(log_to_cluster_id) + else: + return simulation_log_uid | beam.Map( + lambda x: (x[0], { + 'cluster_id': sight_encode_value(0) + })) def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError('Too many command-line arguments.') - - root = beam.Pipeline( - runner=runner.FlumeRunner() - ) # beam.runners.DirectRunner()) - reads = [] - for file_path in _IN_LOG_FILE.value: - reads.append( - root - | f'Read {file_path}' - >> capacitorio.ReadFromCapacitor( - file_path, ['*'], beam.coders.ProtoCoder(sight_pb2.Object) - ) + if len(argv) > 1: + raise app.UsageError('Too many command-line arguments.') + + root = beam.Pipeline( + runner=runner.FlumeRunner()) # 
beam.runners.DirectRunner()) + reads = [] + for file_path in _IN_LOG_FILE.value: + reads.append( + root + | f'Read {file_path}' >> capacitorio.ReadFromCapacitor( + file_path, ['*'], beam.coders.ProtoCoder(sight_pb2.Object))) + + log: beam.pvalue.PCollection[sight_pb2.Object] = reads | beam.Flatten() + + simulation = analysis_utils.objects(log, + sight_pb2.BlockStart.ST_SIMULATION, + 'simulation') + + simulation_log_uid: beam.pvalue.PCollection[analysis_utils.KeyedObjMap] = ( + analysis_utils.create_log_uid_key('simulation_and_cluster', + 'simulation', simulation)) + + clusters_key_log_uid = load_log_uid_clusters(root, simulation_log_uid) + _ = clusters_key_log_uid | 'clusters_key_log_uid' >> beam.io.WriteToText( + str(_OUT_FILE.value) + '.clusters_key_log_uid') + + objects_with_ancestors = log | beam.ParDo( + analysis_utils.ExtractAncestorBlockStartLocations()) + + simulation_and_cluster: beam.pvalue.PCollection[analysis_utils.ObjMap] = ( + { + 'simulation': simulation_log_uid, + 'clusters_key_log_uid': clusters_key_log_uid, + } + | 'simulation_and_cluster CoGroupByKey' >> beam.CoGroupByKey() + | beam.ParDo( + analysis_utils.CombineRecords('simulation', + 'clusters_key_log_uid'))) + simulation_and_cluster_sim_loc_uid: beam.pvalue.PCollection[ + analysis_utils.KeyedObjMap] = analysis_utils.create_loc_log_uid_key( + 'simulation_and_cluster', 'simulation', simulation_and_cluster) + + _ = (simulation_and_cluster + | 'write simulation_and_cluster' >> + beam.io.WriteToText(str(_OUT_FILE.value) + '.simulation_and_cluster')) + _ = (simulation_and_cluster_sim_loc_uid + | 'write simulation_and_cluster_sim_loc_uid' >> beam.io.WriteToText( + str(_OUT_FILE.value) + '.simulation_and_cluster_sim_loc_uid')) + + simulation_parameters = analysis_utils.block_start_objects_key_parent( + log, + sight_pb2.BlockStart.ST_SIMULATION_PARAMETERS, + 'simulation_parameters', ) - - log: beam.pvalue.PCollection[sight_pb2.Object] = reads | beam.Flatten() - - simulation = analysis_utils.objects( - log, sight_pb2.BlockStart.ST_SIMULATION, 'simulation' - ) - - simulation_log_uid: beam.pvalue.PCollection[analysis_utils.KeyedObjMap] = ( - analysis_utils.create_log_uid_key( - 'simulation_and_cluster', 'simulation', simulation - ) - ) - - clusters_key_log_uid = load_log_uid_clusters(root, simulation_log_uid) - _ = clusters_key_log_uid | 'clusters_key_log_uid' >> beam.io.WriteToText( - str(_OUT_FILE.value) + '.clusters_key_log_uid' - ) - - objects_with_ancestors = log | beam.ParDo( - analysis_utils.ExtractAncestorBlockStartLocations() - ) - - simulation_and_cluster: beam.pvalue.PCollection[analysis_utils.ObjMap] = ( - { - 'simulation': simulation_log_uid, - 'clusters_key_log_uid': clusters_key_log_uid, - } - | 'simulation_and_cluster CoGroupByKey' >> beam.CoGroupByKey() - | beam.ParDo( - analysis_utils.CombineRecords('simulation', 'clusters_key_log_uid') - ) - ) - simulation_and_cluster_sim_loc_uid: beam.pvalue.PCollection[ - analysis_utils.KeyedObjMap - ] = analysis_utils.create_loc_log_uid_key( - 'simulation_and_cluster', 'simulation', simulation_and_cluster - ) - - _ = ( - simulation_and_cluster - | 'write simulation_and_cluster' - >> beam.io.WriteToText(str(_OUT_FILE.value) + '.simulation_and_cluster') - ) - _ = ( - simulation_and_cluster_sim_loc_uid - | 'write simulation_and_cluster_sim_loc_uid' - >> beam.io.WriteToText( - str(_OUT_FILE.value) + '.simulation_and_cluster_sim_loc_uid' - ) - ) - - simulation_parameters = analysis_utils.block_start_objects_key_parent( - log, - sight_pb2.BlockStart.ST_SIMULATION_PARAMETERS, - 
'simulation_parameters', - ) - _ = ( - simulation_parameters - | 'write simulation_parameters' - >> beam.io.WriteToText(str(_OUT_FILE.value) + '.simulation_parameters') - ) - simulation_time_step = analysis_utils.block_start_objects_key_parent( - log, sight_pb2.BlockStart.ST_SIMULATION_TIME_STEP, 'simulation_time_step' - ) - simulation_state = analysis_utils.block_start_objects_key_parent( - log, sight_pb2.BlockStart.ST_SIMULATION_STATE, 'simulation_state' - ) - named_value = analysis_utils.block_start_objects_key_parent( - log, sight_pb2.BlockStart.ST_NAMED_VALUE, 'named_value' - ) - _ = simulation_time_step | 'simulation_time_step' >> beam.io.WriteToText( - str(_OUT_FILE.value) + '.simulation_time_step' - ) - - simulation_and_parameter_objects = ( - analysis_utils.create_simulation_and_parameter_objects( - log, - objects_with_ancestors, - simulation_and_cluster_sim_loc_uid, - simulation_parameters, - named_value, - str(_OUT_FILE.value), - ) - ) - _ = ( - simulation_and_parameter_objects - | 'simulation_and_parameter_objects' - >> beam.io.WriteToText( - str(_OUT_FILE.value) + '.simulation_and_parameter_objects' - ) - ) - - simulation_time_step_state_objects = ( - analysis_utils.create_simulation_time_step_state_objects( - objects_with_ancestors, - simulation_and_cluster_sim_loc_uid, - simulation_time_step, - simulation_state, - named_value, - str(_OUT_FILE.value), - ) - ) - _ = ( - simulation_time_step_state_objects - | 'simulation_time_step_state_objects' - >> beam.io.WriteToText( - str(_OUT_FILE.value) + '.simulation_time_step_state_objects' - ) - ) - - simulation_time_step_state_objects_in_time_order = ( - analysis_utils.create_named_value_label_log_uid_key( - 'simulation_time_step_state_objects', - simulation_time_step_state_objects, - ) - | 'simulation_time_step_state_objects GroupByKey' >> beam.GroupByKey() - | beam.ParDo(analysis_utils.NamedObjectsToSequence()) - ) - - _ = ( - simulation_time_step_state_objects_in_time_order - | 'simulation_time_step_state_objects_in_time_order' - >> beam.io.WriteToText( - str(_OUT_FILE.value) - + '.simulation_time_step_state_objects_in_time_order' - ) - ) - - simulation_params_and_vars = ( - { - 'simulation_and_parameter_objects': ( - analysis_utils.create_loc_log_uid_key( - 'simulation_and_parameter_objects', - 'simulation', - simulation_and_parameter_objects, - ) - ), - 'simulation_time_step_state_objects_in_time_order': ( - analysis_utils.create_loc_log_uid_key( - 'simulation_time_step_state_objects_in_time_order', - 'simulation', - simulation_time_step_state_objects_in_time_order, - ) - ), - } - | 'simulation_params_steps_objects_in_time_order CoGroupByKey' - >> beam.CoGroupByKey() - | beam.ParDo( - analysis_utils.CombineParametersAndTimeSeries( - 'simulation_and_parameter_objects', - 'simulation_time_step_state_objects_in_time_order', - ) - ) - ) - - _ = ( - simulation_params_and_vars - | 'simulation_params_and_vars' - >> beam.io.WriteToText( - str(_OUT_FILE.value) + '.simulation_params_and_vars' - ) - ) - - _ = ( - analysis_utils.create_var_key( - 'simulation_params_and_vars', simulation_params_and_vars - ) - | 'simulation_params_and_vars GroupByKey - varname' >> beam.GroupByKey() - | beam.ParDo(LogVarSequence(str(_OUT_FILE.value))) - | 'log var sequences gather all' >> beam.GroupByKey() - | beam.ParDo(AggregateLogs()) - ) - - results = root.run() - results.wait_until_finish() + _ = (simulation_parameters + | 'write simulation_parameters' >> + beam.io.WriteToText(str(_OUT_FILE.value) + '.simulation_parameters')) + simulation_time_step = 
analysis_utils.block_start_objects_key_parent( + log, sight_pb2.BlockStart.ST_SIMULATION_TIME_STEP, + 'simulation_time_step') + simulation_state = analysis_utils.block_start_objects_key_parent( + log, sight_pb2.BlockStart.ST_SIMULATION_STATE, 'simulation_state') + named_value = analysis_utils.block_start_objects_key_parent( + log, sight_pb2.BlockStart.ST_NAMED_VALUE, 'named_value') + _ = simulation_time_step | 'simulation_time_step' >> beam.io.WriteToText( + str(_OUT_FILE.value) + '.simulation_time_step') + + simulation_and_parameter_objects = ( + analysis_utils.create_simulation_and_parameter_objects( + log, + objects_with_ancestors, + simulation_and_cluster_sim_loc_uid, + simulation_parameters, + named_value, + str(_OUT_FILE.value), + )) + _ = (simulation_and_parameter_objects + | 'simulation_and_parameter_objects' >> beam.io.WriteToText( + str(_OUT_FILE.value) + '.simulation_and_parameter_objects')) + + simulation_time_step_state_objects = ( + analysis_utils.create_simulation_time_step_state_objects( + objects_with_ancestors, + simulation_and_cluster_sim_loc_uid, + simulation_time_step, + simulation_state, + named_value, + str(_OUT_FILE.value), + )) + _ = (simulation_time_step_state_objects + | 'simulation_time_step_state_objects' >> beam.io.WriteToText( + str(_OUT_FILE.value) + '.simulation_time_step_state_objects')) + + simulation_time_step_state_objects_in_time_order = ( + analysis_utils.create_named_value_label_log_uid_key( + 'simulation_time_step_state_objects', + simulation_time_step_state_objects, + ) + | 'simulation_time_step_state_objects GroupByKey' >> beam.GroupByKey() + | beam.ParDo(analysis_utils.NamedObjectsToSequence())) + + _ = (simulation_time_step_state_objects_in_time_order + | 'simulation_time_step_state_objects_in_time_order' >> + beam.io.WriteToText( + str(_OUT_FILE.value) + + '.simulation_time_step_state_objects_in_time_order')) + + simulation_params_and_vars = ( + { + 'simulation_and_parameter_objects': + (analysis_utils.create_loc_log_uid_key( + 'simulation_and_parameter_objects', + 'simulation', + simulation_and_parameter_objects, + )), + 'simulation_time_step_state_objects_in_time_order': + (analysis_utils.create_loc_log_uid_key( + 'simulation_time_step_state_objects_in_time_order', + 'simulation', + simulation_time_step_state_objects_in_time_order, + )), + } + | 'simulation_params_steps_objects_in_time_order CoGroupByKey' >> + beam.CoGroupByKey() + | beam.ParDo( + analysis_utils.CombineParametersAndTimeSeries( + 'simulation_and_parameter_objects', + 'simulation_time_step_state_objects_in_time_order', + ))) + + _ = (simulation_params_and_vars + | 'simulation_params_and_vars' >> beam.io.WriteToText( + str(_OUT_FILE.value) + '.simulation_params_and_vars')) + + _ = ( + analysis_utils.create_var_key('simulation_params_and_vars', + simulation_params_and_vars) + | + 'simulation_params_and_vars GroupByKey - varname' >> beam.GroupByKey() + | beam.ParDo(LogVarSequence(str(_OUT_FILE.value))) + | 'log var sequences gather all' >> beam.GroupByKey() + | beam.ParDo(AggregateLogs())) + + results = root.run() + results.wait_until_finish() if __name__ == '__main__': - app.run(main) + app.run(main) diff --git a/py/sight/widgets/simulation/simulation.py b/py/sight/widgets/simulation/simulation.py index 3336be1..73ef6de 100644 --- a/py/sight/widgets/simulation/simulation.py +++ b/py/sight/widgets/simulation/simulation.py @@ -11,13 +11,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. - """Simulation runs in the Sight log.""" import inspect from typing import Any, Callable, Dict, Optional, Text, Tuple -from absl import logging - +from helpers.logs.logs_handler import logger as logging from sight.proto import sight_pb2 from sight.exception import exception from sight.trace import Trace @@ -26,7 +24,7 @@ class Simulation(object): - """Encapsulates start and stop points where a Simulation is active. + """Encapsulates start and stop points where a Simulation is active. Attributes: sight: Reference to the Sight logger via which this simulation is logged. @@ -35,14 +33,14 @@ class Simulation(object): this particular run should be compared. """ - def __init__( - self, - label: str, - sight: Any, - parameters: Optional[Dict[Text, Any]], - reference_trace_file_path: Optional[str] = None, - ): - """Creates and enters a simulation block with a given label and parameters. + def __init__( + self, + label: str, + sight: Any, + parameters: Optional[Dict[Text, Any]], + reference_trace_file_path: Optional[str] = None, + ): + """Creates and enters a simulation block with a given label and parameters. Args: label: The label that identifies this block. @@ -52,89 +50,85 @@ def __init__( reference_trace_file_path: Path of the file that contains the Sight log of a reference simulation run to compare this run to. """ - self.sight = sight - if sight is None: - logging.info('<<>> %s', self.label) - if self.sight is None: - logging.info('>>> %s', self.label) - return - - # Unregister the associated simulation parameters object with Sight. - self.sight.widget_simulation_state.simulation_parameters = None - - # Unregister this simulation object with Sight. - self.sight.widget_simulation_state.simulation = None - self.sight.widget_simulation_state.state = {} - - # pytype: disable=attribute-error - self.sight.exit_block( - self.label, sight_pb2.Object(), inspect.currentframe().f_back - ) - # pytype: enable=attribute-error - - @classmethod - def run_decision_configuration( - cls, - label: str, - parameters: Optional[Dict[Text, Any]], - driver_fn: Callable[[Any], Any], - state_attrs: Dict[str, Tuple[float, float]], - action_attrs: Dict[str, Tuple[float, float]], - sight: Any, - reference_trace_file_path: Optional[str] = None, - ): - """Runs this simulation, using the Decision API to configure it. + self.sight = sight + if sight is None: + logging.info('<<>> %s', self.label) + if self.sight is None: + logging.info('>>> %s', self.label) + return + + # Unregister the associated simulation parameters object with Sight. + self.sight.widget_simulation_state.simulation_parameters = None + + # Unregister this simulation object with Sight. + self.sight.widget_simulation_state.simulation = None + self.sight.widget_simulation_state.state = {} + + # pytype: disable=attribute-error + self.sight.exit_block(self.label, sight_pb2.Object(), + inspect.currentframe().f_back) + # pytype: enable=attribute-error + + @classmethod + def run_decision_configuration( + cls, + label: str, + parameters: Optional[Dict[Text, Any]], + driver_fn: Callable[[Any], Any], + state_attrs: Dict[str, Tuple[float, float]], + action_attrs: Dict[str, Tuple[float, float]], + sight: Any, + reference_trace_file_path: Optional[str] = None, + ): + """Runs this simulation, using the Decision API to configure it. Args: label: The label that identifies this simulation. 
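
The hunk above reindents `run_decision_configuration`, which wraps the caller's `driver_fn` in a `Simulation` block and then hands control to `decision.run` (see the next hunk). A minimal sketch of the intended call shape, with hypothetical attribute names and ranges and an assumed pre-built `sight` logger; the import path is inferred from the repo layout:

```python
from sight.widgets.simulation.simulation import Simulation  # path assumed


def driver(sight):
    # One simulated episode: open time-step blocks, ask the Decision API
    # for actions, and log the resulting state.
    pass


# 'temperature'/'heater_power' and their (min, max) ranges are
# illustrative only; `sight` is assumed to be an already-built Sight logger.
Simulation.run_decision_configuration(
    label='demo_simulation',
    parameters={'num_steps': 10},
    driver_fn=driver,
    state_attrs={'temperature': (0.0, 100.0)},
    action_attrs={'heater_power': (0.0, 1.0)},
    sight=sight,
)
```
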
@@ -154,13 +148,14 @@ def run_decision_configuration( a reference simulation run to compare this run to. """ - def run(sight): - with Simulation(label, sight, parameters, reference_trace_file_path): - driver_fn(sight) - - decision.run( - driver_fn=run, - state_attrs=state_attrs.copy(), - action_attrs=action_attrs, - sight=sight, - ) + def run(sight): + with Simulation(label, sight, parameters, + reference_trace_file_path): + driver_fn(sight) + + decision.run( + driver_fn=run, + state_attrs=state_attrs.copy(), + action_attrs=action_attrs, + sight=sight, + ) diff --git a/py/sight/widgets/simulation/simulation_parameters.py b/py/sight/widgets/simulation/simulation_parameters.py index 834ef9b..a9d43aa 100644 --- a/py/sight/widgets/simulation/simulation_parameters.py +++ b/py/sight/widgets/simulation/simulation_parameters.py @@ -11,12 +11,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Simulation parameters in the Sight log.""" import inspect from typing import Any, Dict, Text -from absl import logging +from helpers.logs.logs_handler import logger as logging from sight.proto import sight_pb2 from sight import data_structures @@ -24,10 +23,10 @@ class SimulationParameters(object): - """Encapsulates log region that documents a simulation's parameters.""" + """Encapsulates log region that documents a simulation's parameters.""" - def __init__(self, parameters: Dict[Text, Any], sight: Any) -> None: - """Creates and enters a block of a simulation's parameters. + def __init__(self, parameters: Dict[Text, Any], sight: Any) -> None: + """Creates and enters a block of a simulation's parameters. Args: parameters: Key-value pairs that identify this block and all of its @@ -37,77 +36,71 @@ def __init__(self, parameters: Dict[Text, Any], sight: Any) -> None: Returns: The starting location of this simulation parameters block. """ - self.parameters = {} - self.sight = sight - if sight is None: - logging.info('<<>>') - return - - # Unregister this simulation parameters object with Sight. - # self.sight.widget_simulation_state.simulation_parameters = None - - # pytype: disable=attribute-error - self.sight.exit_block( - 'SimulationParameters', - sight_pb2.Object( - block_end=sight_pb2.BlockEnd( - sub_type=sight_pb2.BlockEnd.ST_SIMULATION_PARAMETERS + self.parameters = {} + self.sight = sight + if sight is None: + logging.info('<<>>') + return + + # Unregister this simulation parameters object with Sight. + # self.sight.widget_simulation_state.simulation_parameters = None + + # pytype: disable=attribute-error + self.sight.exit_block( + 'SimulationParameters', + sight_pb2.Object(block_end=sight_pb2.BlockEnd( + sub_type=sight_pb2.BlockEnd.ST_SIMULATION_PARAMETERS)), + inspect.currentframe().f_back, + ) + # pytype: enable=attribute-error diff --git a/py/sight/widgets/simulation/simulation_state.py b/py/sight/widgets/simulation/simulation_state.py index 6c31cc1..66afbb5 100644 --- a/py/sight/widgets/simulation/simulation_state.py +++ b/py/sight/widgets/simulation/simulation_state.py @@ -11,14 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- """Simulation state in the Sight log.""" from enum import Enum import inspect from typing import Any, Dict, Text -from absl import logging - +from helpers.logs.logs_handler import logger as logging from sight.proto import sight_pb2 from sight import data_structures from sight.exception import exception @@ -26,15 +24,18 @@ class SimulationState(object): - """Encapsulates log region that documents a simulation's state.""" + """Encapsulates log region that documents a simulation's state.""" - class Type(Enum): - INITIAL = 1 - BOUNDARY = 2 - DYNAMIC = 3 + class Type(Enum): + INITIAL = 1 + BOUNDARY = 2 + DYNAMIC = 3 - def __init__(self, state: Dict[Text, Any], sight: Any, type: Type = Type.DYNAMIC) -> None: - """Creates and enters a block of a simulation's state. + def __init__(self, + state: Dict[Text, Any], + sight: Any, + type: Type = Type.DYNAMIC) -> None: + """Creates and enters a block of a simulation's state. Args: state: Key-value pairs that identify this block and all of its contained @@ -44,140 +45,126 @@ def __init__(self, state: Dict[Text, Any], sight: Any, type: Type = Type.DYNAMIC Returns: The starting location of this simulation state block. """ - self.sight = sight - if sight is None: - logging.info('<<>>') - return - - self.type = type - proto_type = sight_pb2.BlockEnd.ST_SIMULATION_STATE - if type == self.Type.INITIAL: - proto_type = sight_pb2.BlockEnd.ST_SIMULATION_INITIAL_STATE - elif type == self.Type.BOUNDARY: - proto_type = sight_pb2.BlockEnd.ST_SIMULATION_BOUNDARY_STATE - - # pytype: disable=attribute-error - self.sight.exit_block( - 'SimulationState', - sight_pb2.Object( - block_end=sight_pb2.BlockEnd( - sub_type=proto_type - ) - ), - inspect.currentframe().f_back, - ) - # pytype: enable=attribute-error - - # If there is a reference trace, report the difference between - # this trace and the reference trace via the Decision API. - reference_trace = self.sight.widget_simulation_state.reference_trace - if reference_trace: - reference_state = {} - while True: - cur_named_var = reference_trace.advance_to_within_block([ - sight_pb2.Object.ST_BLOCK_START, - sight_pb2.BlockStart.ST_NAMED_VALUE, - ]) - if not cur_named_var: - break - name, value = data_structures.from_ordered_log( - reference_trace.collect_current_block() + self.sight = sight + if sight is None: + logging.info('<<>>') + return + + self.type = type + proto_type = sight_pb2.BlockEnd.ST_SIMULATION_STATE + if type == self.Type.INITIAL: + proto_type = sight_pb2.BlockEnd.ST_SIMULATION_INITIAL_STATE + elif type == self.Type.BOUNDARY: + proto_type = sight_pb2.BlockEnd.ST_SIMULATION_BOUNDARY_STATE + + # pytype: disable=attribute-error + self.sight.exit_block( + 'SimulationState', + sight_pb2.Object(block_end=sight_pb2.BlockEnd( + sub_type=proto_type)), + inspect.currentframe().f_back, ) - reference_state[name] = value - - observed_state_vars = reference_state.keys() - sum_relative_errors = 0 - num_vars = 0 - for name in observed_state_vars: - if ( - max( - abs(self.sight.widget_simulation_state.state[name]), - abs(reference_state[name]), + # pytype: enable=attribute-error + + # If there is a reference trace, report the difference between + # this trace and the reference trace via the Decision API. 
+ reference_trace = self.sight.widget_simulation_state.reference_trace + if reference_trace: + reference_state = {} + while True: + cur_named_var = reference_trace.advance_to_within_block([ + sight_pb2.Object.ST_BLOCK_START, + sight_pb2.BlockStart.ST_NAMED_VALUE, + ]) + if not cur_named_var: + break + name, value = data_structures.from_ordered_log( + reference_trace.collect_current_block()) + reference_state[name] = value + + observed_state_vars = reference_state.keys() + sum_relative_errors = 0 + num_vars = 0 + for name in observed_state_vars: + if (max( + abs(self.sight.widget_simulation_state.state[name]), + abs(reference_state[name]), + ) > 0): + sum_relative_errors += abs( + (self.sight.widget_simulation_state.state[name] - + reference_state[name]) / max( + abs(self.sight.widget_simulation_state.state[name] + ), + abs(reference_state[name]), + )) + num_vars += 1 + + error_relative_to_reference_run = (sum_relative_errors / + num_vars if num_vars > 0 else 0) + decision.decision_outcome('distance', + 0 - error_relative_to_reference_run, + self.sight) + + # Unregister this simulation state object with Sight. + if self.sight.widget_simulation_state.reference_trace: + self.sight.widget_simulation_state.reference_trace.collect_current_block( ) - > 0 - ): - sum_relative_errors += abs( - ( - self.sight.widget_simulation_state.state[name] - - reference_state[name] - ) - / max( - abs(self.sight.widget_simulation_state.state[name]), - abs(reference_state[name]), - ) - ) - num_vars += 1 - - error_relative_to_reference_run = ( - sum_relative_errors / num_vars if num_vars > 0 else 0 - ) - decision.decision_outcome( - 'distance', 0 - error_relative_to_reference_run, self.sight - ) - - # Unregister this simulation state object with Sight. - if self.sight.widget_simulation_state.reference_trace: - self.sight.widget_simulation_state.reference_trace.collect_current_block() - self.sight.widget_simulation_state.simulation_state = None + self.sight.widget_simulation_state.simulation_state = None def state_updated( @@ -185,15 +172,13 @@ def state_updated( obj_to_log: Any, sight: Any, ) -> None: - """Informs the Simulation API that the current state has been updated. + """Informs the Simulation API that the current state has been updated. Args: name: The name of the updated state variable. obj_to_log: The value of the state variable. sight: Instance of a Sight logger. """ - if ( - sight.widget_simulation_state - and sight.widget_simulation_state.simulation_state - ): - sight.widget_simulation_state.state[name] = obj_to_log + if (sight.widget_simulation_state + and sight.widget_simulation_state.simulation_state): + sight.widget_simulation_state.state[name] = obj_to_log diff --git a/py/sight/widgets/simulation/simulation_time_step.py b/py/sight/widgets/simulation/simulation_time_step.py index bacfdd0..13ddf8d 100644 --- a/py/sight/widgets/simulation/simulation_time_step.py +++ b/py/sight/widgets/simulation/simulation_time_step.py @@ -11,29 +11,27 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
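
The `SimulationTimeStep` block reformatted below is a context manager (its `__enter__`/`__exit__` appear later in this hunk). A minimal usage sketch against the constructor signature shown below; the values are illustrative, and the units argument is left as a parameter because the exact `TimeStepUnits` enum constant is not shown here:

```python
from sight.widgets.simulation.simulation_time_step import SimulationTimeStep


def run_step(step: int, units, sight):
    # `units` is assumed to be a valid
    # sight_pb2.SimulationTimeStepStart.TimeStepUnits enum value.
    with SimulationTimeStep(
            time_step_index=[step],
            time_step=float(step),
            time_step_size=1.0,
            time_step_units=units,
            sight=sight,
    ):
        pass  # log this step's SimulationState here
```
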
- """Individual simulation time steps in the Sight log.""" import inspect from typing import Any, Sequence -from absl import logging - +from helpers.logs.logs_handler import logger as logging from sight.proto import sight_pb2 from sight.exception import exception class SimulationTimeStep(object): - """Encapsulates a single simulation time step un the Sight log.""" - - def __init__( - self, - time_step_index: Sequence[int], - time_step: float, - time_step_size: float, - time_step_units: sight_pb2.SimulationTimeStepStart.TimeStepUnits, - sight: Any, - ): - """Creates and enters a simulation time step block. + """Encapsulates a single simulation time step un the Sight log.""" + + def __init__( + self, + time_step_index: Sequence[int], + time_step: float, + time_step_size: float, + time_step_units: sight_pb2.SimulationTimeStepStart.TimeStepUnits, + sight: Any, + ): + """Creates and enters a simulation time step block. Args: time_step_index: Integral index of the time step within the overall @@ -48,33 +46,33 @@ def __init__( Returns: The starting location of this time step block. """ - self.sight = sight - if sight is None: - logging.info( - '<<>>') + return + + # Unregister this simulation time step object with Sight. + if self.sight.widget_simulation_state.reference_trace: + self.sight.widget_simulation_state.reference_trace.collect_current_block( ) - ), - inspect.currentframe().f_back.f_back, - ) - # pytype: enable=attribute-error - - def __enter__(self): - return self - - def __exit__(self, exc_type, value, traceback): - if not self.sight: - return - - if not self.sight.is_logging_enabled(): - return - - if exc_type is not None: - # pytype: disable=attribute-error - exception( - exc_type, value, traceback, self.sight, inspect.currentframe().f_back - ) - # pytype: enable=attribute-error - - if self.sight is None: - logging.info('SimulationTimeStep>>>') - return - - # Unregister this simulation time step object with Sight. 
-    if self.sight.widget_simulation_state.reference_trace:
-      self.sight.widget_simulation_state.reference_trace.collect_current_block()
-    self.sight.widget_simulation_state.simulation_time_step = None
-
-    # pytype: disable=attribute-error
-    self.sight.exit_block(
-        'SimulationTimeStep',
-        sight_pb2.Object(
-            block_end=sight_pb2.BlockEnd(
-                sub_type=sight_pb2.BlockEnd.ST_SIMULATION_TIME_STEP
-            )
-        ),
-        inspect.currentframe().f_back,
-    )
-    # pytype: enable=attribute-error
-    self.sight.unset_attribute('SimulationTimeStep')
+        self.sight.widget_simulation_state.simulation_time_step = None
+
+        # pytype: disable=attribute-error
+        self.sight.exit_block(
+            'SimulationTimeStep',
+            sight_pb2.Object(block_end=sight_pb2.BlockEnd(
+                sub_type=sight_pb2.BlockEnd.ST_SIMULATION_TIME_STEP)),
+            inspect.currentframe().f_back,
+        )
+        # pytype: enable=attribute-error
+        self.sight.unset_attribute('SimulationTimeStep')
diff --git a/py/sight/widgets/simulation/train_surrogate.py b/py/sight/widgets/simulation/train_surrogate.py
index 780fb2d..5693a26 100644
--- a/py/sight/widgets/simulation/train_surrogate.py
+++ b/py/sight/widgets/simulation/train_surrogate.py
@@ -20,8 +20,8 @@
 
 from absl import app
 from absl import flags
-from absl import logging
-import apache_beam as beam
+from helpers.logs.logs_handler import logger as logging
+import apache_beam as beam
 import numpy as np
 from sklearn import metrics
 from sklearn.ensemble import GradientBoostingRegressor
diff --git a/py/sight/widgets/tensorflow_sight/tensorflow_sight.py b/py/sight/widgets/tensorflow_sight/tensorflow_sight.py
index 49c0b73..8470ab3 100644
--- a/py/sight/widgets/tensorflow_sight/tensorflow_sight.py
+++ b/py/sight/widgets/tensorflow_sight/tensorflow_sight.py
@@ -17,8 +17,8 @@
 
 import inspect
 from typing import Any, Optional
-from absl import logging
-import tensorflow as tf
+from helpers.logs.logs_handler import logger as logging
+import tensorflow as tf
 from proto import sight_pb2
 from py.exception import exception
diff --git a/sight_service/acme_optimizer.py b/sight_service/acme_optimizer.py
index 1f739e5..9813c25 100644
--- a/sight_service/acme_optimizer.py
+++ b/sight_service/acme_optimizer.py
@@ -14,7 +14,7 @@
 """Acme reinforcement learning for driving Sight applications."""
 
 import concurrent.futures
-import logging
+from helpers.logs.logs_handler import logger as logging
 import time
 import json
 import pickle
@@ -252,8 +252,8 @@ def generate_env_spec(self, state_attrs, action_attrs):
     action_min = []
     action_max = []
     for key, attr_props in action_attrs.items():
-      action_min.append(attr_props.min_value)
-      action_max.append(attr_props.max_value)
+        action_min.append(attr_props.min_value)
+        action_max.append(attr_props.max_value)
 
       if (attr_props.valid_int_values):
         actions = specs.DiscreteArray(num_values=len(
@@ -261,13 +261,13 @@
           dtype=np.int64,
           name="action")
       else:
-        if(attr_props.step_size):
-          default_dtype=np.int64
+        if (attr_props.step_size):
+            default_dtype = np.int64
         actions = specs.BoundedArray(shape=(len(action_max), ),
-                                  dtype=default_dtype,
-                                  name='action',
-                                  minimum=action_min,
-                                  maximum=action_max)
+                                     dtype=default_dtype,
+                                     name='action',
+                                     minimum=action_min,
+                                     maximum=action_max)
 
     # print(state_min, state_max, len(state_max), state_dtype)
     # print(action_min, action_max, len(action_max), action_dtype)
@@ -293,7 +293,6 @@ def create_learner(self, client_id, acme_config, state_attrs,
     method_name = "create_learner"
     logging.info(">>>> In %s of %s", method_name, _file_name)
 
-    environment_spec = 
self.generate_env_spec(state_attrs, action_attrs) if (acme_config.acme_agent == @@ -449,9 +448,7 @@ def convert_np_to_list(obj): # directly serializing the weights structure # serialized_weights = json.dumps( # latest_weights, default=convert_np_to_list).encode('utf-8') - serialized_weights = pickle.dumps( - latest_weights) - + serialized_weights = pickle.dumps(latest_weights) response.weights = serialized_weights diff --git a/sight_service/bayesian_opt.py b/sight_service/bayesian_opt.py index efa26c4..90645bc 100644 --- a/sight_service/bayesian_opt.py +++ b/sight_service/bayesian_opt.py @@ -11,10 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """LLM-based optimization for driving Sight applications.""" -import logging +from helpers.logs.logs_handler import logger as logging from overrides import overrides from typing import Any, Dict, List, Tuple @@ -34,112 +33,119 @@ _file_name = "bayesian_opt.py" + class BayesianOpt(OptimizerInstance): - """Uses an LLM to choose the parameters of the code. + """Uses an LLM to choose the parameters of the code. """ - def __init__(self): - super().__init__() - self._lock = threading.RLock() - self._total_count = 0 - self._completed_count = 0 - - @overrides - def launch( - self, request: service_pb2.LaunchRequest - ) -> service_pb2.LaunchResponse: - response = super(BayesianOpt, self).launch(request) - self._total_count = request.decision_config_params.num_trials - self._optimizer = BayesianOptimization( - f=None, - pbounds={key: (p.min_value, p.max_value) for key, p in self.actions.items()}, - verbose=2, - allow_duplicate_points=True, - # random_state=1, - ) - self._utility = UtilityFunction(kind='ucb', kappa=1.96, xi=0.01) - response.display_string = 'BayesianOpt Start' - return response - - # def _params_to_dict(self, dp: sight_pb2) -> Dict[str, float]: - # """Returns the dict representation of a DecisionParams proto""" - # d = {} - # for a in dp: - # d[a.key] = a.value.double_value - # return d - - @overrides - def decision_point( - self, request: service_pb2.DecisionPointRequest - ) -> service_pb2.DecisionPointResponse: - logging.info('DecisionPoint request=%s', request) - print('DecisionPoint request=%s' % request) - - self._lock.acquire() - selected_actions = self._optimizer.suggest(self._utility) - self._lock.release() - - dp_response = service_pb2.DecisionPointResponse() - for key, value in selected_actions.items(): - a = dp_response.action.add() - a.key = key - a.value.sub_type = sight_pb2.Value.ST_DOUBLE - a.value.double_value = float(value) - - print('DecisionPoint response=%s' % dp_response) - dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT - return dp_response - - @overrides - def finalize_episode( - self, request: service_pb2.FinalizeEpisodeRequest - ) -> service_pb2.FinalizeEpisodeResponse: - logging.info('FinalizeEpisode request=%s', request) - d = {} - for a in request.decision_point.choice_params: - d[a.key] = a.value.double_value - - self._lock.acquire() - logging.info('FinalizeEpisode outcome=%s / %s', request.decision_outcome.reward, d) - self._optimizer.register( - params=d, - target=request.decision_outcome.reward) - # self._completed_count += 1 - self._lock.release() - return service_pb2.FinalizeEpisodeResponse(response_str='Success!') - - @overrides - def current_status( - self, request: service_pb2.CurrentStatusRequest - ) -> service_pb2.CurrentStatusResponse: 
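
Beyond the reformatting, `BayesianOpt` drives the `bayes_opt` package in ask/tell mode: `decision_point` asks via `suggest()` and `finalize_episode` reports rewards via `register()`, using exactly the constructor arguments from `launch` above. A self-contained sketch of that loop with a toy one-dimensional objective (the objective and trial count are illustrative only):

```python
from bayes_opt import BayesianOptimization, UtilityFunction

# Same construction as launch(): no objective function, explicit bounds.
optimizer = BayesianOptimization(
    f=None,
    pbounds={'x': (-2.0, 2.0)},
    verbose=0,
    allow_duplicate_points=True,
)
utility = UtilityFunction(kind='ucb', kappa=1.96, xi=0.01)

for _ in range(10):
    params = optimizer.suggest(utility)  # decision_point: ask
    reward = -(params['x'] - 1.0)**2  # a worker's episode outcome (toy)
    optimizer.register(params=params, target=reward)  # finalize_episode: tell

print(optimizer.max)  # best observed target and its params
```
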
- output = '[BayesianOpt (#%s trials)\n' % len(self._optimizer.res) - for trial in sorted(self._optimizer.res, key=lambda x: x['target'], reverse=True): - output += ' '+str(trial) + '\n' - output += ']\n' - - if(self._completed_count == self._total_count): - status = service_pb2.CurrentStatusResponse.Status.SUCCESS - elif(self._completed_count < self._total_count): - status = service_pb2.CurrentStatusResponse.Status.IN_PROGRESS - else: - status = service_pb2.CurrentStatusResponse.Status.FAILURE - - return service_pb2.CurrentStatusResponse(response_str=output, status=status) - - @overrides - def WorkerAlive( - self, request: service_pb2.WorkerAliveRequest - ) -> service_pb2.WorkerAliveResponse: - method_name = "WorkerAlive" - logging.debug(">>>> In %s of %s", method_name, _file_name) - if(self._completed_count == self._total_count): - worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_DONE - # elif(not self.pending_samples): - # worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_RETRY - else: - # Increasing count here so that multiple workers can't enter the dp call for same sample at last - self._completed_count += 1 - worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_ACT - logging.info("worker_alive_status is %s", worker_alive_status) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return service_pb2.WorkerAliveResponse(status_type=worker_alive_status) + def __init__(self): + super().__init__() + self._lock = threading.RLock() + self._total_count = 0 + self._completed_count = 0 + + @overrides + def launch( + self, + request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: + response = super(BayesianOpt, self).launch(request) + self._total_count = request.decision_config_params.num_trials + self._optimizer = BayesianOptimization( + f=None, + pbounds={ + key: (p.min_value, p.max_value) + for key, p in self.actions.items() + }, + verbose=2, + allow_duplicate_points=True, + # random_state=1, + ) + self._utility = UtilityFunction(kind='ucb', kappa=1.96, xi=0.01) + response.display_string = 'BayesianOpt Start' + return response + + # def _params_to_dict(self, dp: sight_pb2) -> Dict[str, float]: + # """Returns the dict representation of a DecisionParams proto""" + # d = {} + # for a in dp: + # d[a.key] = a.value.double_value + # return d + + @overrides + def decision_point( + self, request: service_pb2.DecisionPointRequest + ) -> service_pb2.DecisionPointResponse: + logging.info('DecisionPoint request=%s', request) + print('DecisionPoint request=%s' % request) + + self._lock.acquire() + selected_actions = self._optimizer.suggest(self._utility) + self._lock.release() + + dp_response = service_pb2.DecisionPointResponse() + for key, value in selected_actions.items(): + a = dp_response.action.add() + a.key = key + a.value.sub_type = sight_pb2.Value.ST_DOUBLE + a.value.double_value = float(value) + + print('DecisionPoint response=%s' % dp_response) + dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT + return dp_response + + @overrides + def finalize_episode( + self, request: service_pb2.FinalizeEpisodeRequest + ) -> service_pb2.FinalizeEpisodeResponse: + logging.info('FinalizeEpisode request=%s', request) + d = {} + for a in request.decision_point.choice_params: + d[a.key] = a.value.double_value + + self._lock.acquire() + logging.info('FinalizeEpisode outcome=%s / %s', + request.decision_outcome.reward, d) + self._optimizer.register(params=d, + target=request.decision_outcome.reward) + # 
self._completed_count += 1
+        self._lock.release()
+        return service_pb2.FinalizeEpisodeResponse(response_str='Success!')
+
+    @overrides
+    def current_status(
+        self, request: service_pb2.CurrentStatusRequest
+    ) -> service_pb2.CurrentStatusResponse:
+        output = '[BayesianOpt (#%s trials)\n' % len(self._optimizer.res)
+        for trial in sorted(self._optimizer.res,
+                            key=lambda x: x['target'],
+                            reverse=True):
+            output += '  ' + str(trial) + '\n'
+        output += ']\n'
+
+        if (self._completed_count == self._total_count):
+            status = service_pb2.CurrentStatusResponse.Status.SUCCESS
+        elif (self._completed_count < self._total_count):
+            status = service_pb2.CurrentStatusResponse.Status.IN_PROGRESS
+        else:
+            status = service_pb2.CurrentStatusResponse.Status.FAILURE
+
+        return service_pb2.CurrentStatusResponse(response_str=output,
+                                                 status=status)
+
+    @overrides
+    def WorkerAlive(
+        self, request: service_pb2.WorkerAliveRequest
+    ) -> service_pb2.WorkerAliveResponse:
+        method_name = "WorkerAlive"
+        logging.debug(">>>> In %s of %s", method_name, _file_name)
+        if (self._completed_count == self._total_count):
+            worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_DONE
+        # elif(not self.pending_samples):
+        #   worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_RETRY
+        else:
+            # Increment the count here so that multiple workers can't enter the decision-point call for the same last sample.
+            self._completed_count += 1
+            worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_ACT
+        logging.info("worker_alive_status is %s", worker_alive_status)
+        logging.debug("<<<< Out %s of %s", method_name, _file_name)
+        return service_pb2.WorkerAliveResponse(status_type=worker_alive_status)
diff --git a/sight_service/exhaustive_search.py b/sight_service/exhaustive_search.py
index 954cdfe..5b388eb 100644
--- a/sight_service/exhaustive_search.py
+++ b/sight_service/exhaustive_search.py
@@ -11,10 +11,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Exhaustive search for driving Sight applications."""
 
-import logging
+from helpers.logs.logs_handler import logger as logging
 from overrides import overrides
 from typing import Any, Dict, List, Tuple
 
@@ -25,224 +24,228 @@
 
 _file_name = "exhaustive_search.py"
 
+
 class ExhaustiveSearch(OptimizerInstance):
-  """Exhaustively searches over all the possible values of the action attributes.
+    """Exhaustively searches over all the possible values of the action attributes.
 
   Attributes:
    possible_values: Maps each action attribute to the list of possible
      values of that attribute. 
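    active_samples: Maps each worker_id to the sample that worker is
     currently evaluating.
    complete_samples: Maps each fully evaluated sample to its reward,
     action, and outcome.
    sweep_issue_done: True once every possible combination has been issued.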
""" - def __init__(self): - super().__init__() - self.next_sample_to_issue = [] - self.active_samples = {} - self.complete_samples = {} - self.last_sample = False - self.sweep_issue_done = False - self.possible_values = {} - self.max_reward_sample = {} - self._lock = threading.RLock() - - @overrides - def launch( - self, request: service_pb2.LaunchRequest - ) -> service_pb2.LaunchResponse: - method_name = "launch" - logging.debug(">>>> In %s of %s", method_name, _file_name) - - response = super(ExhaustiveSearch, self).launch(request) - print("self.actions : ", self.actions) - self.next_sample_to_issue = [0] * len(self.actions) - print("self.next_sample_to_issue : ", self.next_sample_to_issue) - - self.possible_values = {} - for i, key in enumerate(sorted(self.actions.keys())): - if self.actions[key].valid_float_values: - self.possible_values[key] = list(self.actions[key].valid_float_values) - elif self.actions[key].step_size: - self.possible_values[key] = [] - cur = self.actions[key].min_value - while cur <= self.actions[key].max_value: - self.possible_values[key].append(cur) - cur += self.actions[key].step_size - - logging.info('possible_values=%s', self.possible_values) - response.display_string = 'Exhaustive Search SUCCESS!' - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return response - - @overrides - def decision_point( - self, request: service_pb2.DecisionPointRequest - ) -> service_pb2.DecisionPointResponse: - method_name = "decision_point" - logging.debug(">>>> In %s of %s", method_name, _file_name) - logging.info( - ( - 'Running for exhaustive search...., last_sample=%s,' - ' sweep_issue_done=%s' - ), - self.last_sample, - self.sweep_issue_done, - ) - logging.info('self.next_sample_to_issue=%s', self.next_sample_to_issue) - # logging.info('self.possible_values=%s', self.possible_values) - - if self.sweep_issue_done: - return service_pb2.DecisionPointResponse(action={}) - - next_action = {} - for i, key in enumerate(self.actions): - next_action[key] = self.possible_values[key][self.next_sample_to_issue[i]] - - self._lock.acquire() - self.active_samples[request.worker_id] = { - 'action': next_action, - 'sample': tuple(self.next_sample_to_issue), - } - if self.last_sample: - self.sweep_issue_done = True - else: - # Advance next_sample_to_issue - num_dims_advanced = 0 - keys = sorted(self.actions.keys()) - for i, key in reversed(list(enumerate(keys))): + def __init__(self): + super().__init__() + self.next_sample_to_issue = [] + self.active_samples = {} + self.complete_samples = {} + self.last_sample = False + self.sweep_issue_done = False + self.possible_values = {} + self.max_reward_sample = {} + self._lock = threading.RLock() + + @overrides + def launch( + self, + request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: + method_name = "launch" + logging.debug(">>>> In %s of %s", method_name, _file_name) + + response = super(ExhaustiveSearch, self).launch(request) + print("self.actions : ", self.actions) + self.next_sample_to_issue = [0] * len(self.actions) + print("self.next_sample_to_issue : ", self.next_sample_to_issue) + + self.possible_values = {} + for i, key in enumerate(sorted(self.actions.keys())): + if self.actions[key].valid_float_values: + self.possible_values[key] = list( + self.actions[key].valid_float_values) + elif self.actions[key].step_size: + self.possible_values[key] = [] + cur = self.actions[key].min_value + while cur <= self.actions[key].max_value: + self.possible_values[key].append(cur) + cur += self.actions[key].step_size + + 
logging.info('possible_values=%s', self.possible_values) + response.display_string = 'Exhaustive Search SUCCESS!' + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return response + + @overrides + def decision_point( + self, request: service_pb2.DecisionPointRequest + ) -> service_pb2.DecisionPointResponse: + method_name = "decision_point" + logging.debug(">>>> In %s of %s", method_name, _file_name) logging.info( - 'Advancing i=%s, key=%s, next_sample=%s, possible_values=%s', - i, - key, - self.next_sample_to_issue[i], - self.possible_values[keys[i]], + ('Running for exhaustive search...., last_sample=%s,' + ' sweep_issue_done=%s'), + self.last_sample, + self.sweep_issue_done, ) - if self.next_sample_to_issue[i] < len(self.possible_values[key]) - 1: - self.next_sample_to_issue[i] += 1 - break + logging.info('self.next_sample_to_issue=%s', self.next_sample_to_issue) + # logging.info('self.possible_values=%s', self.possible_values) + + if self.sweep_issue_done: + return service_pb2.DecisionPointResponse(action={}) + + next_action = {} + for i, key in enumerate(self.actions): + next_action[key] = self.possible_values[key][ + self.next_sample_to_issue[i]] + + self._lock.acquire() + self.active_samples[request.worker_id] = { + 'action': next_action, + 'sample': tuple(self.next_sample_to_issue), + } + if self.last_sample: + self.sweep_issue_done = True else: - self.next_sample_to_issue[i] = 0 - num_dims_advanced += 1 - - self.last_sample = num_dims_advanced == len(self.actions) - self._lock.release() - - logging.info('next_action=%s', next_action) - dp_response = service_pb2.DecisionPointResponse() - dp_response.action.extend(param_dict_to_proto(next_action)) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT - return dp_response - - @overrides - def finalize_episode( - self, request: service_pb2.FinalizeEpisodeRequest - ) -> service_pb2.FinalizeEpisodeResponse: - method_name = "finalize_episode" - logging.debug(">>>> In %s of %s", method_name, _file_name) - # logging.info('Running for exhaustive search....') - # logging.info("req in finalize episode of exhaustive_search.py : %s", request) - - # logging.info('FinalizeEpisode complete_samples=%s' % self.complete_samples) - self._lock.acquire() - self.complete_samples[ - tuple(self.active_samples[request.worker_id]['sample']) - ] = { - 'reward': request.decision_outcome.reward, - 'action': self.active_samples[request.worker_id]['action'], - 'outcome': request.decision_outcome.outcome_params - } - logging.info('FinalizeEpisode complete_samples=%s' % self.complete_samples) - - # if(self.max_reward_sample == {} or self.max_reward_sample['outcome'] < request.decision_outcome.outcome_value): - if(self.max_reward_sample == {} or self.max_reward_sample['reward'] < request.decision_outcome.reward): - self.max_reward_sample = { - # 'outcome': request.decision_outcome.outcome_value, - 'reward': request.decision_outcome.reward, - 'action': self.active_samples[request.worker_id]['action'], - } - self._lock.release() - - del self.active_samples[request.worker_id] - # logging.info('FinalizeEpisode active_samples=%s' % self.active_samples) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return service_pb2.FinalizeEpisodeResponse(response_str='Success!') - - @overrides - def current_status( - self, request: service_pb2.CurrentStatusRequest - ) -> service_pb2.CurrentStatusResponse: - method_name = "current_status" - logging.debug(">>>> In %s of 
%s", method_name, _file_name) - response = ( - '[ExhaustiveSearch: {"Done" if self.sweep_issue_done else "In' - ' Progress"}\n' - ) - self._lock.acquire() - response += f' #active_samples={len(self.active_samples)}\n' - response += ' completed_samples=\n' - response += ', '.join(list(self.actions)) + ', outcome\n' - - cur = [0] * len(self.actions) - # action_keys = list(self.actions.keys()) - keys = sorted(self.actions.keys()) - logging.info('self.complete_samples=%s', self.complete_samples) - - reached_last = False - while not reached_last: - logging.info('cur(#%d)=%s', len(cur), cur) - response += ', '.join( - [str(self.possible_values[key][cur[i]]) for i, key in enumerate(keys)] - ) - if tuple(cur) in self.complete_samples: - response += ', ' + str(self.complete_samples[tuple(cur)]['outcome']) - else: - response += ', ?' - response += '\n' - - # Advance cur, starting from the last dimension and going to the first. - for i, key in reversed(list(enumerate(keys))): - logging.info( - 'i=%d, key=%s, cur=%s, self.possible_values[key]=%s', - i, - key, - cur[i], - self.possible_values[key], - ) - if cur[i] < len(self.possible_values[key]) - 1: - cur[i] += 1 - break + # Advance next_sample_to_issue + num_dims_advanced = 0 + keys = sorted(self.actions.keys()) + for i, key in reversed(list(enumerate(keys))): + logging.info( + 'Advancing i=%s, key=%s, next_sample=%s, possible_values=%s', + i, + key, + self.next_sample_to_issue[i], + self.possible_values[keys[i]], + ) + if self.next_sample_to_issue[i] < len( + self.possible_values[key]) - 1: + self.next_sample_to_issue[i] += 1 + break + else: + self.next_sample_to_issue[i] = 0 + num_dims_advanced += 1 + + self.last_sample = num_dims_advanced == len(self.actions) + self._lock.release() + + logging.info('next_action=%s', next_action) + dp_response = service_pb2.DecisionPointResponse() + dp_response.action.extend(param_dict_to_proto(next_action)) + logging.debug("<<<< Out %s of %s", method_name, _file_name) + dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT + return dp_response + + @overrides + def finalize_episode( + self, request: service_pb2.FinalizeEpisodeRequest + ) -> service_pb2.FinalizeEpisodeResponse: + method_name = "finalize_episode" + logging.debug(">>>> In %s of %s", method_name, _file_name) + # logging.info('Running for exhaustive search....') + # logging.info("req in finalize episode of exhaustive_search.py : %s", request) + + # logging.info('FinalizeEpisode complete_samples=%s' % self.complete_samples) + self._lock.acquire() + self.complete_samples[tuple( + self.active_samples[request.worker_id]['sample'])] = { + 'reward': request.decision_outcome.reward, + 'action': self.active_samples[request.worker_id]['action'], + 'outcome': request.decision_outcome.outcome_params + } + logging.info('FinalizeEpisode complete_samples=%s' % + self.complete_samples) + + # if(self.max_reward_sample == {} or self.max_reward_sample['outcome'] < request.decision_outcome.outcome_value): + if (self.max_reward_sample == {} or self.max_reward_sample['reward'] + < request.decision_outcome.reward): + self.max_reward_sample = { + # 'outcome': request.decision_outcome.outcome_value, + 'reward': request.decision_outcome.reward, + 'action': self.active_samples[request.worker_id]['action'], + } + self._lock.release() + + del self.active_samples[request.worker_id] + # logging.info('FinalizeEpisode active_samples=%s' % self.active_samples) + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return 
service_pb2.FinalizeEpisodeResponse(response_str='Success!')
+
+    @overrides
+    def current_status(
+        self, request: service_pb2.CurrentStatusRequest
+    ) -> service_pb2.CurrentStatusResponse:
+        method_name = "current_status"
+        logging.debug(">>>> In %s of %s", method_name, _file_name)
+        response = (
+            f'[ExhaustiveSearch: '
+            f'{"Done" if self.sweep_issue_done else "In Progress"}\n')
+        self._lock.acquire()
+        response += f'  #active_samples={len(self.active_samples)}\n'
+        response += '  completed_samples=\n'
+        response += ', '.join(list(self.actions)) + ', outcome\n'
+
+        cur = [0] * len(self.actions)
+        # action_keys = list(self.actions.keys())
+        keys = sorted(self.actions.keys())
+        logging.info('self.complete_samples=%s', self.complete_samples)
+
+        reached_last = False
+        while not reached_last:
+            logging.info('cur(#%d)=%s', len(cur), cur)
+            response += ', '.join([
+                str(self.possible_values[key][cur[i]])
+                for i, key in enumerate(keys)
+            ])
+            if tuple(cur) in self.complete_samples:
+                response += ', ' + str(
+                    self.complete_samples[tuple(cur)]['outcome'])
+            else:
+                response += ', ?'
+            response += '\n'
+
+            # Advance cur, starting from the last dimension and going to the first.
+            for i, key in reversed(list(enumerate(keys))):
+                logging.info(
+                    'i=%d, key=%s, cur=%s, self.possible_values[key]=%s',
+                    i,
+                    key,
+                    cur[i],
+                    self.possible_values[key],
+                )
+                if cur[i] < len(self.possible_values[key]) - 1:
+                    cur[i] += 1
+                    break
+                else:
+                    cur[i] = 0
+                    if i == 0:
+                        reached_last = True
+        self._lock.release()
+
+        response += ']'
+        logging.debug("<<<< Out %s of %s", method_name, _file_name)
+        return service_pb2.CurrentStatusResponse(response_str=response)
+
+    @overrides
+    def fetch_optimal_action(
+        self, request: service_pb2.FetchOptimalActionRequest
+    ) -> service_pb2.FetchOptimalActionResponse:
+        method_name = "fetch_optimal_action"
+        logging.debug(">>>> In %s of %s", method_name, _file_name)
+        best_action = self.max_reward_sample
+        print("best_action : ", best_action)
+        logging.debug("<<<< Out %s of %s", method_name, _file_name)
+        # Note: this reuses CurrentStatusResponse as the return payload even
+        # though the signature declares FetchOptimalActionResponse.
+        return service_pb2.CurrentStatusResponse(response_str=str(best_action))
+
+    @overrides
+    def WorkerAlive(
+        self, request: service_pb2.WorkerAliveRequest
+    ) -> service_pb2.WorkerAliveResponse:
+        method_name = "WorkerAlive"
+        logging.debug(">>>> In %s of %s", method_name, _file_name)
+        if (self.sweep_issue_done):
+            worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_DONE
+        # elif(not 
self.pending_samples): - # worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_RETRY - else: - worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_ACT - logging.info("worker_alive_status is %s", worker_alive_status) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return service_pb2.WorkerAliveResponse(status_type=worker_alive_status) + worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_ACT + logging.info("worker_alive_status is %s", worker_alive_status) + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return service_pb2.WorkerAliveResponse(status_type=worker_alive_status) diff --git a/sight_service/genetic_algorithm.py b/sight_service/genetic_algorithm.py index c2a40ab..25e6698 100644 --- a/sight_service/genetic_algorithm.py +++ b/sight_service/genetic_algorithm.py @@ -11,11 +11,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Genetic Algorithms for driving Sight applications.""" from concurrent import futures -import logging +from helpers.logs.logs_handler import logger as logging from overrides import overrides from typing import Any, Dict, List, Tuple @@ -26,385 +25,379 @@ from sight_service.optimizer_instance import OptimizerInstance - class GeneticAlgorithm(OptimizerInstance): - def __init__(self): - super().__init__() - self.ga_population = [] - self.ga_active_samples = {} - self.proposals = [] - self.max_population_size = 40 - self.num_decisions = 0 - self.algorithms_tried = {} - self.algorithms_succeeded_above_min = {} - self.algorithms_succeeded_best = {} - self.history = [] - - @overrides - def launch( - self, request: service_pb2.LaunchRequest - ) -> service_pb2.LaunchResponse: - response = super(GeneticAlgorithm, self).launch(request) - response.display_string = 'Genetic Algorithm Launch SUCCESS!' 
- logging.info( - 'request.genetic_algorithm_config=%s', request.genetic_algorithm_config - ) - # if request.genetic_algorithm_config.max_population_size: - # self.max_population_size = max( - # 3, request.genetic_algorithm_config.max_population_size - # ) - ga_config = request.decision_config_params.choice_config[request.label].genetic_algorithm_config - self.max_population_size = ga_config.max_population_size - return response - - def find_best_worst( - self, options: List[Dict[str, Any]] - ) -> Tuple[float, int, float, int]: - largest_outcome = -math.inf - largest_idx = -1 - smallest_outcome = math.inf - smallest_idx = -1 - sum_outcomes = 0 - for i, unit in enumerate(options): - if unit['outcome'] > largest_outcome: - largest_outcome = unit['outcome'] - largest_idx = i - if unit['outcome'] < smallest_outcome: - smallest_outcome = unit['outcome'] - smallest_idx = i - sum_outcomes += unit['outcome'] - - return ( - largest_outcome, - largest_idx, - smallest_outcome, - smallest_idx, - sum_outcomes, - ) - - def find_best_worst_probweighted( - self, options: List[Dict[str, Any]] - ) -> Tuple[float, int, float, int]: - ( - largest_outcome, - largest_idx, - smallest_outcome, - smallest_idx, - sum_outcomes, - ) = self.find_best_worst(options) - # logging.info('largest_outcome=%s, largest_idx=%s, smallest_outcome=%s, smallest_idx=%s, sum_outcomes=%s', largest_outcome, largest_idx, smallest_outcome, smallest_idx, sum_outcomes) - - sum_of_max_adjusted_outcomes = largest_outcome * len(options) - sum_outcomes - smallest_outcome_choice = random.uniform(0, sum_of_max_adjusted_outcomes) - logging.info( - 'sum_of_max_adjusted_outcomes=%s, smallest_outcome_choice=%s', - sum_of_max_adjusted_outcomes, - smallest_outcome_choice, - ) - - cumulative_outcomes_sum = 0 - smallest_outcome = math.inf - smallest_idx = -1 - for i, unit in enumerate(options): - cumulative_outcomes_sum += largest_outcome - unit['outcome'] - # logging.info('unit[outcome]=%s, cumulative_outcomes_sum=%s, found=%s', unit['outcome'], cumulative_outcomes_sum, smallest_outcome_choice < cumulative_outcomes_sum) - if smallest_outcome_choice <= cumulative_outcomes_sum: - return largest_outcome, largest_idx, unit['outcome'], i - - logging.error( + def __init__(self): + super().__init__() + self.ga_population = [] + self.ga_active_samples = {} + self.proposals = [] + self.max_population_size = 40 + self.num_decisions = 0 + self.algorithms_tried = {} + self.algorithms_succeeded_above_min = {} + self.algorithms_succeeded_best = {} + self.history = [] + + @overrides + def launch( + self, + request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: + response = super(GeneticAlgorithm, self).launch(request) + response.display_string = 'Genetic Algorithm Launch SUCCESS!' 
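+        # The population cap used below comes from the per-label
+        # genetic_algorithm_config nested in decision_config_params; the
+        # top-level request.genetic_algorithm_config is only logged here.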
+ logging.info('request.genetic_algorithm_config=%s', + request.genetic_algorithm_config) + # if request.genetic_algorithm_config.max_population_size: + # self.max_population_size = max( + # 3, request.genetic_algorithm_config.max_population_size + # ) + ga_config = request.decision_config_params.choice_config[ + request.label].genetic_algorithm_config + self.max_population_size = ga_config.max_population_size + return response + + def find_best_worst( + self, options: List[Dict[str, + Any]]) -> Tuple[float, int, float, int]: + largest_outcome = -math.inf + largest_idx = -1 + smallest_outcome = math.inf + smallest_idx = -1 + sum_outcomes = 0 + for i, unit in enumerate(options): + if unit['outcome'] > largest_outcome: + largest_outcome = unit['outcome'] + largest_idx = i + if unit['outcome'] < smallest_outcome: + smallest_outcome = unit['outcome'] + smallest_idx = i + sum_outcomes += unit['outcome'] + + return ( + largest_outcome, + largest_idx, + smallest_outcome, + smallest_idx, + sum_outcomes, + ) + + def find_best_worst_probweighted( + self, options: List[Dict[str, + Any]]) -> Tuple[float, int, float, int]: ( - 'WARNING: smallest_outcome_choice=%s,' - ' sum_of_max_adjusted_outcomes=%s but we failed to find the index' - ' of this unit' - ), - smallest_outcome_choice, - sum_of_max_adjusted_outcomes, - ) - return largest_outcome, largest_idx, smallest_outcome, smallest_idx - - @overrides - def decision_point( - self, request: service_pb2.DecisionPointRequest - ) -> service_pb2.DecisionPointResponse: - logging.info( - '%s| ga_population(#%d)=', request.worker_id, len(self.ga_population) - ) - for member in sorted( - self.ga_population, key=lambda p: p['outcome'], reverse=True - ): - logging.info( - '%s| %s: %s', request.worker_id, member['outcome'], member['action'] - ) - - self.num_decisions += 1 - if ( - len(self.ga_population) < self.max_population_size - or random.randint(1, 100) <= 5 - ): - algorithm = 'random_sample' - # Randomly sample an action. 
- next_action = {} - for key in self.actions.keys(): - next_action[key] = random.uniform( - self.actions[key].min_value, self.actions[key].max_value + largest_outcome, + largest_idx, + smallest_outcome, + smallest_idx, + sum_outcomes, + ) = self.find_best_worst(options) + # logging.info('largest_outcome=%s, largest_idx=%s, smallest_outcome=%s, smallest_idx=%s, sum_outcomes=%s', largest_outcome, largest_idx, smallest_outcome, smallest_idx, sum_outcomes) + + sum_of_max_adjusted_outcomes = largest_outcome * len( + options) - sum_outcomes + smallest_outcome_choice = random.uniform(0, + sum_of_max_adjusted_outcomes) + logging.info( + 'sum_of_max_adjusted_outcomes=%s, smallest_outcome_choice=%s', + sum_of_max_adjusted_outcomes, + smallest_outcome_choice, ) - # logging.info(" [%s - %s]: %s", self.actions[key].min_value, - # self.actions[key].max_value, - # next_action[key]) - if len(self.ga_population) >= self.max_population_size: - largest_outcome, largest_idx, smallest_outcome, smallest_idx = ( - self.find_best_worst_probweighted(self.ga_population) + cumulative_outcomes_sum = 0 + smallest_outcome = math.inf + smallest_idx = -1 + for i, unit in enumerate(options): + cumulative_outcomes_sum += largest_outcome - unit['outcome'] + # logging.info('unit[outcome]=%s, cumulative_outcomes_sum=%s, found=%s', unit['outcome'], cumulative_outcomes_sum, smallest_outcome_choice < cumulative_outcomes_sum) + if smallest_outcome_choice <= cumulative_outcomes_sum: + return largest_outcome, largest_idx, unit['outcome'], i + + logging.error( + ('WARNING: smallest_outcome_choice=%s,' + ' sum_of_max_adjusted_outcomes=%s but we failed to find the index' + ' of this unit'), + smallest_outcome_choice, + sum_of_max_adjusted_outcomes, ) - # Remove the worst member of the population - del self.ga_population[smallest_idx] - - logging.info( - '%s| Randomly sample: next_action : %s', - request.worker_id, - next_action, - ) - else: - largest_outcome, largest_idx, smallest_outcome, smallest_idx = ( - self.find_best_worst_probweighted(self.ga_population) - ) - - # logging.info('Retrying largest=%s', self.ga_population[spouse_idx]) - # next_action = dict(self.ga_population[largest_idx]['action']) - # # Remove the chosen member of the population - # logging.info('deleting largest unit=%s', self.ga_population[largest_idx]) - # del self.ga_population[largest_idx] - - if self.proposals and random.randint(0, 10) < 5: - ( - prop_largest_outcome, - prop_largest_idx, - prop_smallest_outcome, - prop_smallest_idx, - ) = self.find_best_worst_probweighted(self.proposals) + return largest_outcome, largest_idx, smallest_outcome, smallest_idx + + @overrides + def decision_point( + self, request: service_pb2.DecisionPointRequest + ) -> service_pb2.DecisionPointResponse: + logging.info('%s| ga_population(#%d)=', request.worker_id, + len(self.ga_population)) + for member in sorted(self.ga_population, + key=lambda p: p['outcome'], + reverse=True): + logging.info('%s| %s: %s', request.worker_id, member['outcome'], + member['action']) + + self.num_decisions += 1 + if (len(self.ga_population) < self.max_population_size + or random.randint(1, 100) <= 5): + algorithm = 'random_sample' + # Randomly sample an action. 
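+            # A fresh candidate: each action attribute is drawn uniformly
+            # at random from its declared [min_value, max_value] range.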
+ next_action = {} + for key in self.actions.keys(): + next_action[key] = random.uniform(self.actions[key].min_value, + self.actions[key].max_value) + # logging.info(" [%s - %s]: %s", self.actions[key].min_value, + # self.actions[key].max_value, + # next_action[key]) + + if len(self.ga_population) >= self.max_population_size: + largest_outcome, largest_idx, smallest_outcome, smallest_idx = ( + self.find_best_worst_probweighted(self.ga_population)) + # Remove the worst member of the population + del self.ga_population[smallest_idx] + + logging.info( + '%s| Randomly sample: next_action : %s', + request.worker_id, + next_action, + ) + else: + largest_outcome, largest_idx, smallest_outcome, smallest_idx = ( + self.find_best_worst_probweighted(self.ga_population)) + + # logging.info('Retrying largest=%s', self.ga_population[spouse_idx]) + # next_action = dict(self.ga_population[largest_idx]['action']) + # # Remove the chosen member of the population + # logging.info('deleting largest unit=%s', self.ga_population[largest_idx]) + # del self.ga_population[largest_idx] + + if self.proposals and random.randint(0, 10) < 5: + ( + prop_largest_outcome, + prop_largest_idx, + prop_smallest_outcome, + prop_smallest_idx, + ) = self.find_best_worst_probweighted(self.proposals) + logging.info( + '%s| Best proposal: %s: %s', + request.worker_id, + self.proposals[prop_largest_idx]['outcome'], + self.proposals[prop_largest_idx]['action'], + ) + next_action = self.proposals[prop_largest_idx]['action'] + algorithm = 'best_proposal' + del self.proposals[prop_largest_idx] + else: + spouse_idx = random.randint(0, len(self.ga_population) - 1) + # logging.info('smallest_idx=%s, largest_idx=%s, spouse_idx=%s', + # smallest_idx, largest_idx, spouse_idx) + while spouse_idx == smallest_idx or spouse_idx == largest_idx: + spouse_idx = (spouse_idx + 1) % len(self.ga_population) + logging.info( + '%s| smallest_idx=%s, largest_idx=%s, spouse_idx=%s', + request.worker_id, + smallest_idx, + largest_idx, + spouse_idx, + ) + + if random.randint(0, 9) > 4: + # Mate largest_idx and spouse_idx + logging.info( + '%s| Mating largest unit=%s : %s', + request.worker_id, + self.ga_population[largest_idx]['outcome'], + self.ga_population[largest_idx]['action'], + ) + logging.info( + '%s| and spouse=%s : %s', + request.worker_id, + self.ga_population[spouse_idx]['outcome'], + self.ga_population[spouse_idx]['action'], + ) + next_action = {} + keys = sorted(self.actions.keys()) + cross_idx = random.randint(0, len(keys) - 1) + logging.info('%s| at cross_idx=%d', request.worker_id, + cross_idx) + for i, key in enumerate(keys): + if i < cross_idx: + next_action[key] = self.ga_population[spouse_idx][ + 'action'][key] + else: + next_action[key] = self.ga_population[largest_idx][ + 'action'][key] + algorithm = 'mating' + else: + mutation_prob = random.randint(0, 100) + logging.info( + '%s| mutating mutation_prob=%s, spouse=%s: %s', + request.worker_id, + mutation_prob, + self.ga_population[spouse_idx]['outcome'], + self.ga_population[spouse_idx]['action'], + ) + next_action = {} + for key in self.actions.keys(): + if random.randint(0, 999) <= mutation_prob: + next_action[key] = random.uniform( + self.actions[key].min_value, + self.actions[key].max_value) + # next_action[key] = self.ga_population[spouse_idx]['action'][key] * random.uniform(.9, 1.1) + # if next_action[key] < self.actions[key].min_value: + # next_action[key] = self.actions[key].min_value + # elif next_action[key] > self.actions[key].max_value: + # next_action[key] = 
self.actions[key].max_value + else: + next_action[key] = self.ga_population[spouse_idx][ + 'action'][key] + # logging.info('received_action[%s]=%s original=%s', key, next_action[key], claim_year_sold_delay) + algorithm = f'mutating_{mutation_prob}' + logging.info('%s| new next_action=%s', request.worker_id, + next_action) + + # Remove the worst member of the population + # logging.info('deleting smallest unit=%s', self.ga_population[smallest_idx]) + del self.ga_population[smallest_idx] + + self.ga_active_samples[request.worker_id] = { + 'action': next_action, + 'algorithm': algorithm, + } + + dp_response = service_pb2.DecisionPointResponse() + dp_response.action.extend(param_dict_to_proto(next_action)) + dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT + return dp_response + + @overrides + def finalize_episode( + self, request: service_pb2.FinalizeEpisodeRequest + ) -> service_pb2.FinalizeEpisodeResponse: + algorithm = self.ga_active_samples[request.worker_id]['algorithm'] + if algorithm not in self.algorithms_tried: + self.algorithms_tried[algorithm] = 0 + self.algorithms_succeeded_above_min[algorithm] = 0 + self.algorithms_succeeded_best[algorithm] = 0 + self.algorithms_tried[algorithm] += 1 + + if self.ga_population: + largest_outcome, largest_idx, smallest_outcome, smallest_idx = ( + self.find_best_worst_probweighted(self.ga_population)) + if request.decision_outcome.reward >= smallest_outcome: + self.algorithms_succeeded_above_min[algorithm] += 1 + if request.decision_outcome.reward >= largest_outcome: + self.algorithms_succeeded_best[algorithm] += 1 + + self.ga_population.append({ + 'outcome': + request.decision_outcome.reward, + 'action': + self.ga_active_samples[request.worker_id]['action'], + }) + self.history.append({ + 'algorithm': + algorithm, + 'outcome': + request.decision_outcome.reward, + 'action': + self.ga_active_samples[request.worker_id]['action'], + 'worker_id': + request.worker_id, + }) logging.info( - '%s| Best proposal: %s: %s', + '%s| FinalizeEpisode member=%s: %s / %s', request.worker_id, - self.proposals[prop_largest_idx]['outcome'], - self.proposals[prop_largest_idx]['action'], + request.decision_outcome.reward, + self.ga_active_samples[request.worker_id]['algorithm'], + self.ga_active_samples[request.worker_id]['action'], ) - next_action = self.proposals[prop_largest_idx]['action'] - algorithm = 'best_proposal' - del self.proposals[prop_largest_idx] - else: - spouse_idx = random.randint(0, len(self.ga_population) - 1) - # logging.info('smallest_idx=%s, largest_idx=%s, spouse_idx=%s', - # smallest_idx, largest_idx, spouse_idx) - while spouse_idx == smallest_idx or spouse_idx == largest_idx: - spouse_idx = (spouse_idx + 1) % len(self.ga_population) + del self.ga_active_samples[request.worker_id] logging.info( - '%s| smallest_idx=%s, largest_idx=%s, spouse_idx=%s', + '%s| FinalizeEpisode #ga_active_samples=%s', request.worker_id, - smallest_idx, - largest_idx, - spouse_idx, + len(self.ga_active_samples), ) + return service_pb2.FinalizeEpisodeResponse(response_str='Success!') + + @overrides + def current_status( + self, request: service_pb2.CurrentStatusRequest + ) -> service_pb2.CurrentStatusResponse: + response = ( + f'[GeneticAlgorithm (max_population_size={self.max_population_size},' + f' num_decisions={self.num_decisions}):\n') + response += f' ga_population(#{len(self.ga_population)}):\n' + keys = sorted(self.actions.keys()) + response += ' idx,outcome,' + ','.join(keys) + '\n' + for i, unit in enumerate( + 
sorted(self.ga_population, + key=lambda p: p['outcome'], + reverse=True)): + response += (f' {i},{unit["outcome"]:.5F},' + + ','.join([str(unit['action'][key]) + for key in keys]) + '\n') + + response += f' ga_active_samples(#{len(self.ga_active_samples)}):\n' + response += ' worker_id,algorithm,' + ','.join(keys) + '\n' + for worker_id, sample in self.ga_active_samples.items(): + response += (f' {worker_id},{sample["algorithm"]},' + + ','.join([str(sample['action'][key]) + for key in keys]) + '\n') + response += ']' + + response += f' proposals(#{len(self.proposals)}):\n' + response += ' idx,outcome,' + ','.join(keys) + '\n' + for i, unit in enumerate( + sorted(self.proposals, + key=lambda p: p['outcome'], + reverse=True)): + response += (f' {i},{unit["outcome"]:.5F},' + + ','.join([str(unit['action'][key]) + for key in keys]) + '\n') + if i > 50: + break + + response += f' algorithms:\n' + for algorithm in sorted(self.algorithms_tried.keys()): + response += ( + ' %s: tried=%s, algorithms_succeeded_above_min=%.4E,' + ' algorithms_succeeded_best=%.4E\n' % ( + algorithm, + self.algorithms_tried[algorithm], + self.algorithms_succeeded_above_min[algorithm] / + self.algorithms_tried[algorithm], + self.algorithms_succeeded_best[algorithm] / + self.algorithms_tried[algorithm], + )) + + response += ' history:\n' + for i, h in enumerate(self.history): + response += ' %d: %s\n' % (i, h) + + return service_pb2.CurrentStatusResponse(response_str=response) + + def propose_action( + self, request: service_pb2.ProposeActionRequest + ) -> service_pb2.ProposeActionResponse: + action = {} + for key, value in request.action.items(): + action[key] = value - if random.randint(0, 9) > 4: - # Mate largest_idx and spouse_idx - logging.info( - '%s| Mating largest unit=%s : %s', - request.worker_id, - self.ga_population[largest_idx]['outcome'], - self.ga_population[largest_idx]['action'], - ) - logging.info( - '%s| and spouse=%s : %s', - request.worker_id, - self.ga_population[spouse_idx]['outcome'], - self.ga_population[spouse_idx]['action'], - ) - next_action = {} - keys = sorted(self.actions.keys()) - cross_idx = random.randint(0, len(keys) - 1) - logging.info('%s| at cross_idx=%d', request.worker_id, cross_idx) - for i, key in enumerate(keys): - if i < cross_idx: - next_action[key] = self.ga_population[spouse_idx]['action'][key] - else: - next_action[key] = self.ga_population[largest_idx]['action'][key] - algorithm = 'mating' + largest_outcome, largest_idx, smallest_outcome, smallest_idx = ( + self.find_best_worst_probweighted(self.ga_population)) + if request.outcome.reward >= smallest_outcome: + self.proposals.append({ + 'action': action, + 'outcome': request.outcome.reward, + }) + logging.info( + '%s| Accepted Proposal %s: %s', + request.worker_id, + request.outcome.reward, + action, + ) else: - mutation_prob = random.randint(0, 100) - logging.info( - '%s| mutating mutation_prob=%s, spouse=%s: %s', - request.worker_id, - mutation_prob, - self.ga_population[spouse_idx]['outcome'], - self.ga_population[spouse_idx]['action'], - ) - next_action = {} - for key in self.actions.keys(): - if random.randint(0, 999) <= mutation_prob: - next_action[key] = random.uniform( - self.actions[key].min_value, self.actions[key].max_value - ) - # next_action[key] = self.ga_population[spouse_idx]['action'][key] * random.uniform(.9, 1.1) - # if next_action[key] < self.actions[key].min_value: - # next_action[key] = self.actions[key].min_value - # elif next_action[key] > self.actions[key].max_value: - # next_action[key] = 
self.actions[key].max_value - else: - next_action[key] = self.ga_population[spouse_idx]['action'][key] - # logging.info('received_action[%s]=%s original=%s', key, next_action[key], claim_year_sold_delay) - algorithm = f'mutating_{mutation_prob}' - logging.info('%s| new next_action=%s', request.worker_id, next_action) - - # Remove the worst member of the population - # logging.info('deleting smallest unit=%s', self.ga_population[smallest_idx]) - del self.ga_population[smallest_idx] - - self.ga_active_samples[request.worker_id] = { - 'action': next_action, - 'algorithm': algorithm, - } - - dp_response = service_pb2.DecisionPointResponse() - dp_response.action.extend(param_dict_to_proto(next_action)) - dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT - return dp_response - - @overrides - def finalize_episode( - self, request: service_pb2.FinalizeEpisodeRequest - ) -> service_pb2.FinalizeEpisodeResponse: - algorithm = self.ga_active_samples[request.worker_id]['algorithm'] - if algorithm not in self.algorithms_tried: - self.algorithms_tried[algorithm] = 0 - self.algorithms_succeeded_above_min[algorithm] = 0 - self.algorithms_succeeded_best[algorithm] = 0 - self.algorithms_tried[algorithm] += 1 - - if self.ga_population: - largest_outcome, largest_idx, smallest_outcome, smallest_idx = ( - self.find_best_worst_probweighted(self.ga_population) - ) - if request.decision_outcome.reward >= smallest_outcome: - self.algorithms_succeeded_above_min[algorithm] += 1 - if request.decision_outcome.reward >= largest_outcome: - self.algorithms_succeeded_best[algorithm] += 1 - - self.ga_population.append({ - 'outcome': request.decision_outcome.reward, - 'action': self.ga_active_samples[request.worker_id]['action'], - }) - self.history.append({ - 'algorithm': algorithm, - 'outcome': request.decision_outcome.reward, - 'action': self.ga_active_samples[request.worker_id]['action'], - 'worker_id': request.worker_id, - }) - logging.info( - '%s| FinalizeEpisode member=%s: %s / %s', - request.worker_id, - request.decision_outcome.reward, - self.ga_active_samples[request.worker_id]['algorithm'], - self.ga_active_samples[request.worker_id]['action'], - ) - del self.ga_active_samples[request.worker_id] - logging.info( - '%s| FinalizeEpisode #ga_active_samples=%s', - request.worker_id, - len(self.ga_active_samples), - ) - return service_pb2.FinalizeEpisodeResponse(response_str='Success!') - - @overrides - def current_status( - self, request: service_pb2.CurrentStatusRequest - ) -> service_pb2.CurrentStatusResponse: - response = ( - f'[GeneticAlgorithm (max_population_size={self.max_population_size},' - f' num_decisions={self.num_decisions}):\n' - ) - response += f' ga_population(#{len(self.ga_population)}):\n' - keys = sorted(self.actions.keys()) - response += ' idx,outcome,' + ','.join(keys) + '\n' - for i, unit in enumerate( - sorted(self.ga_population, key=lambda p: p['outcome'], reverse=True) - ): - response += ( - f' {i},{unit["outcome"]:.5F},' - + ','.join([str(unit['action'][key]) for key in keys]) - + '\n' - ) - - response += f' ga_active_samples(#{len(self.ga_active_samples)}):\n' - response += ' worker_id,algorithm,' + ','.join(keys) + '\n' - for worker_id, sample in self.ga_active_samples.items(): - response += ( - f' {worker_id},{sample["algorithm"]},' - + ','.join([str(sample['action'][key]) for key in keys]) - + '\n' - ) - response += ']' - - response += f' proposals(#{len(self.proposals)}):\n' - response += ' idx,outcome,' + ','.join(keys) + '\n' - for i, unit in enumerate( - 
sorted(self.proposals, key=lambda p: p['outcome'], reverse=True)
-    ):
-      response += (
-          f'    {i},{unit["outcome"]:.5F},'
-          + ','.join([str(unit['action'][key]) for key in keys])
-          + '\n'
-      )
-      if i > 50:
-        break
-
-    response += f'  algorithms:\n'
-    for algorithm in sorted(self.algorithms_tried.keys()):
-      response += (
-          '    %s: tried=%s, algorithms_succeeded_above_min=%.4E,'
-          ' algorithms_succeeded_best=%.4E\n'
-          % (
-              algorithm,
-              self.algorithms_tried[algorithm],
-              self.algorithms_succeeded_above_min[algorithm]
-              / self.algorithms_tried[algorithm],
-              self.algorithms_succeeded_best[algorithm]
-              / self.algorithms_tried[algorithm],
-          )
-      )
-
-    response += '  history:\n'
-    for i, h in enumerate(self.history):
-      response += '    %d: %s\n' % (i, h)
-
-    return service_pb2.CurrentStatusResponse(response_str=response)
-
-  def propose_action(
-      self, request: service_pb2.ProposeActionRequest
-  ) -> service_pb2.ProposeActionResponse:
-    action = {}
-    for key, value in request.action.items():
-      action[key] = value
-
-    largest_outcome, largest_idx, smallest_outcome, smallest_idx = (
-        self.find_best_worst_probweighted(self.ga_population)
-    )
-    if request.outcome.reward >= smallest_outcome:
-      self.proposals.append({
-          'action': action,
-          'outcome': request.outcome.reward,
-      })
-      logging.info(
-          '%s| Accepted Proposal %s: %s',
-          request.worker_id,
-          request.outcome.reward,
-          action,
-      )
-    else:
-      logging.info(
-          '%s| Rejected Proposal %s: %s',
-          request.worker_id,
-          request.outcome.reward,
-          action,
-      )
-    return service_pb2.ProposeActionResponse()
+        largest_outcome, largest_idx, smallest_outcome, smallest_idx = (
+            self.find_best_worst_probweighted(self.ga_population))
+        if request.outcome.reward >= smallest_outcome:
+            self.proposals.append({
+                'action': action,
+                'outcome': request.outcome.reward,
+            })
+            logging.info(
+                '%s| Accepted Proposal %s: %s',
+                request.worker_id,
+                request.outcome.reward,
+                action,
+            )
+        else:
+            logging.info(
+                '%s| Rejected Proposal %s: %s',
+                request.worker_id,
+                request.outcome.reward,
+                action,
+            )
+        return service_pb2.ProposeActionResponse()
diff --git a/sight_service/llm.py b/sight_service/llm.py
index f887c85..bd99144 100644
--- a/sight_service/llm.py
+++ b/sight_service/llm.py
@@ -11,13 +11,12 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """LLM-based optimization for driving Sight applications."""
 
 from concurrent import futures
 import json
-import logging
+from helpers.logs.logs_handler import logger as logging
 import os
 import random
 import threading
 from typing import Any, Dict, List, Optional, Tuple
@@ -37,755 +36,695 @@
 
 
 class LLM(OptimizerInstance):
-  """Uses an LLM to choose the parameters of the code.
+    """Uses an LLM to choose the parameters of the code.
 
   Attributes:
    script: The script of the conversation accrued so far. 
""" - def __init__(self): - super().__init__() - # genai.configure(api_key=_GENAI_API_KEY) - genai.configure(api_key="_GENAI_API_KEY") - self._intro = '' - self._history = [] - self._actions_to_do = [] - self._history_len_for_prompt = 20 - self._num_decision_points = 0 - # self.last_outcome = None - self._lock = threading.RLock() - self._waiting_on_tell = False - self._response_ready = False - self._response_for_listen = '' - self._waiting_on_llm_response = False - - def _attr_summary( - self, key: str, attr: sight_pb2.DecisionConfigurationStart.AttrProps - ) -> str: - """Returns a summary of an attribute for the LLM.""" - if attr.min_value < attr.max_value: - return ( - f'"{key}": {{ "description": {attr.description}, "min_value":' - f' {attr.min_value}, "max_value": {attr.max_value} }},' - ) - return f'"{key}": {{ "description": {attr.description} }},' - - @overrides - def launch( - self, request: service_pb2.LaunchRequest - ) -> service_pb2.LaunchResponse: - response = super(LLM, self).launch(request) - logging.info('LLM request=%s', request) - self._llm_config = request.decision_config_params.choice_config[ - request.label - ].llm_config - logging.info('LLM config=%s', self._llm_config) - self._bayesian_opt = BayesianOpt() - self._bayesian_opt.launch(request) - - self._intro += '' - if ( - self._llm_config.goal - == sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_OPTIMIZE - ): - self._intro = ( - 'You are controlling an agent that is trying to reach a goal. The' - ' agent is described as follows.\n' - ) - self._intro += f'"{self._llm_config.description}"\n' - if ( - self._llm_config.goal - == sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_OPTIMIZE - ): - self._intro += ( - 'The simulation will periodically report its state and then ask you ' - + 'to select an action for it to perform. After it has performed' - ' this ' - + 'action it will report back the numeric outcome of the this' - ' action. ' - + 'Higher outcome values are better than low outcome values. 
Your' - ' job ' - + 'is to choose actions that maximize the outcome values.\n' - ) - if len(self.state) > 0: - self._intro += ( - 'The state of the simulation consists of the following attributes: \n' - ) - self._intro += ( - ' {\n ' - + '\n '.join( - [self._attr_summary(key, p) for key, p in self.state.items()] - ) - + '}\n' - ) - self._intro += 'The possible actions you need to select are: \n' - self._intro += ( - ' {\n ' - + '\n '.join( - [self._attr_summary(key, p) for key, p in self.actions.items()] + def __init__(self): + super().__init__() + # genai.configure(api_key=_GENAI_API_KEY) + genai.configure(api_key="_GENAI_API_KEY") + self._intro = '' + self._history = [] + self._actions_to_do = [] + self._history_len_for_prompt = 20 + self._num_decision_points = 0 + # self.last_outcome = None + self._lock = threading.RLock() + self._waiting_on_tell = False + self._response_ready = False + self._response_for_listen = '' + self._waiting_on_llm_response = False + + def _attr_summary( + self, key: str, + attr: sight_pb2.DecisionConfigurationStart.AttrProps) -> str: + """Returns a summary of an attribute for the LLM.""" + if attr.min_value < attr.max_value: + return ( + f'"{key}": {{ "description": {attr.description}, "min_value":' + f' {attr.min_value}, "max_value": {attr.max_value} }},') + return f'"{key}": {{ "description": {attr.description} }},' + + @overrides + def launch( + self, + request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: + response = super(LLM, self).launch(request) + logging.info('LLM request=%s', request) + self._llm_config = request.decision_config_params.choice_config[ + request.label].llm_config + logging.info('LLM config=%s', self._llm_config) + self._bayesian_opt = BayesianOpt() + self._bayesian_opt.launch(request) + + self._intro += '' + if (self._llm_config.goal == sight_pb2.DecisionConfigurationStart. + LLMConfig.LLMGoal.LM_OPTIMIZE): + self._intro = ( + 'You are controlling an agent that is trying to reach a goal. The' + ' agent is described as follows.\n') + self._intro += f'"{self._llm_config.description}"\n' + if (self._llm_config.goal == sight_pb2.DecisionConfigurationStart. + LLMConfig.LLMGoal.LM_OPTIMIZE): + self._intro += ( + 'The simulation will periodically report its state and then ask you ' + + + 'to select an action for it to perform. After it has performed' + ' this ' + + 'action it will report back the numeric outcome of the this' + ' action. ' + + 'Higher outcome values are better than low outcome values. Your' + ' job ' + + 'is to choose actions that maximize the outcome values.\n') + if len(self.state) > 0: + self._intro += ( + 'The state of the simulation consists of the following attributes: \n' + ) + self._intro += (' {\n ' + '\n '.join( + [self._attr_summary(key, p) + for key, p in self.state.items()]) + '}\n') + self._intro += 'The possible actions you need to select are: \n' + self._intro += (' {\n ' + '\n '.join( + [self._attr_summary(key, p) + for key, p in self.actions.items()]) + '}\n') + self._intro += 'The possible outcomes you will observe are: \n' + self._intro += (' {\n ' + '\n '.join( + [self._attr_summary(key, p) + for key, p in self.outcomes.items()]) + '}\n') + self._intro += '========================\n' + + logging.info( + 'INTERACTIVE=%s', + self._llm_config.goal == sight_pb2.DecisionConfigurationStart. 
+ LLMConfig.LLMGoal.LM_INTERACTIVE, ) - + '}\n' - ) - self._intro += 'The possible outcomes you will observe are: \n' - self._intro += ( - ' {\n ' - + '\n '.join( - [self._attr_summary(key, p) for key, p in self.outcomes.items()] + if (self._llm_config.goal == sight_pb2.DecisionConfigurationStart. + LLMConfig.LLMGoal.LM_INTERACTIVE): + self._waiting_on_tell = True + self._response_ready = False + self._response_for_listen = '' + self._waiting_on_llm_response = False + else: + detail_prompt = ( + 'Please summarize everything you know about these parameters for the' + ' above application area, detail the steps that need to be taken to' + ' create a good estimate these parameters.\n') + self._intro += (detail_prompt + + self._ask(self._intro + detail_prompt) + '\n') + + detail_prompt = ( + 'Based on this plan describe the most reasonable estimate of these' + ' parameters\n') + self._intro += (detail_prompt + + self._ask(self._intro + detail_prompt) + '\n') + + response.display_string = 'LLM SUCCESS! ' + self._intro + logging.info('self._intro=%s', self._intro) + return response + + def _random_state(self) -> Dict[str, float]: + """Returns a random state.""" + s = {} + for key, p in self.state.items(): + s[key] = (p.max_value - + p.min_value) * random.random() + p.min_value + return s + + def _random_action(self) -> Dict[str, float]: + """Returns a random action.""" + a = {} + for key, p in self.actions.items(): + a[key] = (p.max_value - + p.min_value) * random.random() + p.min_value + return a + + def _random_outcome(self) -> Dict[str, float]: + """Returns a random outcome.""" + o = {} + for key, p in self.outcomes.items(): + o[key] = (p.max_value - + p.min_value) * random.random() + p.min_value + return o + + def _random_event(self) -> Dict[str, Any]: + return { + 'state': self._random_state(), + 'action': self._random_action(), + 'outcome': self._random_outcome(), + # random.random(), + } + + def _filtered_history(self, include_example_action: bool) -> List[Any]: + ordered_history = self._history[:-1].copy() + # logging.info( + # '#hist=%d ordered_history[#%d]=%s', + # len(self._history), + # len(ordered_history), + # ordered_history, + # ) + ordered_history = sorted( + ordered_history, + key=lambda h: -h['outcome'] + if 'outcome' in h and isinstance(h['outcome'], float) else 0, ) - + '}\n' - ) - self._intro += '========================\n' - - logging.info( - 'INTERACTIVE=%s', - self._llm_config.goal - == sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_INTERACTIVE, - ) - if ( - self._llm_config.goal - == sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_INTERACTIVE - ): - self._waiting_on_tell = True - self._response_ready = False - self._response_for_listen = '' - self._waiting_on_llm_response = False - else: - detail_prompt = ( - 'Please summarize everything you know about these parameters for the' - ' above application area, detail the steps that need to be taken to' - ' create a good estimate these parameters.\n' - ) - self._intro += ( - detail_prompt + self._ask(self._intro + detail_prompt) + '\n' - ) - - detail_prompt = ( - 'Based on this plan describe the most reasonable estimate of these' - ' parameters\n' - ) - self._intro += ( - detail_prompt + self._ask(self._intro + detail_prompt) + '\n' - ) - - response.display_string = 'LLM SUCCESS! 
' + self._intro - logging.info('self._intro=%s', self._intro) - return response - - def _random_state(self) -> Dict[str, float]: - """Returns a random state.""" - s = {} - for key, p in self.state.items(): - s[key] = (p.max_value - p.min_value) * random.random() + p.min_value - return s - - def _random_action(self) -> Dict[str, float]: - """Returns a random action.""" - a = {} - for key, p in self.actions.items(): - a[key] = (p.max_value - p.min_value) * random.random() + p.min_value - return a - - def _random_outcome(self) -> Dict[str, float]: - """Returns a random outcome.""" - o = {} - for key, p in self.outcomes.items(): - o[key] = (p.max_value - p.min_value) * random.random() + p.min_value - return o - - def _random_event(self) -> Dict[str, Any]: - return { - 'state': self._random_state(), - 'action': self._random_action(), - 'outcome': self._random_outcome(), - # random.random(), - } - - def _filtered_history(self, include_example_action: bool) -> List[Any]: - ordered_history = self._history[:-1].copy() - # logging.info( - # '#hist=%d ordered_history[#%d]=%s', - # len(self._history), - # len(ordered_history), - # ordered_history, - # ) - ordered_history = sorted( - ordered_history, - key=lambda h: -h['outcome'] - if 'outcome' in h and isinstance(h['outcome'], float) - else 0, - ) - if len(ordered_history) > self._history_len_for_prompt: - ordered_history = ordered_history[0 : self._history_len_for_prompt - 1] - random.shuffle(ordered_history) - - # If this is the first question, add a random event to serve as an example - # of the format. - # if include_example_action and len(ordered_history) == 0: - # ordered_history.append(self._random_event()) - - logging.info( - 'ordered_history[#%d]=%s', - len(ordered_history), - ordered_history, - ) - # if worker_id is None: - if len(self._history) == 0: - return ordered_history - return ordered_history + [self._history[-1]] - - def _hist_event_to_text( - self, event: Dict, last_outcome: float, is_last_event: bool - ) -> Tuple[str, Any]: - t = '' - if len(event['state']) > 0: - t += 'Decision State:\n' - t += ( - ' {' - + ', '.join([f'"{k}": {v}' for k, v in event['state'].items()]) - + '}\n' - ) - # t += 'Decision Action (json format): ' - if event['action'] is not None or is_last_event: - t += 'Simulation parameters (json format): ' - if event['action'] is not None: - t += ( - ' {' - + ', '.join( - [f'"{key}": {value}' for key, value in event['action'].items()] - ) - + '}\n' - ) - if event['outcome'] is not None: - # t += 'Decision Outcome: ' + str(event['outcome']) + '\n' - t += 'Simulation Outcome (json format): ' + str(event['outcome']) + '\n' - if ( - self._llm_config.goal - != sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_INTERACTIVE - ): - if last_outcome is not None: - if last_outcome < event['outcome'] - 0.1: - t += ' This is a better outcome than the last time.\n' - elif last_outcome > event['outcome'] + 0.1: - t += ' This is a worse outcome than the last time.\n' - else: - t += ' This is a similar outcome to the last time.\n' - t += '========================\n' - last_outcome = event['outcome'] - return t, last_outcome - - def _history_to_text(self, include_example_action: bool = True) -> str: - t = '' - last_outcome = None - hist = self._filtered_history(include_example_action) - logging.info( - '_history_to_text() include_example_action=%s hist=%s', - include_example_action, - hist, - ) - # if include_example_action and ( - # len(hist) == 0 or (len(hist) == 1 and hist[0]['outcome'] is None) - # ): - # 
logging.info('_history_to_text() Adding random_event') - # t += self._hist_event_to_text(self._random_event(), None, False) - for i, event in enumerate(hist): - logging.info('_history_to_text event=%s', event) - event_text, last_outcome = self._hist_event_to_text( - event, last_outcome, i == len(hist) - 1 - ) - t += event_text - return t - - def _history_to_chat( - self, worker_id: str, include_example_action: bool = True - ) -> List[Dict[str, str]]: - chat = [] - last_outcome = None - last_outcome_message = '' - for h in self._filtered_history(include_example_action): - if len(h['state']) > 0: - chat.append({ - 'author': 'USER', - 'content': ( - last_outcome_message - + 'Decision State:\n' - + ' {' - + ', '.join([f'"{k}": {v}' for k, v in h['state'].items()]) - + '}\n' - + 'Please provide the Decision Action (json format):\n' - ), - }) - if h['action'] is not None: - chat.append({ - 'author': 'AI', - 'content': ( - +'Decision Action:\n' - + ' {{' - + ', '.join( - [f'"{key}": {value}' for key, value in h['action'].items()] - ) - + '}' - ), - }) - if h['outcome'] is not None: - last_outcome_message = 'Decision Outcome: ' + str(h['outcome']) + '\n' - if ( - self._llm_config.goal - != sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_INTERACTIVE - ): - if last_outcome is not None: - if last_outcome < h['outcome'] - 0.1: - last_outcome_message += ( - ' This is a better outcome than the last time.\n' - ) - elif last_outcome > h['outcome'] + 0.1: - last_outcome_message += ( - ' This is a worse outcome than the last time.\n' - ) - else: - last_outcome_message += ( - ' This is a similar outcome to the last time.\n' - ) - return chat - - def _params_to_dict(self, dp: sight_pb2) -> Dict[str, float]: - """Returns the dict representation of a DecisionParams proto""" - d = {} - for a in dp: - d[a.key] = a.value.double_value - return d - - def _get_creds(self) -> Any: - creds, project = google.auth.default() - auth_req = google.auth.transport.requests.Request() - creds.refresh(auth_req) - return creds - - def _get_req_headers(self) -> Dict[str, str]: - return { - 'Authorization': f'Bearer {self._get_creds().token}', - 'Content-Type': 'application/json; charset=utf-8', - } - - def _ask(self, prompt) -> str: - if ( - self._llm_config.algorithm - == sight_pb2.DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_TEXT_BISON - ): - return self._ask_text_bison(prompt) - elif ( - self._llm_config.algorithm - == sight_pb2.DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_CHAT_BISON - ): - return self._ask_chat_bison(prompt) - elif ( - self._llm_config.algorithm - == sight_pb2.DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_GEMINI_PRO - ): - return self._ask_gemini_pro(prompt) - else: - raise ValueError(f'Invalid algorithm {self._llm_config.algorithm}') - - def _ask_text_bison(self, prompt) -> str: - while True: - response = requests.post( - f"https://us-central1-aiplatform.googleapis.com/v1/projects/{os.environ['PROJECT_ID']}/locations/us-central1/publishers/google/models/text-bison-32k:predict", - data=json.dumps({ - 'instances': [{'prompt': prompt}], - 'parameters': { - 'temperature': 0.2, - 'maxOutputTokens': 2048, - 'topK': 40, - 'topP': 0.55, - # "groundingConfig": string, - # "stopSequences": [ string ], - # "candidateCount": integer, - # "logprobs": integer, - # "presencePenalty": float, - # "frequencyPenalty": float, - # "logitBias": map, - 'echo': False, - }, - }), - headers=self._get_req_headers(), - ).json() - # logging.info('response=%s', response) - if ( - 'error' in response - or 
response['predictions'][0]['content'].strip() == '' - ): - continue - return response['predictions'][0]['content'].strip() - - def _get_action(self, worker_id: str) -> List[Dict[str, float]]: - if ( - self._llm_config.algorithm - == sight_pb2.DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_TEXT_BISON - ): - return self._action_from_text_bison(worker_id) - elif ( - self._llm_config.algorithm - == sight_pb2.DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_CHAT_BISON - ): - return self._action_from_chat_bison(worker_id) - elif ( - self._llm_config.algorithm - == sight_pb2.DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_GEMINI_PRO - ): - return self._action_from_gemini_pro(worker_id) - else: - raise ValueError(f'Invalid algorithm {self._llm_config.algorithm}') - - def _action_from_text_bison(self, worker_id: str) -> List[Dict[str, float]]: - logging.info('ask_text_bison') - logging.info(self._intro + '\n' + self._history_to_text()) - while True: - text = self._ask_text_bison(self._intro + '\n' + self._history_to_text()) - logging.info('text=[%s]', text) - # text = text.removeprefix('```json\n') - # logging.info('text=[%s]', text) - text = text.strip('`').split('\n')[0] - # text = text.split('\n')[0].strip() - logging.info('text=[%s]', text) - try: - return [json.loads(text)] - except json.decoder.JSONDecodeError: - continue - - def _ask_chat_bison(self, prompt, message) -> str: - response = requests.post( - f"https://us-central1-aiplatform.googleapis.com/v1/projects/{os.environ['PROJECT_ID']}/locations/us-central1/publishers/google/models/chat-bison-32k:predict", - data=json.dumps({ - 'instances': [ - { - 'context': prompt, - 'messages': message, + if len(ordered_history) > self._history_len_for_prompt: + ordered_history = ordered_history[0:self._history_len_for_prompt - + 1] + random.shuffle(ordered_history) + + # If this is the first question, add a random event to serve as an example + # of the format. + # if include_example_action and len(ordered_history) == 0: + # ordered_history.append(self._random_event()) + + logging.info( + 'ordered_history[#%d]=%s', + len(ordered_history), + ordered_history, + ) + # if worker_id is None: + if len(self._history) == 0: + return ordered_history + return ordered_history + [self._history[-1]] + + def _hist_event_to_text(self, event: Dict, last_outcome: float, + is_last_event: bool) -> Tuple[str, Any]: + t = '' + if len(event['state']) > 0: + t += 'Decision State:\n' + t += (' {' + + ', '.join([f'"{k}": {v}' + for k, v in event['state'].items()]) + '}\n') + # t += 'Decision Action (json format): ' + if event['action'] is not None or is_last_event: + t += 'Simulation parameters (json format): ' + if event['action'] is not None: + t += (' {' + ', '.join([ + f'"{key}": {value}' for key, value in event['action'].items() + ]) + '}\n') + if event['outcome'] is not None: + # t += 'Decision Outcome: ' + str(event['outcome']) + '\n' + t += 'Simulation Outcome (json format): ' + str( + event['outcome']) + '\n' + if (self._llm_config.goal != sight_pb2.DecisionConfigurationStart. 
+                LLMConfig.LLMGoal.LM_INTERACTIVE):
+            if last_outcome is not None:
+                if last_outcome < event['outcome'] - 0.1:
+                    t += ' This is a better outcome than the last time.\n'
+                elif last_outcome > event['outcome'] + 0.1:
+                    t += ' This is a worse outcome than the last time.\n'
+                else:
+                    t += ' This is a similar outcome to the last time.\n'
+        t += '========================\n'
+        last_outcome = event['outcome']
+        return t, last_outcome
+
+    def _history_to_text(self, include_example_action: bool = True) -> str:
+        t = ''
+        last_outcome = None
+        hist = self._filtered_history(include_example_action)
+        logging.info(
+            '_history_to_text() include_example_action=%s hist=%s',
+            include_example_action,
+            hist,
+        )
+        # if include_example_action and (
+        #     len(hist) == 0 or (len(hist) == 1 and hist[0]['outcome'] is None)
+        # ):
+        #     logging.info('_history_to_text() Adding random_event')
+        #     t += self._hist_event_to_text(self._random_event(), None, False)
+        for i, event in enumerate(hist):
+            logging.info('_history_to_text event=%s', event)
+            event_text, last_outcome = self._hist_event_to_text(
+                event, last_outcome, i == len(hist) - 1)
+            t += event_text
+        return t
+
+    def _history_to_chat(
+            self,
+            worker_id: str,
+            include_example_action: bool = True) -> List[Dict[str, str]]:
+        chat = []
+        last_outcome = None
+        last_outcome_message = ''
+        for h in self._filtered_history(include_example_action):
+            if len(h['state']) > 0:
+                chat.append({
+                    'author':
+                        'USER',
+                    'content':
+                        (last_outcome_message + 'Decision State:\n' + ' {' +
+                         ', '.join([f'"{k}": {v}'
+                                    for k, v in h['state'].items()]) + '}\n' +
+                         'Please provide the Decision Action (json format):\n'),
+                })
+            if h['action'] is not None:
+                chat.append({
+                    'author':
+                        'AI',
+                    'content': ('Decision Action:\n' + ' {' + ', '.join([
+                        f'"{key}": {value}'
+                        for key, value in h['action'].items()
+                    ]) + '}'),
+                })
+            if h['outcome'] is not None:
+                last_outcome_message = 'Decision Outcome: ' + str(
+                    h['outcome']) + '\n'
+                if (self._llm_config.goal
+                        != sight_pb2.DecisionConfigurationStart.LLMConfig.
+                        LLMGoal.LM_INTERACTIVE):
+                    if last_outcome is not None:
+                        if last_outcome < h['outcome'] - 0.1:
+                            last_outcome_message += (
+                                ' This is a better outcome than the last time.\n'
+                            )
+                        elif last_outcome > h['outcome'] + 0.1:
+                            last_outcome_message += (
+                                ' This is a worse outcome than the last time.\n'
+                            )
+                        else:
+                            last_outcome_message += (
+                                ' This is a similar outcome to the last time.\n'
+                            )
+        return chat
+
+    def _params_to_dict(self, dp: sight_pb2) -> Dict[str, float]:
+        """Returns the dict representation of a DecisionParams proto"""
+        d = {}
+        for a in dp:
+            d[a.key] = a.value.double_value
+        return d
+
+    def _get_creds(self) -> Any:
+        creds, project = google.auth.default()
+        auth_req = google.auth.transport.requests.Request()
+        creds.refresh(auth_req)
+        return creds
+
+    def _get_req_headers(self) -> Dict[str, str]:
+        return {
+            'Authorization': f'Bearer {self._get_creds().token}',
+            'Content-Type': 'application/json; charset=utf-8',
+        }
+
+    def _ask(self, prompt) -> str:
+        if (self._llm_config.algorithm == sight_pb2.DecisionConfigurationStart.
+                LLMConfig.LLMAlgorithm.LA_TEXT_BISON):
+            return self._ask_text_bison(prompt)
+        elif (self._llm_config.algorithm == sight_pb2.
+              DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_CHAT_BISON):
+            return self._ask_chat_bison(prompt)
+        elif (self._llm_config.algorithm == sight_pb2.
+ DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_GEMINI_PRO): + return self._ask_gemini_pro(prompt) + else: + raise ValueError(f'Invalid algorithm {self._llm_config.algorithm}') + + def _ask_text_bison(self, prompt) -> str: + while True: + response = requests.post( + f"https://us-central1-aiplatform.googleapis.com/v1/projects/{os.environ['PROJECT_ID']}/locations/us-central1/publishers/google/models/text-bison-32k:predict", + data=json.dumps({ + 'instances': [{ + 'prompt': prompt + }], + 'parameters': { + 'temperature': 0.2, + 'maxOutputTokens': 2048, + 'topK': 40, + 'topP': 0.55, + # "groundingConfig": string, + # "stopSequences": [ string ], + # "candidateCount": integer, + # "logprobs": integer, + # "presencePenalty": float, + # "frequencyPenalty": float, + # "logitBias": map, + 'echo': False, + }, + }), + headers=self._get_req_headers(), + ).json() + # logging.info('response=%s', response) + if ('error' in response + or response['predictions'][0]['content'].strip() == ''): + continue + return response['predictions'][0]['content'].strip() + + def _get_action(self, worker_id: str) -> List[Dict[str, float]]: + if (self._llm_config.algorithm == sight_pb2.DecisionConfigurationStart. + LLMConfig.LLMAlgorithm.LA_TEXT_BISON): + return self._action_from_text_bison(worker_id) + elif (self._llm_config.algorithm == sight_pb2. + DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_CHAT_BISON): + return self._action_from_chat_bison(worker_id) + elif (self._llm_config.algorithm == sight_pb2. + DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_GEMINI_PRO): + return self._action_from_gemini_pro(worker_id) + else: + raise ValueError(f'Invalid algorithm {self._llm_config.algorithm}') + + def _action_from_text_bison(self, + worker_id: str) -> List[Dict[str, float]]: + logging.info('ask_text_bison') + logging.info(self._intro + '\n' + self._history_to_text()) + while True: + text = self._ask_text_bison(self._intro + '\n' + + self._history_to_text()) + logging.info('text=[%s]', text) + # text = text.removeprefix('```json\n') + # logging.info('text=[%s]', text) + text = text.strip('`').split('\n')[0] + # text = text.split('\n')[0].strip() + logging.info('text=[%s]', text) + try: + return [json.loads(text)] + except json.decoder.JSONDecodeError: + continue + + def _ask_chat_bison(self, prompt, message) -> str: + response = requests.post( + f"https://us-central1-aiplatform.googleapis.com/v1/projects/{os.environ['PROJECT_ID']}/locations/us-central1/publishers/google/models/chat-bison-32k:predict", + data=json.dumps({ + 'instances': [ + { + 'context': prompt, + 'messages': message, + }, + ], + 'parameters': { + 'temperature': 0.2, + 'maxOutputTokens': 2048, + 'topK': 40, + 'topP': 0.55, + # "groundingConfig": string, + # "stopSequences": [ string ], + # "candidateCount": integer, + # "logprobs": integer, + # "presencePenalty": float, + # "frequencyPenalty": float, + # "logitBias": map, + 'echo': False, }, - ], - 'parameters': { - 'temperature': 0.2, - 'maxOutputTokens': 2048, - 'topK': 40, - 'topP': 0.55, - # "groundingConfig": string, - # "stopSequences": [ string ], - # "candidateCount": integer, - # "logprobs": integer, - # "presencePenalty": float, - # "frequencyPenalty": float, - # "logitBias": map, - 'echo': False, - }, - }), - headers=self._get_req_headers(), - ).json() - # logging.info('response=%s', response) - # logging.info( - # "response['predictions']=%s", response['predictions'][0]['candidates'] - # ) - # if 'error' in response or response['predictions'][0]['content'].strip() == '': - # 
continue - return response['predictions'][0]['candidates'][0]['content'].strip() - - def _action_from_chat_bison(self, worker_id: str) -> List[Dict[str, float]]: - while True: - text = self._ask_chat_bison(self._intro, self._history_to_chat(worker_id)) - logging.info('text=[%s]', text) - try: - return [json.loads(text)] - except json.decoder.JSONDecodeError: - continue - - def _ask_gemini_pro(self, prompt) -> str: - while True: - response = requests.post( - f"https://us-central1-aiplatform.googleapis.com/v1/projects/{os.environ['PROJECT_ID']}/locations/us-central1/publishers/google/models/gemini-pro:streamGenerateContent", - data=json.dumps({ - 'contents': { - 'role': 'user', - 'parts': {'text': prompt}, - }, - 'safety_settings': { - 'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', - 'threshold': 'BLOCK_LOW_AND_ABOVE', - }, - 'generation_config': { - 'temperature': 0.9, - 'topP': 1, - 'topK': 1, - 'maxOutputTokens': 8192, - # "stopSequences": [".", "?", "!"] - }, - }), - headers=self._get_req_headers(), - ).json() - logging.info('response=%s', response) - if len(response) == 0: - continue - text = '' - for r in response: - if 'parts' in r['candidates'][0]['content']: - text += r['candidates'][0]['content']['parts'][0]['text'] - text = text.strip() - if text == '': - continue - return text - - def _action_from_gemini_pro(self, worker_id: str) -> List[Dict[str, float]]: - while True: - logging.info('ask_geminipro') - prompt = self._intro + '\n' - random_sample, _ = self._hist_event_to_text(self._random_event(), None, False) - prompt += 'Example request: '+ random_sample + '\n' - prompt += self._history_to_text() - logging.info('prompt=%s', prompt) - text = self._ask_gemini_pro(prompt) - if text.startswith('```json'): - text = [text.removeprefix('```json').removesuffix('```')] - else: - text = text.split('\n') - logging.info('text=[%s]', text) - - actions = [] - for i in range(0, len(text), 3): - try: - logging.info('%d: processed %s', i, text[i]) - action = text[i].removeprefix('Simulation parameters (json format):') - logging.info('%d: action=%s', i, action) - actions.append(json.loads(action)) - except json.decoder.JSONDecodeError: - continue - if len(actions) == 0: - continue - return actions - - def _is_done(self, worker_id: str) -> Tuple[bool, str]: - """Checks with the LLM to see whether it has enough information to answer. 
+ }), + headers=self._get_req_headers(), + ).json() + # logging.info('response=%s', response) + # logging.info( + # "response['predictions']=%s", response['predictions'][0]['candidates'] + # ) + # if 'error' in response or response['predictions'][0]['content'].strip() == '': + # continue + return response['predictions'][0]['candidates'][0]['content'].strip() + + def _action_from_chat_bison(self, + worker_id: str) -> List[Dict[str, float]]: + while True: + text = self._ask_chat_bison(self._intro, + self._history_to_chat(worker_id)) + logging.info('text=[%s]', text) + try: + return [json.loads(text)] + except json.decoder.JSONDecodeError: + continue + + def _ask_gemini_pro(self, prompt) -> str: + while True: + response = requests.post( + f"https://us-central1-aiplatform.googleapis.com/v1/projects/{os.environ['PROJECT_ID']}/locations/us-central1/publishers/google/models/gemini-pro:streamGenerateContent", + data=json.dumps({ + 'contents': { + 'role': 'user', + 'parts': { + 'text': prompt + }, + }, + 'safety_settings': { + 'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', + 'threshold': 'BLOCK_LOW_AND_ABOVE', + }, + 'generation_config': { + 'temperature': 0.9, + 'topP': 1, + 'topK': 1, + 'maxOutputTokens': 8192, + # "stopSequences": [".", "?", "!"] + }, + }), + headers=self._get_req_headers(), + ).json() + logging.info('response=%s', response) + if len(response) == 0: + continue + text = '' + for r in response: + if 'parts' in r['candidates'][0]['content']: + text += r['candidates'][0]['content']['parts'][0]['text'] + text = text.strip() + if text == '': + continue + return text + + def _action_from_gemini_pro(self, + worker_id: str) -> List[Dict[str, float]]: + while True: + logging.info('ask_geminipro') + prompt = self._intro + '\n' + random_sample, _ = self._hist_event_to_text( + self._random_event(), None, False) + prompt += 'Example request: ' + random_sample + '\n' + prompt += self._history_to_text() + logging.info('prompt=%s', prompt) + text = self._ask_gemini_pro(prompt) + if text.startswith('```json'): + text = [text.removeprefix('```json').removesuffix('```')] + else: + text = text.split('\n') + logging.info('text=[%s]', text) + + actions = [] + for i in range(0, len(text), 3): + try: + logging.info('%d: processed %s', i, text[i]) + action = text[i].removeprefix( + 'Simulation parameters (json format):') + logging.info('%d: action=%s', i, action) + actions.append(json.loads(action)) + except json.decoder.JSONDecodeError: + continue + if len(actions) == 0: + continue + return actions + + def _is_done(self, worker_id: str) -> Tuple[bool, str]: + """Checks with the LLM to see whether it has enough information to answer. Returns a tuple with a boolean that indicates whether the question can be answered and if so, the answer string. """ - if ( - self._llm_config.algorithm - == sight_pb2.DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_GEMINI_PRO - ): - return self._is_done_from_gemini_pro(worker_id) - return False, '' - - def _is_done_from_gemini_pro(self, worker_id: str) -> Tuple[bool, str]: - question = ( - self._intro - + '\n' - + self._history_to_text(False) - + '\nHas the question been fully answered, including all of its' - ' clauses? Answer Y if yes or N if there are any additional' - ' simulations that need to be performed to fully answer the question.' 
- ) - logging.info('_is_done_from_gemini_pro question=%s', question) - text = self._ask_gemini_pro(question) - logging.info('_is_done_from_gemini_pro text=%s', text) - if not text.lower().startswith('y'): - logging.info('_is_done_from_gemini_pro NOT DONE') - return False, '' - question = ( - self._intro - + '\n' - + self._history_to_text(False) - + "\nWhat is the answer to the user's question?" - ) - text = self._ask_gemini_pro(question) - logging.info('_is_done_from_gemini_pro answer=%s', text) - return True, text - - @overrides - def decision_point( - self, request: service_pb2.DecisionPointRequest - ) -> service_pb2.DecisionPointResponse: - logging.info('DecisionPoint request=%s', request) - # self._append_outcome(request.decision_outcome.reward) - self._lock.acquire() - - dp_response = service_pb2.DecisionPointResponse() - - if ( - self._llm_config.goal - == sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_INTERACTIVE - and self._waiting_on_tell - ): - self._lock.release() - dp_response.action_type = ( - service_pb2.DecisionPointResponse.ActionType.AT_RETRY - ) - return dp_response - - if len(self._history) > 0 and 'outcome' not in self._history[0]: - if len(request.decision_outcome.outcome_params) > 0: - self._history[-1]['outcome'] = self._params_to_dict( - request.decision_point.outcome_params + if (self._llm_config.algorithm == sight_pb2.DecisionConfigurationStart. + LLMConfig.LLMAlgorithm.LA_GEMINI_PRO): + return self._is_done_from_gemini_pro(worker_id) + return False, '' + + def _is_done_from_gemini_pro(self, worker_id: str) -> Tuple[bool, str]: + question = ( + self._intro + '\n' + self._history_to_text(False) + + '\nHas the question been fully answered, including all of its' + ' clauses? Answer Y if yes or N if there are any additional' + ' simulations that need to be performed to fully answer the question.' ) - else: - self._history[-1]['outcome'] = request.decision_outcome.reward - # self.last_outcome = self._history[-1]['outcome'] - # self.script += 'Decision State:\n' - # self.script += ' {' + ', '.join([ - # f'"{p.key}": {p.value.double_value}' - # for p in request.decision_point.state_params - # ]) + '}\n' - # self.script += 'Decision Action (json format):\n' - self._history.append({ - 'state': self._params_to_dict(request.decision_point.state_params), - 'action': None, - 'outcome': None, - }) - - if self._actions_to_do: - selected_actions = [self._actions_to_do.pop(0)] - # Periodically try a random action, but not on the first trial in case the - # user just wants a single reasonable recommendation. 
- elif ( - self._llm_config.goal - != sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_INTERACTIVE - and len(self._history) > 1 - and random.random() > 0.1 - ): - logging.info( - '##########################\n##### BAYESIAN OPT' - ' ######\n##########################' - ) - # selected_actions = self._random_action() - dp = self._bayesian_opt.decision_point(request) - selected_actions = {} - for a in dp.action: - selected_actions[a.key] = a.value.double_value - selected_actions = [selected_actions] - print('selected_actions=%s' % selected_actions) - - else: - selected_actions = self._get_action(request.worker_id) - - logging.info('decision_point(): selected_actions=%s', selected_actions) - - self._history[-1]['action'] = selected_actions[0] - # If there are more actions to perform, store them in self._actions_to_do - if len(selected_actions) >= 1: - self._actions_to_do.extend(selected_actions[1:]) - - # self.script += ' {' + ', '.join([ - # f'"{key}": {value}' - # for key, value in selected_actions.items() - # ]) + '}\n' - - for key, value in self._history[-1]['action'].items(): - a = dp_response.action.add() - a.key = key - a.value.double_value = float(value) - - self._num_decision_points += 1 - - self._lock.release() - dp_response.action_type = ( - service_pb2.DecisionPointResponse.ActionType.AT_ACT - ) - return dp_response - - @overrides - def finalize_episode( - self, request: service_pb2.FinalizeEpisodeRequest - ) -> service_pb2.FinalizeEpisodeResponse: - self._lock.acquire() - - logging.info('FinalizeEpisode request=%s', request) - if len(request.decision_outcome.outcome_params) > 0: - self._history[-1]['outcome'] = self._params_to_dict( - request.decision_outcome.outcome_params - ) - else: - self._history[-1]['outcome'] = request.decision_outcome.reward - # self.last_outcome = self._history[-1]['outcome'] - - logging.info('self._history[-1]=%s', self._history[-1]) - for key, value in self._history[-1]['action'].items(): - a = request.decision_point.choice_params.add() - a.key = key - a.value.double_value = float(value) - self._bayesian_opt.finalize_episode(request) - - if ( - self._llm_config.goal - == sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_INTERACTIVE - ): - # If there are no outstanding acitions, ask the LLM whether the user's - # question can be answered via the already-completed model runs. 
- if len(self._actions_to_do) == 0: - can_respond_to_question, response = self._is_done(request.worker_id) - self._response_ready = can_respond_to_question - if self._response_ready: - self._waiting_on_tell = True - self._response_for_listen = response - self._lock.release() - - logging.info( - 'FinalizeEpisode response=%s', - service_pb2.FinalizeEpisodeResponse(response_str='Success!'), - ) - return service_pb2.FinalizeEpisodeResponse(response_str='Success!') - - @overrides - def tell(self, request: service_pb2.TellRequest) -> service_pb2.TellResponse: - tell_response = service_pb2.TellResponse() - self._lock.acquire() - logging.info('tell() request=%s', request) - - if ( - self._llm_config.goal - == sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_INTERACTIVE - and self._waiting_on_tell - ): - logging.info('INTERACTIVE') - self._intro += '\n' + self._history_to_text(False) + '\n' - self._history = [] - self._intro += 'User input: ' + request.message_str - # self._intro += '\n' + request.message_str - self._waiting_on_tell = False - logging.info('tell self._intro=%s', self._intro) - - self._lock.release() - tell_response.response_str = self._ask(self._intro) - return tell_response - - @overrides - def listen( - self, request: service_pb2.ListenRequest - ) -> service_pb2.ListenResponse: - listen_response = service_pb2.ListenResponse() - self._lock.acquire() - logging.info( - 'listen() request=%s, self._response_ready=%s,' - ' self._response_for_listen=%s', - request, - self._response_ready, - self._response_for_listen, - ) - - if ( - self._llm_config.goal - == sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_INTERACTIVE - ): - listen_response.response_ready = self._response_ready - if self._response_ready: - listen_response.response_str = self._response_for_listen - self._response_ready = False - - self._lock.release() - logging.info('listen() response=%s', listen_response) - return listen_response - - @overrides - def current_status( - self, request: service_pb2.CurrentStatusRequest - ) -> service_pb2.CurrentStatusResponse: - bayesian_opt_status = self._bayesian_opt.current_status(request) - return service_pb2.CurrentStatusResponse( - response_str=f"""[LLM: script={self._intro + self._history_to_text(None)} + logging.info('_is_done_from_gemini_pro question=%s', question) + text = self._ask_gemini_pro(question) + logging.info('_is_done_from_gemini_pro text=%s', text) + if not text.lower().startswith('y'): + logging.info('_is_done_from_gemini_pro NOT DONE') + return False, '' + question = (self._intro + '\n' + self._history_to_text(False) + + "\nWhat is the answer to the user's question?") + text = self._ask_gemini_pro(question) + logging.info('_is_done_from_gemini_pro answer=%s', text) + return True, text + + @overrides + def decision_point( + self, request: service_pb2.DecisionPointRequest + ) -> service_pb2.DecisionPointResponse: + logging.info('DecisionPoint request=%s', request) + # self._append_outcome(request.decision_outcome.reward) + self._lock.acquire() + + dp_response = service_pb2.DecisionPointResponse() + + if (self._llm_config.goal == sight_pb2.DecisionConfigurationStart. 
+                LLMConfig.LLMGoal.LM_INTERACTIVE and self._waiting_on_tell):
+            self._lock.release()
+            dp_response.action_type = (
+                service_pb2.DecisionPointResponse.ActionType.AT_RETRY)
+            return dp_response
+
+        if len(self._history) > 0 and 'outcome' not in self._history[0]:
+            if len(request.decision_outcome.outcome_params) > 0:
+                self._history[-1]['outcome'] = self._params_to_dict(
+                    request.decision_outcome.outcome_params)
+            else:
+                self._history[-1]['outcome'] = request.decision_outcome.reward
+            # self.last_outcome = self._history[-1]['outcome']
+            # self.script += 'Decision State:\n'
+            # self.script += '  {' + ', '.join([
+            #     f'"{p.key}": {p.value.double_value}'
+            #     for p in request.decision_point.state_params
+            # ]) + '}\n'
+            # self.script += 'Decision Action (json format):\n'
+        self._history.append({
+            'state':
+                self._params_to_dict(request.decision_point.state_params),
+            'action':
+                None,
+            'outcome':
+                None,
+        })
+
+        if self._actions_to_do:
+            selected_actions = [self._actions_to_do.pop(0)]
+        # Periodically try a random action, but not on the first trial in case the
+        # user just wants a single reasonable recommendation.
+        elif (self._llm_config.goal != sight_pb2.DecisionConfigurationStart.
+              LLMConfig.LLMGoal.LM_INTERACTIVE and len(self._history) > 1
+              and random.random() > 0.1):
+            logging.info('##########################\n##### BAYESIAN OPT'
+                         ' ######\n##########################')
+            # selected_actions = self._random_action()
+            dp = self._bayesian_opt.decision_point(request)
+            selected_actions = {}
+            for a in dp.action:
+                selected_actions[a.key] = a.value.double_value
+            selected_actions = [selected_actions]
+            print('selected_actions=%s' % selected_actions)
+
+        else:
+            selected_actions = self._get_action(request.worker_id)
+
+        logging.info('decision_point(): selected_actions=%s', selected_actions)
+
+        self._history[-1]['action'] = selected_actions[0]
+        # If there are more actions to perform, store them in self._actions_to_do
+        if len(selected_actions) >= 1:
+            self._actions_to_do.extend(selected_actions[1:])
+
+        # self.script += '  {' + ', '.join([
+        #     f'"{key}": {value}'
+        #     for key, value in selected_actions.items()
+        # ]) + '}\n'
+
+        for key, value in self._history[-1]['action'].items():
+            a = dp_response.action.add()
+            a.key = key
+            a.value.double_value = float(value)
+
+        self._num_decision_points += 1
+
+        self._lock.release()
+        dp_response.action_type = (
+            service_pb2.DecisionPointResponse.ActionType.AT_ACT)
+        return dp_response
+
+    @overrides
+    def finalize_episode(
+        self, request: service_pb2.FinalizeEpisodeRequest
+    ) -> service_pb2.FinalizeEpisodeResponse:
+        self._lock.acquire()
+
+        logging.info('FinalizeEpisode request=%s', request)
+        if len(request.decision_outcome.outcome_params) > 0:
+            self._history[-1]['outcome'] = self._params_to_dict(
+                request.decision_outcome.outcome_params)
+        else:
+            self._history[-1]['outcome'] = request.decision_outcome.reward
+        # self.last_outcome = self._history[-1]['outcome']
+
+        logging.info('self._history[-1]=%s', self._history[-1])
+        for key, value in self._history[-1]['action'].items():
+            a = request.decision_point.choice_params.add()
+            a.key = key
+            a.value.double_value = float(value)
+        self._bayesian_opt.finalize_episode(request)
+
+        if (self._llm_config.goal == sight_pb2.DecisionConfigurationStart.
+                LLMConfig.LLMGoal.LM_INTERACTIVE):
+            # If there are no outstanding actions, ask the LLM whether the user's
+            # question can be answered via the already-completed model runs.
+ if len(self._actions_to_do) == 0: + can_respond_to_question, response = self._is_done( + request.worker_id) + self._response_ready = can_respond_to_question + if self._response_ready: + self._waiting_on_tell = True + self._response_for_listen = response + self._lock.release() + + logging.info( + 'FinalizeEpisode response=%s', + service_pb2.FinalizeEpisodeResponse(response_str='Success!'), + ) + return service_pb2.FinalizeEpisodeResponse(response_str='Success!') + + @overrides + def tell(self, + request: service_pb2.TellRequest) -> service_pb2.TellResponse: + tell_response = service_pb2.TellResponse() + self._lock.acquire() + logging.info('tell() request=%s', request) + + if (self._llm_config.goal == sight_pb2.DecisionConfigurationStart. + LLMConfig.LLMGoal.LM_INTERACTIVE and self._waiting_on_tell): + logging.info('INTERACTIVE') + self._intro += '\n' + self._history_to_text(False) + '\n' + self._history = [] + self._intro += 'User input: ' + request.message_str + # self._intro += '\n' + request.message_str + self._waiting_on_tell = False + logging.info('tell self._intro=%s', self._intro) + + self._lock.release() + tell_response.response_str = self._ask(self._intro) + return tell_response + + @overrides + def listen( + self, + request: service_pb2.ListenRequest) -> service_pb2.ListenResponse: + listen_response = service_pb2.ListenResponse() + self._lock.acquire() + logging.info( + 'listen() request=%s, self._response_ready=%s,' + ' self._response_for_listen=%s', + request, + self._response_ready, + self._response_for_listen, + ) + + if (self._llm_config.goal == sight_pb2.DecisionConfigurationStart. + LLMConfig.LLMGoal.LM_INTERACTIVE): + listen_response.response_ready = self._response_ready + if self._response_ready: + listen_response.response_str = self._response_for_listen + self._response_ready = False + + self._lock.release() + logging.info('listen() response=%s', listen_response) + return listen_response + + @overrides + def current_status( + self, request: service_pb2.CurrentStatusRequest + ) -> service_pb2.CurrentStatusResponse: + bayesian_opt_status = self._bayesian_opt.current_status(request) + return service_pb2.CurrentStatusResponse( + response_str= + f"""[LLM: script={self._intro + self._history_to_text(None)} ----------------- -BayesianOpt={bayesian_opt_status.response_str}""" - ) +BayesianOpt={bayesian_opt_status.response_str}""") diff --git a/sight_service/nevergrad_opt.py b/sight_service/nevergrad_opt.py index dc08526..fc56e35 100644 --- a/sight_service/nevergrad_opt.py +++ b/sight_service/nevergrad_opt.py @@ -13,7 +13,7 @@ # limitations under the License. """LLM-based optimization for driving Sight applications.""" -import logging +from helpers.logs.logs_handler import logger as logging from overrides import overrides from typing import Any, Dict, List, Tuple @@ -32,6 +32,8 @@ import threading _file_name = "nevergrad_opt.py" + + class NeverGradOpt(OptimizerInstance): """Uses the NeverGrad library to choose the parameters of the code. 
@@ -66,8 +68,6 @@ def launch( self.actions = self.normalizer.normalize_in_0_to_1(self.actions) # print("self.actions : ", self.actions) - - self.possible_values = {} for i, key in enumerate(sorted(self.actions.keys())): if self.actions[key].valid_float_values: @@ -97,7 +97,6 @@ def launch( # print('here ng.p.Dict is : ', ng.p.Dict(**params)) # print('here ng.p.Instrumentation is : ', ng.p.Instrumentation(ng.p.Dict(**params))) - parametrization = ng.p.Instrumentation(ng.p.Dict(**params)) budget = 1000 @@ -111,61 +110,60 @@ def launch( budget=budget) elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart .NeverGradConfig.NeverGradAlgorithm.NG_CMA): - self._optimizer = ng.optimizers.CMA(parametrization=parametrization, - budget=budget) + self._optimizer = ng.optimizers.CMA( + parametrization=parametrization, budget=budget) elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart .NeverGradConfig.NeverGradAlgorithm.NG_TwoPointsDE): - self._optimizer = ng.optimizers.TwoPointsDE(parametrization=parametrization, - budget=budget) + self._optimizer = ng.optimizers.TwoPointsDE( + parametrization=parametrization, budget=budget) elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart .NeverGradConfig.NeverGradAlgorithm.NG_RandomSearch): - self._optimizer = ng.optimizers.RandomSearch(parametrization=parametrization, - budget=budget) + self._optimizer = ng.optimizers.RandomSearch( + parametrization=parametrization, budget=budget) elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart .NeverGradConfig.NeverGradAlgorithm.NG_PSO): - self._optimizer = ng.optimizers.PSO(parametrization=parametrization, - budget=budget) + self._optimizer = ng.optimizers.PSO( + parametrization=parametrization, budget=budget) elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart .NeverGradConfig.NeverGradAlgorithm.NG_ScrHammersleySearch): - self._optimizer = ng.optimizers.ScrHammersleySearch(parametrization=parametrization, - budget=budget) + self._optimizer = ng.optimizers.ScrHammersleySearch( + parametrization=parametrization, budget=budget) elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart .NeverGradConfig.NeverGradAlgorithm.NG_DE): self._optimizer = ng.optimizers.DE(parametrization=parametrization, budget=budget) elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart .NeverGradConfig.NeverGradAlgorithm.NG_CGA): - self._optimizer = ng.optimizers.cGA(parametrization=parametrization, - budget=budget) + self._optimizer = ng.optimizers.cGA( + parametrization=parametrization, budget=budget) elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart .NeverGradConfig.NeverGradAlgorithm.NG_ES): self._optimizer = ng.optimizers.ES(parametrization=parametrization, budget=budget) elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart .NeverGradConfig.NeverGradAlgorithm.NG_DL_OPO): - self._optimizer = ng.optimizers.DiscreteLenglerOnePlusOne(parametrization=parametrization, - budget=budget) + self._optimizer = ng.optimizers.DiscreteLenglerOnePlusOne( + parametrization=parametrization, budget=budget) elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart .NeverGradConfig.NeverGradAlgorithm.NG_DDE): - self._optimizer = ng.optimizers.DiscreteDE(parametrization=parametrization, - budget=budget) + self._optimizer = ng.optimizers.DiscreteDE( + parametrization=parametrization, budget=budget) elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart 
.NeverGradConfig.NeverGradAlgorithm.NG_NMM): - self._optimizer = ng.optimizers.NeuralMetaModel(parametrization=parametrization, - budget=budget) + self._optimizer = ng.optimizers.NeuralMetaModel( + parametrization=parametrization, budget=budget) elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart .NeverGradConfig.NeverGradAlgorithm.NG_TINY_SPSA): - self._optimizer = ng.optimizers.TinySPSA(parametrization=parametrization, - budget=budget) + self._optimizer = ng.optimizers.TinySPSA( + parametrization=parametrization, budget=budget) elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart .NeverGradConfig.NeverGradAlgorithm.NG_VORONOI_DE): - self._optimizer = ng.optimizers.VoronoiDE(parametrization=parametrization, - budget=budget) + self._optimizer = ng.optimizers.VoronoiDE( + parametrization=parametrization, budget=budget) elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart .NeverGradConfig.NeverGradAlgorithm.NG_CMA_SMALL): - self._optimizer = ng.optimizers.CMAsmall(parametrization=parametrization, - budget=budget) - + self._optimizer = ng.optimizers.CMAsmall( + parametrization=parametrization, budget=budget) # print(self._optimizer, type(self._optimizer)) @@ -202,7 +200,8 @@ def decision_point( self.num_samples_issued += 1 self._lock.release() - denormalized_actions = self.normalizer.denormalize_from_0_to_1(selected_actions.args[0]) + denormalized_actions = self.normalizer.denormalize_from_0_to_1( + selected_actions.args[0]) # print("denormalized_actions : ", denormalized_actions) dp_response = service_pb2.DecisionPointResponse() @@ -278,29 +277,30 @@ def current_status( # print('self._total_count was : ', self._total_count) # print('self._completed_count is now : ', self._completed_count) - if(self._completed_count == self._total_count): - status = service_pb2.CurrentStatusResponse.Status.SUCCESS - elif(self._completed_count < self._total_count): - status = service_pb2.CurrentStatusResponse.Status.IN_PROGRESS + if (self._completed_count == self._total_count): + status = service_pb2.CurrentStatusResponse.Status.SUCCESS + elif (self._completed_count < self._total_count): + status = service_pb2.CurrentStatusResponse.Status.IN_PROGRESS else: - status = service_pb2.CurrentStatusResponse.Status.FAILURE + status = service_pb2.CurrentStatusResponse.Status.FAILURE - return service_pb2.CurrentStatusResponse(response_str=response, status=status) + return service_pb2.CurrentStatusResponse(response_str=response, + status=status) @overrides def WorkerAlive( - self, request: service_pb2.WorkerAliveRequest + self, request: service_pb2.WorkerAliveRequest ) -> service_pb2.WorkerAliveResponse: method_name = "WorkerAlive" logging.debug(">>>> In %s of %s", method_name, _file_name) - if(self._completed_count == self._total_count): - worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_DONE + if (self._completed_count == self._total_count): + worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_DONE # elif(not self.pending_samples): # worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_RETRY else: - # Increasing count here so that multiple workers can't enter the dp call for same sample at last - self._completed_count += 1 - worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_ACT + # Increasing count here so that multiple workers can't enter the dp call for same sample at last + self._completed_count += 1 + worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_ACT 
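+            # Illustrative note: ST_ACT lets this worker proceed to its next
+            # decision point, while ST_DONE (above) signals that the sample
+            # budget is exhausted and the worker can stop.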
logging.info("worker_alive_status is %s", worker_alive_status) logging.debug("<<<< Out %s of %s", method_name, _file_name) return service_pb2.WorkerAliveResponse(status_type=worker_alive_status) diff --git a/sight_service/optimizer_instance.py b/sight_service/optimizer_instance.py index bc0bfb8..4aea8d0 100644 --- a/sight_service/optimizer_instance.py +++ b/sight_service/optimizer_instance.py @@ -11,11 +11,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """An instance of a Sight optimizer dedicated to a single experiment.""" from concurrent import futures -import logging +from helpers.logs.logs_handler import logger as logging from typing import Any, Dict, List, Tuple, Sequence from sight.widgets.decision import utils from sight_service.proto import service_pb2 @@ -23,155 +22,150 @@ _file_name = "optimizer_instance.py" + def param_dict_to_proto( - param_dict: Dict[str, float] -) -> List[sight_pb2.DecisionParam]: - """converting dictionary of parameters into proto.""" - param_proto: List[sight_pb2.DecisionParam] = [] - for k, v in sorted(param_dict.items()): - if isinstance(v, str): - val = sight_pb2.Value( - sub_type=sight_pb2.Value.ST_STRING, - string_value=v, - ) - elif isinstance(v, float): - val = sight_pb2.Value( - sub_type=sight_pb2.Value.ST_DOUBLE, - double_value=v, - ) - elif (not utils.is_scalar(v)): - print('here v is : ', v, type(v)) - val = sight_pb2.Value( - sub_type=sight_pb2.Value.ST_JSON, - json_value=v, - ) - else: - raise ValueError('action attribute type must be either string or float') - - param_proto.append( - sight_pb2.DecisionParam( - key=k, - value=val - ) - ) - return param_proto + param_dict: Dict[str, float]) -> List[sight_pb2.DecisionParam]: + """converting dictionary of parameters into proto.""" + param_proto: List[sight_pb2.DecisionParam] = [] + for k, v in sorted(param_dict.items()): + if isinstance(v, str): + val = sight_pb2.Value( + sub_type=sight_pb2.Value.ST_STRING, + string_value=v, + ) + elif isinstance(v, float): + val = sight_pb2.Value( + sub_type=sight_pb2.Value.ST_DOUBLE, + double_value=v, + ) + elif (not utils.is_scalar(v)): + print('here v is : ', v, type(v)) + val = sight_pb2.Value( + sub_type=sight_pb2.Value.ST_JSON, + json_value=v, + ) + else: + raise ValueError( + 'action attribute type must be either string or float') + + param_proto.append(sight_pb2.DecisionParam(key=k, value=val)) + return param_proto def param_proto_to_dict( - param_proto: Sequence[sight_pb2.DecisionParam], -) -> Dict[str, float]: - """converting proto back into dictionary of parameters.""" - param_dict = {} - for param in param_proto: - # if ((param.value.sub_type != sight_pb2.Value.ST_DOUBLE) and (param.value.sub_type != sight_pb2.Value.ST_STRING)): - # raise ValueError("Unsupported action type %s" % param.value.sub_type) - # param_dict[param.key] = param.value.double_value - if (param.value.sub_type == sight_pb2.Value.ST_DOUBLE): - param_dict[param.key] = param.value.double_value - elif (param.value.sub_type == sight_pb2.Value.ST_STRING): - param_dict[param.key] = param.value.string_value - elif (param.value.sub_type == sight_pb2.Value.ST_BOOL): - param_dict[param.key] = param.value.bool_value - elif (param.value.sub_type == sight_pb2.Value.ST_BYTES): - param_dict[param.key] = param.value.bytes_value - elif (param.value.sub_type == sight_pb2.Value.ST_INT64): - param_dict[param.key] = param.value.int64_value - elif 
(param.value.sub_type == sight_pb2.Value.ST_JSON): - param_dict[param.key] = param.value.json_value - else: - raise ValueError("Unsupported action type %s" % param.value.sub_type) - return param_dict + param_proto: Sequence[sight_pb2.DecisionParam], ) -> Dict[str, float]: + """converting proto back into dictionary of parameters.""" + param_dict = {} + for param in param_proto: + # if ((param.value.sub_type != sight_pb2.Value.ST_DOUBLE) and (param.value.sub_type != sight_pb2.Value.ST_STRING)): + # raise ValueError("Unsupported action type %s" % param.value.sub_type) + # param_dict[param.key] = param.value.double_value + if (param.value.sub_type == sight_pb2.Value.ST_DOUBLE): + param_dict[param.key] = param.value.double_value + elif (param.value.sub_type == sight_pb2.Value.ST_STRING): + param_dict[param.key] = param.value.string_value + elif (param.value.sub_type == sight_pb2.Value.ST_BOOL): + param_dict[param.key] = param.value.bool_value + elif (param.value.sub_type == sight_pb2.Value.ST_BYTES): + param_dict[param.key] = param.value.bytes_value + elif (param.value.sub_type == sight_pb2.Value.ST_INT64): + param_dict[param.key] = param.value.int64_value + elif (param.value.sub_type == sight_pb2.Value.ST_JSON): + param_dict[param.key] = param.value.json_value + else: + raise ValueError("Unsupported action type %s" % + param.value.sub_type) + return param_dict class OptimizerInstance: - """An OptimizerInstance class that is generic for all optimizers. + """An OptimizerInstance class that is generic for all optimizers. An optimizer containing base methods which specialized optimizers will override while communicating with client. """ - def __init__(self): - self.actions = {} - self.state = {} - self.outcomes = {} + def __init__(self): + self.actions = {} + self.state = {} + self.outcomes = {} - def launch( - self, request: service_pb2.LaunchRequest - ) -> service_pb2.LaunchResponse: - """Initializing new study and storing state and action attributes for the same. + def launch( + self, + request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: + """Initializing new study and storing state and action attributes for the same. 
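+
+        The action, state and outcome attribute dictionaries are filled in
+        sorted-key order so that every call sees a consistent ordering.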
""" - method_name = "launch" - logging.debug(">>>> In %s of %s", method_name, _file_name) - # logging.info('request.decision_config_params=%s', request.decision_config_params) - - # sorting dict key wise to maintain consistency at for all calls - action_keys = list(request.decision_config_params.action_attrs.keys()) - action_keys.sort() - for k in action_keys: - self.actions[k] = request.decision_config_params.action_attrs[k] - - # sorting dict key wise to maintain consistency at for all calls - state_keys = list(request.decision_config_params.state_attrs.keys()) - state_keys.sort() - for k in state_keys: - self.state[k] = request.decision_config_params.state_attrs[k] - - # sorting dict key wise to maintain consistency at for all calls - outcome_keys = list(request.decision_config_params.outcome_attrs.keys()) - outcome_keys.sort() - for k in outcome_keys: - self.outcomes[k] = request.decision_config_params.outcome_attrs[k] - - print(f"<<<<<<<<< Out {method_name} of {_file_name}.") - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return service_pb2.LaunchResponse() - - def decision_point( - self, request: service_pb2.DecisionPointRequest - ) -> service_pb2.DecisionPointResponse: - return service_pb2.DecisionPointResponse() - - def finalize_episode( - self, request: service_pb2.FinalizeEpisodeRequest - ) -> service_pb2.FinalizeEpisodeResponse: - return service_pb2.FinalizeEpisodeResponse() - - def tell( - self, request: service_pb2.TellRequest - ) -> service_pb2.TellResponse: - return service_pb2.TellResponse() - - def listen( - self, request: service_pb2.ListenRequest - ) -> service_pb2.ListenResponse: - return service_pb2.ListenResponse() - - def current_status( - self, request: service_pb2.CurrentStatusRequest - ) -> service_pb2.CurrentStatusResponse: - return service_pb2.CurrentStatusResponse() - - def propose_action( - self, request: service_pb2.ProposeActionRequest - ) -> service_pb2.ProposeActionResponse: - return service_pb2.ProposeActionResponse() - - def GetOutcome( - self, request: service_pb2.GetOutcomeRequest - ) -> service_pb2.GetOutcomeResponse: - return service_pb2.GetOutcomeResponse() - - def fetch_optimal_action( - self, request: service_pb2.FetchOptimalActionRequest - ) -> service_pb2.FetchOptimalActionResponse: - return service_pb2.FetchOptimalActionResponse() - - def close( - self, request: service_pb2.CloseRequest - ) -> service_pb2.CloseResponse: - return service_pb2.CloseResponse() - - def WorkerAlive( - self, request: service_pb2.WorkerAliveRequest - ) -> service_pb2.WorkerAliveResponse: - return service_pb2.WorkerAliveResponse() + method_name = "launch" + logging.debug(">>>> In %s of %s", method_name, _file_name) + # logging.info('request.decision_config_params=%s', request.decision_config_params) + + # sorting dict key wise to maintain consistency at for all calls + action_keys = list(request.decision_config_params.action_attrs.keys()) + action_keys.sort() + for k in action_keys: + self.actions[k] = request.decision_config_params.action_attrs[k] + + # sorting dict key wise to maintain consistency at for all calls + state_keys = list(request.decision_config_params.state_attrs.keys()) + state_keys.sort() + for k in state_keys: + self.state[k] = request.decision_config_params.state_attrs[k] + + # sorting dict key wise to maintain consistency at for all calls + outcome_keys = list( + request.decision_config_params.outcome_attrs.keys()) + outcome_keys.sort() + for k in outcome_keys: + self.outcomes[k] = request.decision_config_params.outcome_attrs[k] + + 
print(f"<<<<<<<<< Out {method_name} of {_file_name}.") + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return service_pb2.LaunchResponse() + + def decision_point( + self, request: service_pb2.DecisionPointRequest + ) -> service_pb2.DecisionPointResponse: + return service_pb2.DecisionPointResponse() + + def finalize_episode( + self, request: service_pb2.FinalizeEpisodeRequest + ) -> service_pb2.FinalizeEpisodeResponse: + return service_pb2.FinalizeEpisodeResponse() + + def tell(self, + request: service_pb2.TellRequest) -> service_pb2.TellResponse: + return service_pb2.TellResponse() + + def listen( + self, + request: service_pb2.ListenRequest) -> service_pb2.ListenResponse: + return service_pb2.ListenResponse() + + def current_status( + self, request: service_pb2.CurrentStatusRequest + ) -> service_pb2.CurrentStatusResponse: + return service_pb2.CurrentStatusResponse() + + def propose_action( + self, request: service_pb2.ProposeActionRequest + ) -> service_pb2.ProposeActionResponse: + return service_pb2.ProposeActionResponse() + + def GetOutcome( + self, request: service_pb2.GetOutcomeRequest + ) -> service_pb2.GetOutcomeResponse: + return service_pb2.GetOutcomeResponse() + + def fetch_optimal_action( + self, request: service_pb2.FetchOptimalActionRequest + ) -> service_pb2.FetchOptimalActionResponse: + return service_pb2.FetchOptimalActionResponse() + + def close(self, + request: service_pb2.CloseRequest) -> service_pb2.CloseResponse: + return service_pb2.CloseResponse() + + def WorkerAlive( + self, request: service_pb2.WorkerAliveRequest + ) -> service_pb2.WorkerAliveResponse: + return service_pb2.WorkerAliveResponse() diff --git a/sight_service/sensitivity_analysis.py b/sight_service/sensitivity_analysis.py index 74fc216..652bd3b 100644 --- a/sight_service/sensitivity_analysis.py +++ b/sight_service/sensitivity_analysis.py @@ -11,10 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Sensitivity analysis of Sight applications.""" -import logging +from helpers.logs.logs_handler import logger as logging import random from typing import Any, Dict, List, Tuple from overrides import overrides @@ -27,161 +26,178 @@ class SensitivityAnalysis(OptimizerInstance): - """Exhaustively searches over all the possible values of the action attributes. + """Exhaustively searches over all the possible values of the action attributes. Attributes: possible_values: Maps each action attributes to the list of possible values of this attribute. 
""" - def __init__(self): - super().__init__() - self.num_samples_issued = 0 - self.active_samples = {} - self.complete_samples = {} - self.possible_values = {} - self._lock = threading.RLock() - - @overrides - def launch( - self, request: service_pb2.LaunchRequest - ) -> service_pb2.LaunchResponse: - method_name = 'launch' - logging.debug('>>>> In %s of %s', method_name, _file_name) - - response = super(SensitivityAnalysis, self).launch(request) - self.possible_values = {} - for i, key in enumerate(sorted(self.actions.keys())): - if self.actions[key].valid_float_values: - self.possible_values[key] = list(self.actions[key].valid_float_values) - elif self.actions[key].step_size: - self.possible_values[key] = [] - cur = self.actions[key].min_value - while cur <= self.actions[key].max_value: - self.possible_values[key].append(cur) - cur += self.actions[key].step_size - - logging.info('possible_values=%s', self.possible_values) - response.display_string = 'Sensitivity Analysis!' - logging.debug('<<<< Out %s of %s', method_name, _file_name) - return response - - @overrides - def decision_point( - self, request: service_pb2.DecisionPointRequest - ) -> service_pb2.DecisionPointResponse: - method_name = 'decision_point' - logging.debug('>>>> In %s of %s', method_name, _file_name) - - next_action = {} - for i, key in enumerate(self.actions): - if key in self.possible_values: - print('selecting from possible values') - next_action[key] = self.possible_values[key][ - random.randint(0, len(self.possible_values[key]) - 1) - ] - elif self.actions[key].HasField('continuous_prob_dist'): - rand_val = random.gauss(self.actions[key].continuous_prob_dist.gaussian.mean, - self.actions[key].continuous_prob_dist.gaussian.stdev) - print ('self.actions[key].continuous_prob_dist=%s, rand_val=%s' % (self.actions[key].continuous_prob_dist, rand_val)) - if rand_val < self.actions[key].min_value: - rand_val = self.actions[key].min_value - elif rand_val > self.actions[key].max_value: - rand_val = self.actions[key].max_value - next_action[key] = rand_val - if self.actions[key].continuous_prob_dist.HasField('gaussian'): - rand_val = random.gauss(self.actions[key].continuous_prob_dist.gaussian.mean, - self.actions[key].continuous_prob_dist.gaussian.stdev) - print ('self.actions[key].continuous_prob_dist=%s, rand_val=%s' % (self.actions[key].continuous_prob_dist, rand_val)) - if rand_val < self.actions[key].min_value: - rand_val = self.actions[key].min_value - elif rand_val > self.actions[key].max_value: - rand_val = self.actions[key].max_value - next_action[key] = rand_val - elif self.actions[key].continuous_prob_dist.HasField('uniform'): - rand_val = random.uniform(self.actions[key].continuous_prob_dist.uniform.min_val, - self.actions[key].continuous_prob_dist.uniform.max_val) - print ('self.actions[key].continuous_prob_dist=%s, rand_val=%s' % (self.actions[key].continuous_prob_dist, rand_val)) - next_action[key] = rand_val - else: - raise ValueError('Only support Gaussian continuous distribution.') - elif self.actions[key].HasField('discrete_prob_dist'): - if self.actions[key].discrete_prob_dist.HasField('uniform'): - rand_val = random.randint(self.actions[key].discrete_prob_dist.uniform.min_val, - self.actions[key].discrete_prob_dist.uniform.max_val) - print ('self.actions[key].discrete_prob_dist=%s, rand_val=%s' % (self.actions[key].discrete_prob_dist, rand_val)) - next_action[key] = rand_val - else: - raise ValueError('Only support Uniform discrete distribution.') - else: - print('selecting from random.uniform') - 
next_action[key] = random.uniform( - self.actions[key].min_value, self.actions[key].max_value - ) - - self._lock.acquire() - self.active_samples[request.worker_id] = { - 'action': next_action, - 'sample_num': self.num_samples_issued, - } - self.num_samples_issued += 1 - self._lock.release() - - logging.info('next_action=%s', next_action) - dp_response = service_pb2.DecisionPointdp_response() - dp_response.action.extend(param_dict_to_proto(next_action)) - dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT - logging.debug('<<<< Out %s of %s', method_name, _file_name) - return dp_response - - @overrides - def finalize_episode( - self, request: service_pb2.FinalizeEpisodeRequest - ) -> service_pb2.FinalizeEpisodeResponse: - method_name = 'finalize_episode' - logging.debug('>>>> In %s of %s', method_name, _file_name) - # logging.info('Running for exhaustive search....') - - self._lock.acquire() - # logging.info('FinalizeEpisode complete_samples=%s' % self.complete_samples) - self.complete_samples[self.active_samples[request.worker_id]['sample_num']] = { - 'outcome': request.decision_outcome.outcome_value, - 'action': self.active_samples[request.worker_id]['action'], - } - del self.active_samples[request.worker_id] - self._lock.release() - - # logging.info('FinalizeEpisode active_samples=%s' % self.active_samples) - logging.debug('<<<< Out %s of %s', method_name, _file_name) - return service_pb2.FinalizeEpisodeResponse(response_str='Success!') - - @overrides - def current_status( - self, request: service_pb2.CurrentStatusRequest - ) -> service_pb2.CurrentStatusResponse: - method_name = 'current_status' - logging.debug('>>>> In %s of %s', method_name, _file_name) - response = ( - '[SensitivityAnalysis:\n' - ) - response += f' #active_samples={len(self.active_samples)}\n' - response += ' completed_samples=\n' - response += 'sample_num, ' + ', '.join(list(self.actions)) + ', outcome\n' - - cur = [0] * len(self.actions) - keys = sorted(self.actions.keys()) - logging.info('self.complete_samples=%s', self.complete_samples) - for s in sorted(self.complete_samples.items(), key=lambda x: x[1]['outcome'], reverse=True): - response += str(s[0])+', ' - response += ', '.join([str(s[1]['action'][key]) for key in keys]) - response += ', '+str(s[1]['outcome'])+'\n' - response += ']' - logging.debug('<<<< Out %s of %s', method_name, _file_name) - return service_pb2.CurrentStatusResponse(response_str=response) - - @overrides - def fetch_optimal_action( - self, request: service_pb2.FetchOptimalActionRequest - ) -> service_pb2.FetchOptimalActionResponse: - method_name = 'fetch_optimal_action' - return service_pb2.CurrentStatusResponse(response_str='') + def __init__(self): + super().__init__() + self.num_samples_issued = 0 + self.active_samples = {} + self.complete_samples = {} + self.possible_values = {} + self._lock = threading.RLock() + + @overrides + def launch( + self, + request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: + method_name = 'launch' + logging.debug('>>>> In %s of %s', method_name, _file_name) + + response = super(SensitivityAnalysis, self).launch(request) + self.possible_values = {} + for i, key in enumerate(sorted(self.actions.keys())): + if self.actions[key].valid_float_values: + self.possible_values[key] = list( + self.actions[key].valid_float_values) + elif self.actions[key].step_size: + self.possible_values[key] = [] + cur = self.actions[key].min_value + while cur <= self.actions[key].max_value: + self.possible_values[key].append(cur) + cur += 
self.actions[key].step_size
+
+        logging.info('possible_values=%s', self.possible_values)
+        response.display_string = 'Sensitivity Analysis!'
+        logging.debug('<<<< Out %s of %s', method_name, _file_name)
+        return response
+
+    @overrides
+    def decision_point(
+        self, request: service_pb2.DecisionPointRequest
+    ) -> service_pb2.DecisionPointResponse:
+        method_name = 'decision_point'
+        logging.debug('>>>> In %s of %s', method_name, _file_name)
+
+        next_action = {}
+        for i, key in enumerate(self.actions):
+            if key in self.possible_values:
+                print('selecting from possible values')
+                next_action[key] = self.possible_values[key][random.randint(
+                    0,
+                    len(self.possible_values[key]) - 1)]
+            elif self.actions[key].HasField('continuous_prob_dist'):
+                rand_val = random.gauss(
+                    self.actions[key].continuous_prob_dist.gaussian.mean,
+                    self.actions[key].continuous_prob_dist.gaussian.stdev)
+                print(
+                    'self.actions[key].continuous_prob_dist=%s, rand_val=%s' %
+                    (self.actions[key].continuous_prob_dist, rand_val))
+                if rand_val < self.actions[key].min_value:
+                    rand_val = self.actions[key].min_value
+                elif rand_val > self.actions[key].max_value:
+                    rand_val = self.actions[key].max_value
+                next_action[key] = rand_val
+                if self.actions[key].continuous_prob_dist.HasField('gaussian'):
+                    rand_val = random.gauss(
+                        self.actions[key].continuous_prob_dist.gaussian.mean,
+                        self.actions[key].continuous_prob_dist.gaussian.stdev)
+                    print(
+                        'self.actions[key].continuous_prob_dist=%s, rand_val=%s'
+                        % (self.actions[key].continuous_prob_dist, rand_val))
+                    if rand_val < self.actions[key].min_value:
+                        rand_val = self.actions[key].min_value
+                    elif rand_val > self.actions[key].max_value:
+                        rand_val = self.actions[key].max_value
+                    next_action[key] = rand_val
+                elif self.actions[key].continuous_prob_dist.HasField(
+                        'uniform'):
+                    rand_val = random.uniform(
+                        self.actions[key].continuous_prob_dist.uniform.min_val,
+                        self.actions[key].continuous_prob_dist.uniform.max_val)
+                    print(
+                        'self.actions[key].continuous_prob_dist=%s, rand_val=%s'
+                        % (self.actions[key].continuous_prob_dist, rand_val))
+                    next_action[key] = rand_val
+                else:
+                    raise ValueError(
+                        'Only support Gaussian continuous distribution.')
+            elif self.actions[key].HasField('discrete_prob_dist'):
+                if self.actions[key].discrete_prob_dist.HasField('uniform'):
+                    rand_val = random.randint(
+                        self.actions[key].discrete_prob_dist.uniform.min_val,
+                        self.actions[key].discrete_prob_dist.uniform.max_val)
+                    print(
+                        'self.actions[key].discrete_prob_dist=%s, rand_val=%s'
+                        % (self.actions[key].discrete_prob_dist, rand_val))
+                    next_action[key] = rand_val
+                else:
+                    raise ValueError(
+                        'Only support Uniform discrete distribution.')
+            else:
+                print('selecting from random.uniform')
+                next_action[key] = random.uniform(self.actions[key].min_value,
+                                                  self.actions[key].max_value)
+
+        self._lock.acquire()
+        self.active_samples[request.worker_id] = {
+            'action': next_action,
+            'sample_num': self.num_samples_issued,
+        }
+        self.num_samples_issued += 1
+        self._lock.release()
+
+        logging.info('next_action=%s', next_action)
+        dp_response = service_pb2.DecisionPointResponse()
+        dp_response.action.extend(param_dict_to_proto(next_action))
+        dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT
+        logging.debug('<<<< Out %s of %s', method_name, _file_name)
+        return dp_response
+
+    @overrides
+    def finalize_episode(
+        self, request: service_pb2.FinalizeEpisodeRequest
+    ) -> service_pb2.FinalizeEpisodeResponse:
+        method_name = 'finalize_episode'
+        logging.debug('>>>> In %s of
+    @overrides
+    def finalize_episode(
+        self, request: service_pb2.FinalizeEpisodeRequest
+    ) -> service_pb2.FinalizeEpisodeResponse:
+        method_name = 'finalize_episode'
+        logging.debug('>>>> In %s of %s', method_name, _file_name)
+
+        self._lock.acquire()
+        # logging.info('FinalizeEpisode complete_samples=%s' % self.complete_samples)
+        self.complete_samples[self.active_samples[
+            request.worker_id]['sample_num']] = {
+                'outcome': request.decision_outcome.outcome_value,
+                'action': self.active_samples[request.worker_id]['action'],
+            }
+        del self.active_samples[request.worker_id]
+        self._lock.release()
+
+        # logging.info('FinalizeEpisode active_samples=%s' % self.active_samples)
+        logging.debug('<<<< Out %s of %s', method_name, _file_name)
+        return service_pb2.FinalizeEpisodeResponse(response_str='Success!')
+
+    @overrides
+    def current_status(
+        self, request: service_pb2.CurrentStatusRequest
+    ) -> service_pb2.CurrentStatusResponse:
+        method_name = 'current_status'
+        logging.debug('>>>> In %s of %s', method_name, _file_name)
+        response = '[SensitivityAnalysis:\n'
+        response += f' #active_samples={len(self.active_samples)}\n'
+        response += ' completed_samples=\n'
+        response += 'sample_num, ' + ', '.join(list(
+            self.actions)) + ', outcome\n'
+
+        keys = sorted(self.actions.keys())
+        logging.info('self.complete_samples=%s', self.complete_samples)
+        for s in sorted(self.complete_samples.items(),
+                        key=lambda x: x[1]['outcome'],
+                        reverse=True):
+            response += str(s[0]) + ', '
+            response += ', '.join([str(s[1]['action'][key]) for key in keys])
+            response += ', ' + str(s[1]['outcome']) + '\n'
+        response += ']'
+        logging.debug('<<<< Out %s of %s', method_name, _file_name)
+        return service_pb2.CurrentStatusResponse(response_str=response)
+
+    @overrides
+    def fetch_optimal_action(
+        self, request: service_pb2.FetchOptimalActionRequest
+    ) -> service_pb2.FetchOptimalActionResponse:
+        method_name = 'fetch_optimal_action'
+        return service_pb2.FetchOptimalActionResponse(response_str='')
diff --git a/sight_service/service_root.py b/sight_service/service_root.py
index dd3006d..16ea060 100644
--- a/sight_service/service_root.py
+++ b/sight_service/service_root.py
@@ -23,7 +23,7 @@ def warn(*args, **kwargs):
 warnings.warn = warn
 
 from concurrent import futures
-import logging
+from helpers.logs.logs_handler import logger as logging
 
 from absl import app
 from absl import flags
@@ -153,12 +153,12 @@ def get_instance(self, client_id: str) -> OptimizerInstance:
     # method_name = "get_instance"
     # logging.debug(">>>>>>> In %s method of %s file.", method_name, _file_name)
     with self.instances_lock.gen_rlock():
-      if(client_id in self.instances):
-        instance_obj = self.instances[client_id]
-        return instance_obj
+      if (client_id in self.instances):
+        instance_obj = self.instances[client_id]
+        return instance_obj
       else:
-        #add better mechanism, this require in close rpc for now
-        return None
+        # TODO: add a better mechanism; for now the Close RPC relies on this.
+        return None
 
     # logging.debug("<<<<<< Out %s method of %s file.", method_name, _file_name)
 
@@ -300,12 +300,11 @@ def Close(self, request, context):
                   _file_name)
 
     # only call if it's launch called, otherwise no entry of opt for that client
-    if(self.optimizers.get_instance(
-        request.client_id)):
-      obj = self.optimizers.get_instance(
-          request.client_id).close(request)
+    if (self.optimizers.get_instance(request.client_id)):
+      obj = self.optimizers.get_instance(
+          request.client_id).close(request)
     else:
-      obj = service_pb2.CloseResponse()
+      obj = service_pb2.CloseResponse()
 
     #? do we need to remove entry from optimizer dict, if available??
logging.info("<<<<<<< Out %s method of %s file.", method_name, @@ -344,6 +343,7 @@ def serve(): server.wait_for_termination() logging.info("<<<<<<< Out %s method of %s file.", method_name, _file_name) + def main(argv): method_name = "__main__" logging.basicConfig(level=logging.INFO) @@ -355,5 +355,6 @@ def main(argv): logging.error(e) logging.info("<<<<<<< Out %s method of %s file.", method_name, _file_name) + if __name__ == "__main__": app.run(main) diff --git a/sight_service/service_utils.py b/sight_service/service_utils.py index 3c028b1..ccde91d 100644 --- a/sight_service/service_utils.py +++ b/sight_service/service_utils.py @@ -11,13 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Common resources used in the gRPC sight_service example.""" import os import json import time -import logging +from helpers.logs.logs_handler import logger as logging from absl import flags from google.cloud import spanner @@ -27,199 +26,185 @@ def write_to_JSON(new_log_entry): - """Writes in the database locally as json file. + """Writes in the database locally as json file. Returns: """ - fname = "service/decision/decision_db.json" + fname = "service/decision/decision_db.json" - with open(fname, "r+") as sight_service_db_file: - # First we load existing data into a dict. - log_data = json.load(sight_service_db_file) - # Join new_data with log_data inside emp_details - log_data["log_details"].append(new_log_entry) - # Sets file's current position at offset. - sight_service_db_file.seek(0) - # convert back to json. - json.dump(log_data, sight_service_db_file, indent=4) + with open(fname, "r+") as sight_service_db_file: + # First we load existing data into a dict. + log_data = json.load(sight_service_db_file) + # Join new_data with log_data inside emp_details + log_data["log_details"].append(new_log_entry) + # Sets file's current position at offset. + sight_service_db_file.seek(0) + # convert back to json. 
+ json.dump(log_data, sight_service_db_file, indent=4) def create_database(instance_id, database_id, log_table_id, study_table_id): - """Creates a database and tables for sample data.""" - - method_name = "create_database" - logging.debug(">>>> In %s of %s", method_name, _file_name) - spanner_client = spanner.Client(project=os.environ['PROJECT_ID']) - - instance = spanner_client.instance(instance_id) - if instance.exists(): - print("Instance with ID {} exists.".format(instance_id)) - else: - config_name = "{}/instanceConfigs/regional-us-central1".format( - spanner_client.project_name - ) - - instance = spanner_client.instance( - instance_id, - configuration_name=config_name, - display_name="Log Data", - node_count=1, - ) - - operation = instance.create() - - print("Waiting for operation to complete...") - operation.result(OPERATION_TIMEOUT_SECONDS) - - print("Created instance {}".format(instance_id)) - - database = instance.database(database_id) - if database.exists(): - print("Database with ID {} exists.".format(database_id)) - else: - operation = database.create() - print("Waiting for operation to complete...") - operation.result(OPERATION_TIMEOUT_SECONDS) - print("Created database {} on instance {}".format(database_id, instance_id)) - - operation = database.update_ddl(["""CREATE TABLE """ + log_table_id + """ ( + """Creates a database and tables for sample data.""" + + method_name = "create_database" + logging.debug(">>>> In %s of %s", method_name, _file_name) + spanner_client = spanner.Client(project=os.environ['PROJECT_ID']) + + instance = spanner_client.instance(instance_id) + if instance.exists(): + print("Instance with ID {} exists.".format(instance_id)) + else: + config_name = "{}/instanceConfigs/regional-us-central1".format( + spanner_client.project_name) + + instance = spanner_client.instance( + instance_id, + configuration_name=config_name, + display_name="Log Data", + node_count=1, + ) + + operation = instance.create() + + print("Waiting for operation to complete...") + operation.result(OPERATION_TIMEOUT_SECONDS) + + print("Created instance {}".format(instance_id)) + + database = instance.database(database_id) + if database.exists(): + print("Database with ID {} exists.".format(database_id)) + else: + operation = database.create() + print("Waiting for operation to complete...") + operation.result(OPERATION_TIMEOUT_SECONDS) + print("Created database {} on instance {}".format( + database_id, instance_id)) + + operation = database.update_ddl([ + """CREATE TABLE """ + log_table_id + """ ( Id INT64 NOT NULL, LogFormat INT64, LogPathPrefix STRING(MAX), LogOwner STRING(MAX), LogLabel STRING(MAX) - ) PRIMARY KEY (Id)"""]) - print("Waiting for operation to complete...") - operation.result(OPERATION_TIMEOUT_SECONDS) - print( - "Created {} table on database {} on instance {}".format( - log_table_id, database_id, instance_id - ) - ) - - operation = database.update_ddl( - ["""CREATE TABLE """ + study_table_id + """ ( + ) PRIMARY KEY (Id)""" + ]) + print("Waiting for operation to complete...") + operation.result(OPERATION_TIMEOUT_SECONDS) + print("Created {} table on database {} on instance {}".format( + log_table_id, database_id, instance_id)) + + operation = database.update_ddl([ + """CREATE TABLE """ + study_table_id + """ ( LogId INT64 NOT NULL, StudyName STRING(MAX) - ) PRIMARY KEY (LogId)"""] - ) - print("Waiting for operation to complete...") - operation.result(OPERATION_TIMEOUT_SECONDS) - print( - "Created {} table on database {} on instance {}".format( - study_table_id, 
database_id, instance_id - ) - ) + ) PRIMARY KEY (LogId)""" + ]) + print("Waiting for operation to complete...") + operation.result(OPERATION_TIMEOUT_SECONDS) + print("Created {} table on database {} on instance {}".format( + study_table_id, database_id, instance_id)) + logging.debug("<<<< Out %s of %s", method_name, _file_name) + + +def Insert_In_StudyDetails_Table(study_details, instance_id, database_id, + study_table_id): + """adds study details to table mapped to unique LogId.""" + method_name = "Insert_In_StudyDetails_Table" + logging.debug(">>>> In %s of %s", method_name, _file_name) + + spanner_client = spanner.Client() + instance = spanner_client.instance(instance_id) + database = instance.database(database_id) + + def insert_StudyDetails(transaction): + query = ( + f"INSERT {study_table_id} (LogId, StudyName) VALUES" + f" ({study_details['LogId']}, '{study_details['StudyName']}')") + # print("StudyDetail query : ", query) + + row_ct = transaction.execute_update(query) + print("{} record inserted to spanner table {}".format( + row_ct, study_table_id)) + + database.run_in_transaction(insert_StudyDetails) logging.debug("<<<< Out %s of %s", method_name, _file_name) -def Insert_In_StudyDetails_Table( - study_details, instance_id, database_id, study_table_id -): - """adds study details to table mapped to unique LogId.""" - method_name = "Insert_In_StudyDetails_Table" - logging.debug(">>>> In %s of %s", method_name, _file_name) +def Fetch_From_StudyDetails_Table(log_id, instance_id, database_id, + study_table_id): + """fetch study name from table mapped to unique LogId.""" + method_name = "Fetch_From_StudyDetails_Table" + logging.debug(">>>> In %s of %s", method_name, _file_name) - spanner_client = spanner.Client() - instance = spanner_client.instance(instance_id) - database = instance.database(database_id) + spanner_client = spanner.Client() + instance = spanner_client.instance(instance_id) + database = instance.database(database_id) - def insert_StudyDetails(transaction): - query = ( - f"INSERT {study_table_id} (LogId, StudyName) VALUES" - f" ({study_details['LogId']}, '{study_details['StudyName']}')" - ) - # print("StudyDetail query : ", query) + with database.snapshot() as snapshot: + query = f"SELECT StudyName FROM {study_table_id} WHERE LogId = {log_id}" + results = snapshot.execute_sql(query) - row_ct = transaction.execute_update(query) - print( - "{} record inserted to spanner table {}".format(row_ct, study_table_id) - ) + # print(results) + for row in results: + # print("For LogId : {} => StudyName: {}".format(log_id ,row[0])) + return row[0] + logging.debug("<<<< Out %s of %s", method_name, _file_name) - database.run_in_transaction(insert_StudyDetails) - logging.debug("<<<< Out %s of %s", method_name, _file_name) +def Insert_In_LogDetails_Table(new_log_entry, instance_id, database_id, + table_id): + """Writes in the sight service database to spanner table. 
-def Fetch_From_StudyDetails_Table( - log_id, instance_id, database_id, study_table_id -): - """fetch study name from table mapped to unique LogId.""" - method_name = "Fetch_From_StudyDetails_Table" - logging.debug(">>>> In %s of %s", method_name, _file_name) + Returns: + """ + method_name = "Insert_In_LogDetails_Table" + logging.debug(">>>> In %s of %s", method_name, _file_name) + + spanner_client = spanner.Client() + instance = spanner_client.instance(instance_id) + database = instance.database(database_id) + + def insert_LogDetails(transaction): + query = ( + f"INSERT {table_id} (Id, LogFormat, LogPathPrefix, LogOwner, LogLabel)" + f" VALUES ({new_log_entry['Id']}, {new_log_entry['LogFormat']}," + f" '{new_log_entry['LogPathPrefix']}', '{new_log_entry['LogOwner']}'," + f" '{new_log_entry['LogLabel']}')") + # print("LogDetail query : ", query) + + row_ct = transaction.execute_update(query) + print("{} record inserted to spanner table {}".format( + row_ct, table_id)) + + database.run_in_transaction(insert_LogDetails) + logging.debug("<<<< Out %s of %s", method_name, _file_name) - spanner_client = spanner.Client() - instance = spanner_client.instance(instance_id) - database = instance.database(database_id) - with database.snapshot() as snapshot: - query = f"SELECT StudyName FROM {study_table_id} WHERE LogId = {log_id}" - results = snapshot.execute_sql(query) +def Insert_In_ClientData_Table(client_details, instance_id, database_id, + clientdata_table_id): + """adds client details to table.""" - # print(results) - for row in results: - # print("For LogId : {} => StudyName: {}".format(log_id ,row[0])) - return row[0] - logging.debug("<<<< Out %s of %s", method_name, _file_name) + method_name = "Insert_In_ClientData_Table" + logging.debug(">>>> In %s of %s", method_name, _file_name) + spanner_client = spanner.Client() + instance = spanner_client.instance(instance_id) + database = instance.database(database_id) -def Insert_In_LogDetails_Table( - new_log_entry, instance_id, database_id, table_id -): - """Writes in the sight service database to spanner table. 
+ def insert_ClientDetails(transaction): + query = ( + f"INSERT {clientdata_table_id} (sight_id, env, network_path," + f" learner_path, replay_address) VALUES ({client_details['sight_id']}," + f" '{client_details['env']}', '{client_details['network_path']}'," + f" '{client_details['learner_path']}'," + f" '{client_details['replay_address']}')") + # print("StudyDetail query : ", query) - Returns: - """ - method_name = "Insert_In_LogDetails_Table" - logging.debug(">>>> In %s of %s", method_name, _file_name) - - spanner_client = spanner.Client() - instance = spanner_client.instance(instance_id) - database = instance.database(database_id) - - def insert_LogDetails(transaction): - query = ( - f"INSERT {table_id} (Id, LogFormat, LogPathPrefix, LogOwner, LogLabel)" - f" VALUES ({new_log_entry['Id']}, {new_log_entry['LogFormat']}," - f" '{new_log_entry['LogPathPrefix']}', '{new_log_entry['LogOwner']}'," - f" '{new_log_entry['LogLabel']}')" - ) - # print("LogDetail query : ", query) - - row_ct = transaction.execute_update(query) - print("{} record inserted to spanner table {}".format(row_ct, table_id)) - - database.run_in_transaction(insert_LogDetails) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - - -def Insert_In_ClientData_Table( - client_details, instance_id, database_id, clientdata_table_id -): - """adds client details to table.""" - - method_name = "Insert_In_ClientData_Table" - logging.debug(">>>> In %s of %s", method_name, _file_name) - - spanner_client = spanner.Client() - instance = spanner_client.instance(instance_id) - database = instance.database(database_id) - - def insert_ClientDetails(transaction): - query = ( - f"INSERT {clientdata_table_id} (sight_id, env, network_path," - f" learner_path, replay_address) VALUES ({client_details['sight_id']}," - f" '{client_details['env']}', '{client_details['network_path']}'," - f" '{client_details['learner_path']}'," - f" '{client_details['replay_address']}')" - ) - # print("StudyDetail query : ", query) - - row_ct = transaction.execute_update(query) - print( - "{} record inserted to spanner table {}".format( - row_ct, clientdata_table_id - ) - ) + row_ct = transaction.execute_update(query) + print("{} record inserted to spanner table {}".format( + row_ct, clientdata_table_id)) - database.run_in_transaction(insert_ClientDetails) - logging.debug("<<<< Out %s of %s", method_name, _file_name) + database.run_in_transaction(insert_ClientDetails) + logging.debug("<<<< Out %s of %s", method_name, _file_name) diff --git a/sight_service/single_action_optimizer.py b/sight_service/single_action_optimizer.py index c8b8da2..8f11bf5 100644 --- a/sight_service/single_action_optimizer.py +++ b/sight_service/single_action_optimizer.py @@ -11,11 +11,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """An instance of a Sight optimizer dedicated to a single experiment.""" from concurrent import futures -import logging +from helpers.logs.logs_handler import logger as logging from typing import Any, Dict, List, Tuple, Sequence from sight_service.optimizer_instance import OptimizerInstance from sight_service.proto import service_pb2 @@ -25,17 +24,15 @@ class SingleActionOptimizer(OptimizerInstance): - """An SingleActionOptimizer class that is generic for all optimizers. + """An SingleActionOptimizer class that is generic for all optimizers. 
  An optimizer containing base methods that specialized optimizers override
  while communicating with the client.
  """

-  def __init__(self):
-    super().__init__()
-    self.unique_id = 1
-    self.pending_samples = {}
-    self.active_samples = {}
-    self.completed_samples = {}
-
-
+    def __init__(self):
+        super().__init__()
+        self.unique_id = 1
+        self.pending_samples = {}
+        self.active_samples = {}
+        self.completed_samples = {}
diff --git a/sight_service/smc_py.py b/sight_service/smc_py.py
index 406c420..bd0bb0e 100644
--- a/sight_service/smc_py.py
+++ b/sight_service/smc_py.py
@@ -11,10 +11,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """SMCPy-based optimization for driving Sight applications."""
-import logging
+from helpers.logs.logs_handler import logger as logging
 from overrides import overrides
 from typing import Any, Dict, List, Tuple
@@ -31,206 +30,225 @@
 from smcpy import VectorMCMC, VectorMCMCKernel
 import threading
+
 # Initialize model
 class ModelSamplingDriver():
-  '''
+    '''
   Driver for communicating with SMC.
   '''
 
-  def __init__(self, param_names: List[str], priors: List, std_dev:float):
-    self._buf_size = 50
-    self._model_inputs_meta_q = queue.Queue(1)
-    self._model_inputs_q = queue.Queue(self._buf_size)
-    self._model_outputs_meta_q = queue.Queue(1)
-    self._model_outputs_q = queue.Queue(self._buf_size)
-
-    # Define prior distributions & MCMC kernel
-    self._vector_mcmc = VectorMCMC(self.evaluate, [0], priors, std_dev)
-    self._mcmc_kernel = VectorMCMCKernel(self._vector_mcmc, param_order=param_names)
-    self._smc = Sampler(self._mcmc_kernel)
-    self._num_mcmc_samples = 5
-
-  def sample(self):
-    step_list, mll_list = self._smc.sample(num_particles=self._buf_size,
-                                           num_mcmc_samples=self._num_mcmc_samples,
-                                           target_ess=0.8)
-    self._model_inputs_meta_q.put(-1)
-    # print ('step_list=', step_list.__dict__)
-    # print 
('step_list=', step_list.mean()) + # print ('mll_list=', mll_list) + + print(f'phi_sequence={self._smc.phi_sequence}') + print(f'fbf norm index={self._smc.req_phi_index}') + print('marginal log likelihood = {}'.format(mll_list[-1])) + print('parameter means = {}'.format(step_list[-1].compute_mean())) + + def evaluate(self, params): + print('<<< ModelSamplingDriver evaluate() #params=', len(params)) + self._model_inputs_meta_q.put(len(params)) + for i, p in enumerate(params): + self._model_inputs_q.put({'idx': i, 'params': p}) + + results = [None] * len(params) + for i in range(len(params)): + v = self._model_outputs_q.get() + results[v['idx']] = v['result'] + print('>>> ModelSamplingDriver evaluate() #results=', len(results)) + return np.array(results) class SMCPy(OptimizerInstance): - """Uses the SMCPy library to choose the parameters of the code. + """Uses the SMCPy library to choose the parameters of the code. Attributes: possible_values: Maps each action attributes to the list of possible values of this attribute. """ - def __init__(self): - super(SMCPy, self).__init__() - self.num_samples_issued = 0 - self.active_samples = {} - self.complete_samples = {} - self.possible_values = {} - self._lock = threading.RLock() - self._driver = None - - @overrides - def launch( - self, request: service_pb2.LaunchRequest - ) -> service_pb2.LaunchResponse: - response = super(SMCPy, self).launch(request) - - # self.possible_values = {} - # for i, key in enumerate(sorted(self.actions.keys())): - # if self.actions[key].valid_float_values: - # self.possible_values[key] = list(self.actions[key].valid_float_values) - # elif self.actions[key].step_size: - # self.possible_values[key] = [] - # cur = self.actions[key].min_value - # while cur <= self.actions[key].max_value: - # self.possible_values[key].append(cur) - # cur += self.actions[key].step_size - # print('possible_values=%s' % self.possible_values) - - self._param_names = list(sorted(self.actions.keys())) - self._driver = ModelSamplingDriver( - param_names = self._param_names, - priors = [uniform(self.actions[key].min_value, self.actions[key].max_value) for key in self._param_names], - std_dev = 0.5) - self._smc_thread = threading.Thread(target = self._driver.sample, args = ()) - self._smc_thread.start() - - self._num_samples_in_cur_batch = 0 - self._sample_idx = 0 - self._num_samples_complete = 0 - self._num_samples_remaining = 0 - - response.display_string = 'SMCPy Start' - print('response=%s' % response) - return response - - def _params_to_dict(self, dp: sight_pb2) -> Dict[str, float]: - """Returns the dict representation of a DecisionParams proto""" - d = {} - for a in dp: - d[a.key] = a.value.double_value - return d - - @overrides - def decision_point( - self, request: service_pb2.DecisionPointRequest - ) -> service_pb2.DecisionPointResponse: - logging.info('DecisionPoint request=%s', request) - logging.info('DecisionPoint self._lock=%s', self._lock) - - self._lock.acquire() - logging.info('decision_point() _sample_idx=%s, self._num_samples_in_cur_batch=%s, self._num_samples_remaining=%s, self._num_samples_complete=%s', - self._sample_idx, self._num_samples_in_cur_batch, self._num_samples_remaining, self._num_samples_complete) - - dp_response = service_pb2.DecisionPointResponse() - logging.info('dp_response=%s', dp_response) - params = [] - if self._sample_idx == self._num_samples_in_cur_batch and \ - self._num_samples_complete < self._num_samples_remaining: - logging.info('AT_RETRY') - self._lock.release() - dp_response.action_type = 
service_pb2.DecisionPointResponse.ActionType.AT_RETRY - return dp_response - - logging.info('Start new batch') - # Start new batch - if self._sample_idx == self._num_samples_in_cur_batch: - logging.info('Starting new batch') - self._num_samples_in_cur_batch = self._driver._model_inputs_meta_q.get() - self._sample_idx = 0 - self._num_samples_complete = 0 - - logging.info('Getting Params') - - params = self._driver._model_inputs_q.get()['params'] - - self.active_samples[request.worker_id] = { - 'action': params, - 'sample_num': self.num_samples_issued, - 'idx': self._sample_idx, - } - self._sample_idx += 1 - - self.num_samples_issued += 1 - self._lock.release() - - for i, value in enumerate(params): - a = dp_response.action.add() - a.key = self._param_names[i] - a.value.double_value = float(value) - - print('DecisionPoint response=%s' % dp_response) - dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT - return dp_response - - @overrides - def finalize_episode( - self, request: service_pb2.FinalizeEpisodeRequest - ) -> service_pb2.FinalizeEpisodeResponse: - logging.info('FinalizeEpisode request=%s', request) - d = {} - for a in request.decision_point.choice_params: - d[a.key] = a.value.double_value - result = [d[key] for key in self._param_names] - - self._lock.acquire() - self._driver._model_outputs_q.put({ - 'idx': self.active_samples[request.worker_id]['idx'], - 'result': result, - }) - self._num_samples_complete += 1 - - logging.info('FinalizeEpisode outcome=%s / %s', request.decision_outcome.reward, d) - del self.active_samples[request.worker_id] - self._lock.release() - return service_pb2.FinalizeEpisodeResponse(response_str='Success!') - - @overrides - def current_status( - self, request: service_pb2.CurrentStatusRequest - ) -> service_pb2.CurrentStatusResponse: - response = '[SMCPy (num_ask=#%s, num_tell=#%s)\n' % (self._optimizer.num_ask, self._optimizer.num_tell) - - self._lock.acquire() - response += 'sample_num, ' + ', '.join(list(self.actions)) + ', outcome\n' - cur = [0] * len(self.actions) - keys = sorted(self.actions.keys()) - logging.info('self.complete_samples=%s', self.complete_samples) - for s in sorted(self.complete_samples.items(), key=lambda x: x[1]['outcome'], reverse=True): - response += str(s[0])+', ' - response += ', '.join([str(s[1]['action'][key]) for key in keys]) - response += ', '+str(s[1]['outcome'])+'\n' - - response += 'pareto_front:\n' - for trial in self._optimizer.pareto_front(): - response += ', '.join([str(trial.args[0][key]) for key in keys])+'\n' - response += ']\n' - self._lock.release() - - return service_pb2.CurrentStatusResponse(response_str=response) \ No newline at end of file + def __init__(self): + super(SMCPy, self).__init__() + self.num_samples_issued = 0 + self.active_samples = {} + self.complete_samples = {} + self.possible_values = {} + self._lock = threading.RLock() + self._driver = None + + @overrides + def launch( + self, + request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: + response = super(SMCPy, self).launch(request) + + # self.possible_values = {} + # for i, key in enumerate(sorted(self.actions.keys())): + # if self.actions[key].valid_float_values: + # self.possible_values[key] = list(self.actions[key].valid_float_values) + # elif self.actions[key].step_size: + # self.possible_values[key] = [] + # cur = self.actions[key].min_value + # while cur <= self.actions[key].max_value: + # self.possible_values[key].append(cur) + # cur += self.actions[key].step_size + # print('possible_values=%s' % 
self.possible_values) + + self._param_names = list(sorted(self.actions.keys())) + self._driver = ModelSamplingDriver(param_names=self._param_names, + priors=[ + uniform( + self.actions[key].min_value, + self.actions[key].max_value) + for key in self._param_names + ], + std_dev=0.5) + self._smc_thread = threading.Thread(target=self._driver.sample, + args=()) + self._smc_thread.start() + + self._num_samples_in_cur_batch = 0 + self._sample_idx = 0 + self._num_samples_complete = 0 + self._num_samples_remaining = 0 + + response.display_string = 'SMCPy Start' + print('response=%s' % response) + return response + + def _params_to_dict(self, dp: sight_pb2) -> Dict[str, float]: + """Returns the dict representation of a DecisionParams proto""" + d = {} + for a in dp: + d[a.key] = a.value.double_value + return d + + @overrides + def decision_point( + self, request: service_pb2.DecisionPointRequest + ) -> service_pb2.DecisionPointResponse: + logging.info('DecisionPoint request=%s', request) + logging.info('DecisionPoint self._lock=%s', self._lock) + + self._lock.acquire() + logging.info( + 'decision_point() _sample_idx=%s, self._num_samples_in_cur_batch=%s, self._num_samples_remaining=%s, self._num_samples_complete=%s', + self._sample_idx, self._num_samples_in_cur_batch, + self._num_samples_remaining, self._num_samples_complete) + + dp_response = service_pb2.DecisionPointResponse() + logging.info('dp_response=%s', dp_response) + params = [] + if self._sample_idx == self._num_samples_in_cur_batch and \ + self._num_samples_complete < self._num_samples_remaining: + logging.info('AT_RETRY') + self._lock.release() + dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_RETRY + return dp_response + + logging.info('Start new batch') + # Start new batch + if self._sample_idx == self._num_samples_in_cur_batch: + logging.info('Starting new batch') + self._num_samples_in_cur_batch = self._driver._model_inputs_meta_q.get( + ) + self._sample_idx = 0 + self._num_samples_complete = 0 + + logging.info('Getting Params') + + params = self._driver._model_inputs_q.get()['params'] + + self.active_samples[request.worker_id] = { + 'action': params, + 'sample_num': self.num_samples_issued, + 'idx': self._sample_idx, + } + self._sample_idx += 1 + + self.num_samples_issued += 1 + self._lock.release() + + for i, value in enumerate(params): + a = dp_response.action.add() + a.key = self._param_names[i] + a.value.double_value = float(value) + + print('DecisionPoint response=%s' % dp_response) + dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT + return dp_response + + @overrides + def finalize_episode( + self, request: service_pb2.FinalizeEpisodeRequest + ) -> service_pb2.FinalizeEpisodeResponse: + logging.info('FinalizeEpisode request=%s', request) + d = {} + for a in request.decision_point.choice_params: + d[a.key] = a.value.double_value + result = [d[key] for key in self._param_names] + + self._lock.acquire() + self._driver._model_outputs_q.put({ + 'idx': + self.active_samples[request.worker_id]['idx'], + 'result': + result, + }) + self._num_samples_complete += 1 + + logging.info('FinalizeEpisode outcome=%s / %s', + request.decision_outcome.reward, d) + del self.active_samples[request.worker_id] + self._lock.release() + return service_pb2.FinalizeEpisodeResponse(response_str='Success!') + + @overrides + def current_status( + self, request: service_pb2.CurrentStatusRequest + ) -> service_pb2.CurrentStatusResponse: + response = '[SMCPy (num_ask=#%s, num_tell=#%s)\n' % ( + 
self._optimizer.num_ask, self._optimizer.num_tell) + + self._lock.acquire() + response += 'sample_num, ' + ', '.join(list( + self.actions)) + ', outcome\n' + cur = [0] * len(self.actions) + keys = sorted(self.actions.keys()) + logging.info('self.complete_samples=%s', self.complete_samples) + for s in sorted(self.complete_samples.items(), + key=lambda x: x[1]['outcome'], + reverse=True): + response += str(s[0]) + ', ' + response += ', '.join([str(s[1]['action'][key]) for key in keys]) + response += ', ' + str(s[1]['outcome']) + '\n' + + response += 'pareto_front:\n' + for trial in self._optimizer.pareto_front(): + response += ', '.join([str(trial.args[0][key]) + for key in keys]) + '\n' + response += ']\n' + self._lock.release() + + return service_pb2.CurrentStatusResponse(response_str=response) diff --git a/sight_service/vizier.py b/sight_service/vizier.py index 4021aaa..af1fa82 100644 --- a/sight_service/vizier.py +++ b/sight_service/vizier.py @@ -11,10 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Vizier Bayesian optimizer for driving Sight applications.""" -import logging +from helpers.logs.logs_handler import logger as logging import os from overrides import overrides from typing import Any, Dict, List, Tuple @@ -31,192 +30,182 @@ PROJECT_ID = os.environ['PROJECT_ID'] PROJECT_REGION = 'us-central1' VIZIER_ENDPOINT = f'{PROJECT_REGION}-aiplatform.googleapis.com' -_vizier_client = aiplatform.gapic.VizierServiceClient( - client_options=dict(api_endpoint=VIZIER_ENDPOINT) -) +_vizier_client = aiplatform.gapic.VizierServiceClient(client_options=dict( + api_endpoint=VIZIER_ENDPOINT)) _file_name = "vizier.py" # FLAGS = flags.FLAGS def _get_vizier_study_display_name(client_id: str, label: str) -> str: - return ( - 'Sight_' - + label.replace(' ', '_') - + '_' - + str(client_id) - + '_' - + datetime.now().strftime('%Y%m%d_%H%M%S') - ) + return ('Sight_' + label.replace(' ', '_') + '_' + str(client_id) + '_' + + datetime.now().strftime('%Y%m%d_%H%M%S')) def _get_vizier_study_config(client_id: str, label: str, study_config_param): - """Generate a Vizier StudyConfig from command-line flags.""" - method_name = "_get_vizier_study_config" - logging.debug(">>>> In %s of %s", method_name, _file_name) - study_params = [] - for attr in study_config_param.action_attrs: - study_params.append({ - 'parameter_id': attr, - 'double_value_spec': { - 'min_value': study_config_param.action_attrs[attr].min_value, - 'max_value': study_config_param.action_attrs[attr].max_value, + """Generate a Vizier StudyConfig from command-line flags.""" + method_name = "_get_vizier_study_config" + logging.debug(">>>> In %s of %s", method_name, _file_name) + study_params = [] + for attr in study_config_param.action_attrs: + study_params.append({ + 'parameter_id': attr, + 'double_value_spec': { + 'min_value': study_config_param.action_attrs[attr].min_value, + 'max_value': study_config_param.action_attrs[attr].max_value, + }, + }) + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return { + 'display_name': _get_vizier_study_display_name(client_id, label), + 'study_spec': { + 'algorithm': 'ALGORITHM_UNSPECIFIED', + 'parameters': study_params, + 'metrics': [{ + 'metric_id': 'outcome', + 'goal': 'MAXIMIZE' + }], }, - }) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return { - 'display_name': _get_vizier_study_display_name(client_id, label), - 'study_spec': { - 
'algorithm': 'ALGORITHM_UNSPECIFIED', - 'parameters': study_params, - 'metrics': [{'metric_id': 'outcome', 'goal': 'MAXIMIZE'}], - }, - } + } class Vizier(OptimizerInstance): - """Vizier specific implementation of OptimizerInstance class. + """Vizier specific implementation of OptimizerInstance class. """ - def __init__(self): - super().__init__() - self.vizier_study = '' - self.current_trial: Dict[str, str] = {} - self.vizier_url = '' - self._total_count = 0 - self._completed_count = 0 - - @overrides - def launch( - self, request: service_pb2.LaunchRequest - ) -> service_pb2.LaunchResponse: - method_name = "launch" - logging.debug(">>>> In %s of %s", method_name, _file_name) - launch_response = super(Vizier, self).launch(request) - - self._total_count = request.decision_config_params.num_trials - study_config = _get_vizier_study_config( - request.client_id, request.label, request.decision_config_params - ) - vizier_response = _vizier_client.create_study( - parent=f'projects/{PROJECT_ID}/locations/{PROJECT_REGION}', - study=study_config - ) - vizier_url = ( - 'https://pantheon.corp.google.com/vertex-ai/locations/' - + PROJECT_REGION - + '/studies/' - + vizier_response.name.split('/')[-1] - + '?project=' - + PROJECT_ID - ) - self.vizier_url = vizier_url - - self.vizier_study = vizier_response.name - logging.info('updated self : %s', str(self.__dict__)) - - launch_response.display_string = vizier_url - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return launch_response - - @overrides - def decision_point( - self, request: service_pb2.DecisionPointRequest - ) -> service_pb2.DecisionPointResponse: - method_name = "decision_point" - logging.debug(">>>> In %s of %s", method_name, _file_name) - response = ( - _vizier_client.suggest_trials({ - 'parent': self.vizier_study, - 'suggestion_count': 1, - 'client_id': request.worker_id, - }) - .result() - .trials - ) - - self.current_trial[request.worker_id] = response[0].name - - dp_response = service_pb2.DecisionPointResponse() - dp_response.action.extend( - param_dict_to_proto( - { + def __init__(self): + super().__init__() + self.vizier_study = '' + self.current_trial: Dict[str, str] = {} + self.vizier_url = '' + self._total_count = 0 + self._completed_count = 0 + + @overrides + def launch( + self, + request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: + method_name = "launch" + logging.debug(">>>> In %s of %s", method_name, _file_name) + launch_response = super(Vizier, self).launch(request) + + self._total_count = request.decision_config_params.num_trials + study_config = _get_vizier_study_config(request.client_id, + request.label, + request.decision_config_params) + vizier_response = _vizier_client.create_study( + parent=f'projects/{PROJECT_ID}/locations/{PROJECT_REGION}', + study=study_config) + vizier_url = ('https://pantheon.corp.google.com/vertex-ai/locations/' + + PROJECT_REGION + '/studies/' + + vizier_response.name.split('/')[-1] + '?project=' + + PROJECT_ID) + self.vizier_url = vizier_url + + self.vizier_study = vizier_response.name + logging.info('updated self : %s', str(self.__dict__)) + + launch_response.display_string = vizier_url + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return launch_response + + @overrides + def decision_point( + self, request: service_pb2.DecisionPointRequest + ) -> service_pb2.DecisionPointResponse: + method_name = "decision_point" + logging.debug(">>>> In %s of %s", method_name, _file_name) + response = (_vizier_client.suggest_trials({ + 'parent': + 
self.vizier_study, + 'suggestion_count': + 1, + 'client_id': + request.worker_id, + }).result().trials) + + self.current_trial[request.worker_id] = response[0].name + + dp_response = service_pb2.DecisionPointResponse() + dp_response.action.extend( + param_dict_to_proto({ param.parameter_id: param.value for param in response[0].parameters - } - ) - ) - dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return dp_response - - @overrides - def finalize_episode( - self, request: service_pb2.FinalizeEpisodeRequest - ) -> service_pb2.FinalizeEpisodeResponse: - method_name = "finalize_episode" - logging.debug(">>>> In %s of %s", method_name, _file_name) - metrics = [] - metrics_obj = {} - metrics_obj['metric_id'] = request.decision_outcome.outcome_label - metrics_obj['value'] = request.decision_outcome.reward - metrics.append(metrics_obj) - - if request.worker_id not in self.current_trial: - logging.info('Given worker not found......') - logging.info('current key(worker) is = %s', request.worker_id) - logging.info('current instance = %s', str(self)) - return service_pb2.FinalizeEpisodeResponse( - response_str=f'Worker {request.worker_id} has no known trial!' - ) - - logging.info('FinalizeEpisode metrics=%s', metrics) - _vizier_client.complete_trial({ - 'name': self.current_trial[request.worker_id], - 'final_measurement': {'metrics': metrics}, - }) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return service_pb2.FinalizeEpisodeResponse(response_str='Success!') - - @overrides - def current_status( - self, request: service_pb2.CurrentStatusRequest - ) -> service_pb2.CurrentStatusResponse: - method_name = "current_status" - logging.debug(">>>> In %s of %s", method_name, _file_name) - # optimal = _vizier_client.list_optimal_trials({ - # 'parent': self.vizier_study, - # }) - print('user can check status of vizier study here : ', self.vizier_url) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return service_pb2.CurrentStatusResponse(response_str=str(self.vizier_url)) - - @overrides - def fetch_optimal_action( - self, request: service_pb2.FetchOptimalActionRequest - ) -> service_pb2.FetchOptimalActionResponse: - method_name = "fetch_optimal_action" - logging.debug(">>>> In %s of %s", method_name, _file_name) - optimal = _vizier_client.list_optimal_trials({ - 'parent': self.vizier_study, - }) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return service_pb2.CurrentStatusResponse(response_str=str(optimal)) - - @overrides - def WorkerAlive( - self, request: service_pb2.WorkerAliveRequest - ) -> service_pb2.WorkerAliveResponse: - method_name = "WorkerAlive" - logging.debug(">>>> In %s of %s", method_name, _file_name) - if(self._completed_count == self._total_count): - worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_DONE - # elif(not self.pending_samples): - # worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_RETRY - else: - # Increasing count here so that multiple workers can't enter the dp call for same sample at last - self._completed_count += 1 - worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_ACT - logging.info("worker_alive_status is %s", worker_alive_status) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return service_pb2.WorkerAliveResponse(status_type=worker_alive_status) - + })) + dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT + logging.debug("<<<< Out 
%s of %s", method_name, _file_name) + return dp_response + + @overrides + def finalize_episode( + self, request: service_pb2.FinalizeEpisodeRequest + ) -> service_pb2.FinalizeEpisodeResponse: + method_name = "finalize_episode" + logging.debug(">>>> In %s of %s", method_name, _file_name) + metrics = [] + metrics_obj = {} + metrics_obj['metric_id'] = request.decision_outcome.outcome_label + metrics_obj['value'] = request.decision_outcome.reward + metrics.append(metrics_obj) + + if request.worker_id not in self.current_trial: + logging.info('Given worker not found......') + logging.info('current key(worker) is = %s', request.worker_id) + logging.info('current instance = %s', str(self)) + return service_pb2.FinalizeEpisodeResponse( + response_str=f'Worker {request.worker_id} has no known trial!') + + logging.info('FinalizeEpisode metrics=%s', metrics) + _vizier_client.complete_trial({ + 'name': + self.current_trial[request.worker_id], + 'final_measurement': { + 'metrics': metrics + }, + }) + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return service_pb2.FinalizeEpisodeResponse(response_str='Success!') + + @overrides + def current_status( + self, request: service_pb2.CurrentStatusRequest + ) -> service_pb2.CurrentStatusResponse: + method_name = "current_status" + logging.debug(">>>> In %s of %s", method_name, _file_name) + # optimal = _vizier_client.list_optimal_trials({ + # 'parent': self.vizier_study, + # }) + print('user can check status of vizier study here : ', self.vizier_url) + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return service_pb2.CurrentStatusResponse( + response_str=str(self.vizier_url)) + + @overrides + def fetch_optimal_action( + self, request: service_pb2.FetchOptimalActionRequest + ) -> service_pb2.FetchOptimalActionResponse: + method_name = "fetch_optimal_action" + logging.debug(">>>> In %s of %s", method_name, _file_name) + optimal = _vizier_client.list_optimal_trials({ + 'parent': + self.vizier_study, + }) + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return service_pb2.CurrentStatusResponse(response_str=str(optimal)) + + @overrides + def WorkerAlive( + self, request: service_pb2.WorkerAliveRequest + ) -> service_pb2.WorkerAliveResponse: + method_name = "WorkerAlive" + logging.debug(">>>> In %s of %s", method_name, _file_name) + if (self._completed_count == self._total_count): + worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_DONE + # elif(not self.pending_samples): + # worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_RETRY + else: + # Increasing count here so that multiple workers can't enter the dp call for same sample at last + self._completed_count += 1 + worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_ACT + logging.info("worker_alive_status is %s", worker_alive_status) + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return service_pb2.WorkerAliveResponse(status_type=worker_alive_status) diff --git a/sight_service/worklist_scheduler_opt.py b/sight_service/worklist_scheduler_opt.py index 36a9efa..6af02af 100644 --- a/sight_service/worklist_scheduler_opt.py +++ b/sight_service/worklist_scheduler_opt.py @@ -13,7 +13,7 @@ # limitations under the License. 
"""Exhaustive search for driving Sight applications.""" -import logging +from helpers.logs.logs_handler import logger as logging from readerwriterlock import rwlock from overrides import overrides from typing import Any, Dict, List, Tuple @@ -27,7 +27,6 @@ import threading from sight.widgets.decision import utils - _file_name = "exhaustive_search.py" @@ -71,9 +70,7 @@ def propose_action( action_attrs = param_proto_to_dict(request.action_attrs) with self.pending_lock.gen_wlock(): - self.pending_samples[self.unique_id] = [ - action_attrs, attributes - ] + self.pending_samples[self.unique_id] = [action_attrs, attributes] # print('self.pending_samples : ', # self.pending_samples) @@ -83,10 +80,8 @@ def propose_action( # self.completed_samples) print('self.unique_id : ', self.unique_id) - # Create response - response = service_pb2.ProposeActionResponse( - action_id=self.unique_id) + response = service_pb2.ProposeActionResponse(action_id=self.unique_id) self.unique_id += 1 return response @@ -101,11 +96,11 @@ def GetOutcome( # print('self.completed_samples : ', # self.completed_samples) with self.completed_lock.gen_rlock(): - completed_samples = self.completed_samples + completed_samples = self.completed_samples with self.pending_lock.gen_rlock(): - pending_samples = self.pending_samples + pending_samples = self.pending_samples with self.active_lock.gen_rlock(): - active_samples = self.active_samples + active_samples = self.active_samples response = service_pb2.GetOutcomeResponse() if (request.unique_ids): @@ -117,16 +112,17 @@ def GetOutcome( sample_details = self.completed_samples[sample_id] outcome.status = service_pb2.GetOutcomeResponse.Outcome.Status.COMPLETED outcome.reward = sample_details['reward'] - outcome.action_attrs.extend(param_dict_to_proto( - sample_details['action'])) - outcome.outcome_attrs.extend(param_dict_to_proto( - sample_details['outcome'])) - outcome.attributes.extend(param_dict_to_proto( - sample_details['attribute'])) + outcome.action_attrs.extend( + param_dict_to_proto(sample_details['action'])) + outcome.outcome_attrs.extend( + param_dict_to_proto(sample_details['outcome'])) + outcome.attributes.extend( + param_dict_to_proto(sample_details['attribute'])) elif (sample_id in pending_samples): outcome.status = service_pb2.GetOutcomeResponse.Outcome.Status.PENDING outcome.response_str = '!! requested sample not yet assigned to any worker !!' - elif any(value['id'] == sample_id for value in active_samples.values()): + elif any(value['id'] == sample_id + for value in active_samples.values()): outcome.status = service_pb2.GetOutcomeResponse.Outcome.Status.ACTIVE outcome.response_str = '!! requested sample not completed yet !!' else: @@ -135,11 +131,11 @@ def GetOutcome( print("!! 
NOT EXIST !!")
        with self.active_lock.gen_rlock():
-          print(self.active_samples)
+            print(self.active_samples)
        with self.pending_lock.gen_rlock():
-          print(self.pending_samples)
+            print(self.pending_samples)
        with self.completed_lock.gen_rlock():
-          print(self.completed_samples)
+            print(self.completed_samples)
     else:
       for sample_id in completed_samples.keys():
         sample_details = completed_samples[sample_id]
@@ -148,14 +144,14 @@ def GetOutcome(
 
         outcome.status = service_pb2.GetOutcomeResponse.Outcome.Status.COMPLETED
         outcome.reward = sample_details['reward']
-        outcome.action_attrs.extend(param_dict_to_proto(
-            sample_details['action']))
+        outcome.action_attrs.extend(
+            param_dict_to_proto(sample_details['action']))
 
-        outcome.outcome_attrs.extend(param_dict_to_proto(
-            sample_details['outcome']))
+        outcome.outcome_attrs.extend(
+            param_dict_to_proto(sample_details['outcome']))
 
-        outcome.attributes.extend(param_dict_to_proto(
-            sample_details['attribute']))
+        outcome.attributes.extend(
+            param_dict_to_proto(sample_details['attribute']))
 
     # print('response here: ', response)
     return response
@@ -182,23 +178,23 @@ def decision_point(
 
     # else:
     #   if self.pending_samples:
-      # todo : meetashah : add logic to fetch action stored from propose actions and send it as repsonse
-      # key, sample = self.pending_samples.popitem()
-      # fetching the key in FIFO manner
+        # TODO(meetashah): add logic to fetch the action stored by propose_action and send it as the response.
+        # key, sample = self.pending_samples.popitem()
+        # fetching the key in FIFO manner
 
-    #? this part now handled by worker alive rpc
-    # with self.pending_lock.gen_wlock():
-    #   key = next(iter(self.pending_samples))
-    #   sample = self.pending_samples.pop(key)
+    # This hand-off is now performed by the WorkerAlive RPC.
+    # with self.pending_lock.gen_wlock():
+    #     key = next(iter(self.pending_samples))
+    #     sample = self.pending_samples.pop(key)
 
-    # with self.active_lock.gen_wlock():
-    #   self.active_samples[request.worker_id] = {'id': key, 'sample': sample}
+    # with self.active_lock.gen_wlock():
+    #     self.active_samples[request.worker_id] = {'id': key, 'sample': sample}
 
     with self.active_lock.gen_rlock():
-      if(request.worker_id in self.active_samples):
-        sample = self.active_samples[request.worker_id]['sample']
-      else:
-        raise ValueError("key not foung in active_samples")
+        if (request.worker_id in self.active_samples):
+            sample = self.active_samples[request.worker_id]['sample']
+        else:
+            raise ValueError("key not found in active_samples")
     next_action = sample[0]
     logging.info('next_action=%s', next_action)
     # raise SystemExit
@@ -223,22 +219,23 @@ def finalize_episode(
 
     # logging.info("req in finalize episode of dummy.py : %s", request)
     with self.active_lock.gen_rlock():
-      sample_dict = self.active_samples[request.worker_id]
+        sample_dict = self.active_samples[request.worker_id]
 
     with self.completed_lock.gen_wlock():
-      self.completed_samples[sample_dict['id']] = {
-          # 'action': self.pending_samples[unique_action_id],
-          'action':
-              param_proto_to_dict(request.decision_point.choice_params),
-          'attribute': sample_dict['sample'][1],
-          'reward': request.decision_outcome.reward,
-          'outcome':
-              param_proto_to_dict(request.decision_outcome.outcome_params)
-      }
+        self.completed_samples[sample_dict['id']] = {
+            # 'action': self.pending_samples[unique_action_id],
+            'action':
+                param_proto_to_dict(request.decision_point.choice_params),
+            'attribute':
+                sample_dict['sample'][1],
+            'reward':
+                request.decision_outcome.reward,
+            'outcome':
+                param_proto_to_dict(request.decision_outcome.outcome_params)
+        }
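+        # The sample is now recorded as completed (GetOutcome will report it
+        # as COMPLETED); next, drop it from the active list so this worker
+        # can be handed a fresh pending sample by WorkerAlive.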
 
     with self.active_lock.gen_wlock():
-      del self.active_samples[request.worker_id]
-
+        del self.active_samples[request.worker_id]
 
     # print('self.active_samples : ', self.active_samples)
     # print('self.pending_samples : ', self.pending_samples)
@@ -264,38 +261,40 @@ def fetch_optimal_action(
     logging.debug("<<<< Out %s of %s", method_name, _file_name)
 
   @overrides
-  def close(
-      self, request: service_pb2.CloseRequest
-  ) -> service_pb2.CloseResponse:
+  def close(self,
+            request: service_pb2.CloseRequest) -> service_pb2.CloseResponse:
     method_name = "close"
     logging.debug(">>>> In %s of %s", method_name, _file_name)
     self.exp_completed = True
-    logging.info("sight experiment completed...., changed exp_completed to True")
+    logging.info(
+        "Sight experiment completed; exp_completed has been set to True.")
    logging.debug("<<<< Out %s of %s", method_name, _file_name)
     return service_pb2.CloseResponse(response_str="success")
 
   @overrides
   def WorkerAlive(
-      self, request: service_pb2.WorkerAliveRequest
+        self, request: service_pb2.WorkerAliveRequest
   ) -> service_pb2.WorkerAliveResponse:
     method_name = "WorkerAlive"
     logging.debug(">>>> In %s of %s", method_name, _file_name)
-    if(self.exp_completed):
-      worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_DONE
-    elif(not self.pending_samples):
-      worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_RETRY
+    if (self.exp_completed):
+        worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_DONE
+    elif (not self.pending_samples):
+        worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_RETRY
     else:
-      worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_ACT
-      # put sample in active sample list??
-      with self.pending_lock.gen_wlock():
-        key = next(iter(self.pending_samples))
-        sample = self.pending_samples.pop(key)
-
-      with self.active_lock.gen_wlock():
-        self.active_samples[request.worker_id] = {'id': key, 'sample': sample}
-      print("self.active_samples : ", self.active_samples)
+        worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_ACT
+        # Move the claimed sample into the active-samples list.
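+        # Claiming is atomic: the oldest pending sample is popped under the
+        # pending write-lock and stored under the active write-lock, so two
+        # concurrent WorkerAlive calls cannot receive the same sample.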
+ with self.pending_lock.gen_wlock(): + key = next(iter(self.pending_samples)) + sample = self.pending_samples.pop(key) + + with self.active_lock.gen_wlock(): + self.active_samples[request.worker_id] = { + 'id': key, + 'sample': sample + } + print("self.active_samples : ", self.active_samples) logging.info("worker_alive_status is %s", worker_alive_status) logging.debug("<<<< Out %s of %s", method_name, _file_name) return service_pb2.WorkerAliveResponse(status_type=worker_alive_status) - From 61400699ac2084c107d4a36ce05e1b4b9f40d5cf Mon Sep 17 00:00:00 2001 From: Meet-Shah Date: Mon, 16 Sep 2024 10:14:33 +0000 Subject: [PATCH 03/25] more logging changes --- py/helpers/cache/cache_gcs.py | 1 + py/sight/demo/cartpole/demo_cartpole.py | 2 +- py/sight/demo/cartpole/demo_pendulum.py | 2 +- py/sight/demo/cartpole/gym_demo.py | 2 +- py/sight/service_utils.py | 1 + py/sight/widgets/decision/decision.py | 2 +- .../widgets/decision/llm_optimizer_client.py | 119 +++++++++--------- .../single_action_optimizer_client.py | 3 +- sight_service/service_root.py | 3 +- 9 files changed, 70 insertions(+), 65 deletions(-) diff --git a/py/helpers/cache/cache_gcs.py b/py/helpers/cache/cache_gcs.py index a723f38..8916787 100644 --- a/py/helpers/cache/cache_gcs.py +++ b/py/helpers/cache/cache_gcs.py @@ -7,6 +7,7 @@ from .cache_interface import CacheInterface from .cache_redis import RedisCache + class GCSCache(CacheInterface): def __init__(self, config={}, with_redis_client: RedisCache | None = None): diff --git a/py/sight/demo/cartpole/demo_cartpole.py b/py/sight/demo/cartpole/demo_cartpole.py index f8b2a57..e0e7e7b 100644 --- a/py/sight/demo/cartpole/demo_cartpole.py +++ b/py/sight/demo/cartpole/demo_cartpole.py @@ -96,6 +96,6 @@ def main(argv: Sequence[str]) -> None: if __name__ == "__main__": - logging.basicConfig(level=logging.DEBUG, ) + # logging.basicConfig(level=logging.DEBUG, ) # print(logging.getLogger(__name__)) app.run(main) diff --git a/py/sight/demo/cartpole/demo_pendulum.py b/py/sight/demo/cartpole/demo_pendulum.py index 0c93bd2..618f2e6 100644 --- a/py/sight/demo/cartpole/demo_pendulum.py +++ b/py/sight/demo/cartpole/demo_pendulum.py @@ -87,6 +87,6 @@ def main(argv: Sequence[str]) -> None: if __name__ == "__main__": - logging.basicConfig(level=logging.DEBUG, ) + # logging.basicConfig(level=logging.DEBUG, ) # print(logging.getLogger(__name__)) app.run(main) diff --git a/py/sight/demo/cartpole/gym_demo.py b/py/sight/demo/cartpole/gym_demo.py index 5180e76..5899d15 100644 --- a/py/sight/demo/cartpole/gym_demo.py +++ b/py/sight/demo/cartpole/gym_demo.py @@ -59,6 +59,6 @@ def main(argv: Sequence[str]) -> None: if __name__ == "__main__": - logging.basicConfig(level=logging.DEBUG, ) + # logging.basicConfig(level=logging.DEBUG, ) # print(logging.getLogger(__name__)) app.run(main) diff --git a/py/sight/service_utils.py b/py/sight/service_utils.py index 1875114..adceffe 100644 --- a/py/sight/service_utils.py +++ b/py/sight/service_utils.py @@ -476,6 +476,7 @@ def obtain_insecure_channel(options): ) return channel + def generate_metadata(): """Generate metadata to call service with authentication.""" diff --git a/py/sight/widgets/decision/decision.py b/py/sight/widgets/decision/decision.py index da4d304..f1dc2ec 100644 --- a/py/sight/widgets/decision/decision.py +++ b/py/sight/widgets/decision/decision.py @@ -42,7 +42,7 @@ from sight.widgets.decision import utils from sight.utility import poll_network_batch_outcome -logging.basicConfig(level=logging.DEBUG) +# logging.basicConfig(level=logging.DEBUG) 
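+# Log verbosity is now configured centrally by helpers.logs.logs_handler,
+# so the per-module basicConfig call is disabled here.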
_DECISON_MODE = flags.DEFINE_enum( 'decision_mode', diff --git a/py/sight/widgets/decision/llm_optimizer_client.py b/py/sight/widgets/decision/llm_optimizer_client.py index 583cd80..49537df 100644 --- a/py/sight/widgets/decision/llm_optimizer_client.py +++ b/py/sight/widgets/decision/llm_optimizer_client.py @@ -11,80 +11,81 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Client for LLM optimizer to communicate with server.""" from typing import Optional, Sequence, Tuple -from helpers.logs.logs_handler import logger as loggingfrom sight_service.proto import service_pb2 +from helpers.logs.logs_handler import logger as logging +from sight_service.proto import service_pb2 from sight import service_utils as service from sight.proto import sight_pb2 from sight.widgets.decision.optimizer_client import OptimizerClient from overrides import override import time -class LLMOptimizerClient (OptimizerClient): - """LLM client for the Sight service.""" - def __init__(self, llm_name: str, description: str, sight): - super().__init__(sight_pb2.DecisionConfigurationStart.OptimizerType.OT_LLM) - if llm_name.startswith('text_bison'): - self._algorithm = sight_pb2.DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_TEXT_BISON - elif llm_name.startswith('chat_bison'): - self._algorithm = sight_pb2.DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_CHAT_BISON - elif llm_name.startswith('gemini_pro'): - self._algorithm = sight_pb2.DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_GEMINI_PRO - else: - raise ValueError(f'Unknown LLM Algorithm {llm_name}') +class LLMOptimizerClient(OptimizerClient): + """LLM client for the Sight service.""" + + def __init__(self, llm_name: str, description: str, sight): + super().__init__( + sight_pb2.DecisionConfigurationStart.OptimizerType.OT_LLM) + if llm_name.startswith('text_bison'): + self._algorithm = sight_pb2.DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_TEXT_BISON + elif llm_name.startswith('chat_bison'): + self._algorithm = sight_pb2.DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_CHAT_BISON + elif llm_name.startswith('gemini_pro'): + self._algorithm = sight_pb2.DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_GEMINI_PRO + else: + raise ValueError(f'Unknown LLM Algorithm {llm_name}') - if llm_name.endswith('_optimize'): - self._goal = sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_OPTIMIZE - elif llm_name.endswith('_recommend'): - self._goal = sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_RECOMMEND - elif llm_name.endswith('_interactive'): - self._goal = sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_INTERACTIVE - else: - raise ValueError(f'Unknown LLM Goal {llm_name}') + if llm_name.endswith('_optimize'): + self._goal = sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_OPTIMIZE + elif llm_name.endswith('_recommend'): + self._goal = sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_RECOMMEND + elif llm_name.endswith('_interactive'): + self._goal = sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_INTERACTIVE + else: + raise ValueError(f'Unknown LLM Goal {llm_name}') - self._description = description + self._description = description - self._sight = sight - self._worker_id = None + self._sight = sight + self._worker_id = None - @override - def create_config(self) -> sight_pb2.DecisionConfigurationStart.ChoiceConfig: - choice_config = 
sight_pb2.DecisionConfigurationStart.ChoiceConfig( - ) - llm_config = sight_pb2.DecisionConfigurationStart.LLMConfig( - algorithm=self._algorithm, - goal=self._goal, - description=self._description - ) - choice_config.llm_config.CopyFrom(llm_config) - return choice_config + @override + def create_config( + self) -> sight_pb2.DecisionConfigurationStart.ChoiceConfig: + choice_config = sight_pb2.DecisionConfigurationStart.ChoiceConfig() + llm_config = sight_pb2.DecisionConfigurationStart.LLMConfig( + algorithm=self._algorithm, + goal=self._goal, + description=self._description) + choice_config.llm_config.CopyFrom(llm_config) + return choice_config - @override - def decision_point(self, sight, request: service_pb2.DecisionPointRequest): - for key, value in sight.widget_decision_state["state"].items(): - param = request.decision_point.state_params.add() - param.key = key - param.value.sub_type - param.value.double_value = value + @override + def decision_point(self, sight, request: service_pb2.DecisionPointRequest): + for key, value in sight.widget_decision_state["state"].items(): + param = request.decision_point.state_params.add() + param.key = key + param.value.sub_type + param.value.double_value = value - while True: - response = service.call( - lambda s, meta: s.DecisionPoint(request, 300, metadata=meta) - ) - logging.info('decision_point() response=%s' % response) - if response.action_type == service_pb2.DecisionPointResponse.ActionType.AT_ACT: - return self._get_dp_action(response) - if response.action_type == service_pb2.DecisionPointResponse.ActionType.AT_RETRY: - time.sleep(5) + while True: + response = service.call( + lambda s, meta: s.DecisionPoint(request, 300, metadata=meta)) + logging.info('decision_point() response=%s' % response) + if response.action_type == service_pb2.DecisionPointResponse.ActionType.AT_ACT: + return self._get_dp_action(response) + if response.action_type == service_pb2.DecisionPointResponse.ActionType.AT_RETRY: + time.sleep(5) - @override - def finalize_episode(self, sight, request: service_pb2.FinalizeEpisodeRequest): - logging.info('LLMOptimizerClient() finalize_episode, request=%s', request) - response = service.call( - lambda s, meta: s.FinalizeEpisode(request, 300, metadata=meta) - ) - return response + @override + def finalize_episode(self, sight, + request: service_pb2.FinalizeEpisodeRequest): + logging.info('LLMOptimizerClient() finalize_episode, request=%s', + request) + response = service.call( + lambda s, meta: s.FinalizeEpisode(request, 300, metadata=meta)) + return response diff --git a/py/sight/widgets/decision/single_action_optimizer_client.py b/py/sight/widgets/decision/single_action_optimizer_client.py index f981fde..dc38316 100644 --- a/py/sight/widgets/decision/single_action_optimizer_client.py +++ b/py/sight/widgets/decision/single_action_optimizer_client.py @@ -13,7 +13,8 @@ # limitations under the License. 
"""Client for optimizers that are called once per episode to communicate with server.""" -from helpers.logs.logs_handler import logger as loggingfrom typing import Optional, Sequence, Tuple +from helpers.logs.logs_handler import logger as logging +from typing import Optional, Sequence, Tuple from sight_service.proto import service_pb2 from sight import service_utils as service from sight.proto import sight_pb2 diff --git a/sight_service/service_root.py b/sight_service/service_root.py index 16ea060..37b54da 100644 --- a/sight_service/service_root.py +++ b/sight_service/service_root.py @@ -23,7 +23,8 @@ def warn(*args, **kwargs): warnings.warn = warn from concurrent import futures -from helpers.logs.logs_handler import logger as logging +# from helpers.logs.logs_handler import logger as logging +import logging from absl import app from absl import flags From 11e3e5b8c50de2ea7798e967f5de4399529de7a9 Mon Sep 17 00:00:00 2001 From: Meet-Shah Date: Wed, 18 Sep 2024 06:52:42 +0000 Subject: [PATCH 04/25] dsub local changes --- .gitignore | 1 + py/helpers/logs/logs_handler.py | 10 +++++++-- py/sight/widgets/decision/trials.py | 34 ++++++++++++++++------------- sight_service/llm.py | 1 - 4 files changed, 28 insertions(+), 18 deletions(-) diff --git a/.gitignore b/.gitignore index 001f60e..60bab61 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,4 @@ __pycache__ */.cache_local_data/* */.cache_redis_data/* extra/* +**/*.log diff --git a/py/helpers/logs/logs_handler.py b/py/helpers/logs/logs_handler.py index f577fe3..e5de80a 100644 --- a/py/helpers/logs/logs_handler.py +++ b/py/helpers/logs/logs_handler.py @@ -14,6 +14,12 @@ import logging from google.cloud import logging as cloud_logging +class CustomAdapter(logging.LoggerAdapter): + def process(self, msg, kwargs): + # Include the extra context from the adapter into the log message + extra_info = ' | '.join(f'{key}: {value}' for key, value in self.extra.items()) + return f'{extra_info} | {msg}', kwargs + # Instantiates a client logging_client = cloud_logging.Client() @@ -25,9 +31,9 @@ # Set up Python logging logger = logging.getLogger("cloudLogger") -logger.setLevel(logging.DEBUG) +logger.setLevel(logging.INFO) logger.addHandler(handler) +adapter = CustomAdapter(logger, {'user': 'meetashah'}) # Example of logging # logger.info("This is an info message logged to GCP") - diff --git a/py/sight/widgets/decision/trials.py b/py/sight/widgets/decision/trials.py index f85a1ad..6d9c6e5 100644 --- a/py/sight/widgets/decision/trials.py +++ b/py/sight/widgets/decision/trials.py @@ -394,20 +394,21 @@ def start_job_in_dsub_local( ) sight.location.get().next() - remote_script = (f'gs://{os.environ["PROJECT_ID"]}/sight/d-sub/binary/' + - binary_path.split('/')[-1]) - print(f'Uploading {binary_path}...') - subprocess.run(['gsutil', 'cp', '-c', binary_path, remote_script], - check=True) + # remote_script = (f'gs://{os.environ["PROJECT_ID"]}-sight/d-sub/binary/{str(sight.id)}/' + + # binary_path.split('/')[-1]) + remote_script = binary_path + # print(f'Uploading {binary_path}...') + # subprocess.run(['gsutil', 'cp', '-c', binary_path, remote_script], + # check=True) # provider = 'google-cls-v2' if deployment_mode == 'distributed' else 'local' script_args = ( f'--decision_mode={decision_mode} --deployment_mode={deployment_mode} --worker_mode={worker_mode} --optimizer_type={optimizer_type} ' ) - if FLAGS.service_account: - script_args = (script_args + - f'--service_account={FLAGS.service_account}') + # if FLAGS.service_account: + # script_args = (script_args + + # 
f'--service_account={FLAGS.service_account}')

     print('sight.id=%s' % sight.id)
     args = [
@@ -415,12 +416,13 @@ def start_job_in_dsub_local(
         '--provider=local',
         f'--image={docker_image}',
         f'--project={_PROJECT_ID.value}',
-        f'--logging=gs://{os.environ["PROJECT_ID"]}/d-sub/logs/local/{sight.id}',
+        # f'--logging=gs://{os.environ["PROJECT_ID"]}/d-sub/logs/local/{sight.id}',
+        f'--logging=extra/dsub-logs',
         '--env',
         f'GOOGLE_CLOUD_PROJECT={os.environ["PROJECT_ID"]}',
-        '--env',
-        'GOOGLE_APPLICATION_CREDENTIALS=/mnt/data/mount/file' +
-        f'{FLAGS.gcloud_dir_path}/application_default_credentials.json',
+        # '--env',
+        # 'GOOGLE_APPLICATION_CREDENTIALS=/mnt/data/mount/file' +
+        # f'{FLAGS.gcloud_dir_path}/application_default_credentials.json',
         '--env',
         f'PARENT_LOG_ID={sight.id}',
         # '--env',
@@ -429,10 +431,12 @@
         f'SIGHT_SERVICE_ID={service._SERVICE_ID}',
         '--input',
         f'SCRIPT={remote_script}',
-        f'--command=cd /x-sight && python3 "${{SCRIPT}}" {script_args}',
+        '--input-recursive',
+        f'CLOUDSDK_CONFIG={os.path.expanduser("~")}/.config/gcloud',
+        f'--command=python3 "${{SCRIPT}}" {script_args}',
         # + f'--optimizer_type={optimizer_type}',
-        '--mount',
-        'RESOURCES=file:/' + f'{FLAGS.gcloud_dir_path}',
+        # '--mount',
+        # 'RESOURCES=file:/' + f'{FLAGS.gcloud_dir_path}',
         # + f'{os.path.expanduser("~")}/.config/gcloud',
         '--tasks',
         '/tmp/optimization_tasks.tsv',
diff --git a/sight_service/llm.py b/sight_service/llm.py
index bd99144..0d2f7f8 100644
--- a/sight_service/llm.py
+++ b/sight_service/llm.py
@@ -16,7 +16,6 @@
 from concurrent import futures
 import json
 from helpers.logs.logs_handler import logger as logging
-from helpers.logs.logs_handler import logger as loggingogs.logs_handler import logger as logging
 import random
 import threading
 from typing import Any, Dict, List, Optional, Tuple

From cb943d999ef255703a4f75341e1fa15e9f570113 Mon Sep 17 00:00:00 2001
From: hrushikeshm-g
Date: Wed, 18 Sep 2024 07:37:57 +0000
Subject: [PATCH 05/25] proposal: JSON-decode propose_actions outcome values

---
 py/sight/widgets/decision/proposal.py | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/py/sight/widgets/decision/proposal.py b/py/sight/widgets/decision/proposal.py
index 8bb310c..900f685 100644
--- a/py/sight/widgets/decision/proposal.py
+++ b/py/sight/widgets/decision/proposal.py
@@ -27,6 +27,7 @@
 from helpers.cache.cache_helper import CacheKeyMaker, CacheConfig
 from helpers.cache.cache_factory import CacheFactory
 from helpers.cache.cache_interface import CacheInterface
+import json

 _CACHE_MODE = flags.DEFINE_enum(
     'cache_mode', 'none',
@@ -83,5 +84,13 @@ async def propose_actions(sight, action_dict, custom_part="sight_cache"):
   outcome = response.get('outcome', None)
   if response is None or outcome is None:
     raise Exception('fetch_outcome response or respose["outcome"] is none')
+  # Convert stringified values back into JSON objects where possible.
+  for key in outcome:
+    value = outcome[key]
+    try:
+      final_value = json.loads(value)
+    except (json.JSONDecodeError, TypeError):
+      final_value = value
+    outcome[key] = final_value
   cache_client.json_set(key=cache_key, value=outcome)
   return outcome

From aed9bcedecb081c8f7e97fbd6a4ca052003769e7 Mon Sep 17 00:00:00 2001
From: Meet-Shah
Date: Fri, 20 Sep 2024 10:30:27 +0000
Subject: [PATCH 06/25] sight.silent_logger handling and dsub upgrade

---
 py/sight/requirements.txt |  2 +-
 py/sight/sight.py         | 15 ++++++++-------
 2 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/py/sight/requirements.txt b/py/sight/requirements.txt
index 4ab7ca1..226f011 100644
--- 
a/py/sight/requirements.txt +++ b/py/sight/requirements.txt @@ -19,7 +19,7 @@ dm-launchpad==0.5.0 dm-reverb==0.7.2 dm-sonnet==2.0.1 dm-tree==0.1.8 -dsub==0.4.7 +dsub==0.5.0 # etils==1.6.0 fastavro==1.7.0 flatbuffers==23.5.26 diff --git a/py/sight/sight.py b/py/sight/sight.py index 96e5547..e9d48cd 100644 --- a/py/sight/sight.py +++ b/py/sight/sight.py @@ -395,20 +395,21 @@ def __enter__(self): return self def __exit__(self, exc_type, value, traceback): - # last rpc call to server for this sight id - req = service_pb2.CloseRequest() - req.client_id = str(self.id) - response = service.call( - lambda s, meta: s.Close(req, 300, metadata=meta)) - # print("close rpc status :", response.response_str) - if self.params.silent_logger: self.close() + return if exc_type is not None: # pytype: disable=attribute-error exception(exc_type, value, traceback, self, inspect.currentframe().f_back) # pytype: enable=attribute-error + + # last rpc call to server for this sight id + req = service_pb2.CloseRequest() + req.client_id = str(self.id) + response = service.call( + lambda s, meta: s.Close(req, 300, metadata=meta)) + # print("close rpc status :", response.response_str) self.close() def __del__(self): From e8c1e4518a926a63cecafae412abe6f96df53426 Mon Sep 17 00:00:00 2001 From: Meet-Shah Date: Mon, 23 Sep 2024 10:13:01 +0000 Subject: [PATCH 07/25] cloud log fix --- py/helpers/logs/logs_handler.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/py/helpers/logs/logs_handler.py b/py/helpers/logs/logs_handler.py index e5de80a..0473866 100644 --- a/py/helpers/logs/logs_handler.py +++ b/py/helpers/logs/logs_handler.py @@ -14,26 +14,26 @@ import logging from google.cloud import logging as cloud_logging +# Set this to True for Cloud logging +USE_CLOUD_LOGGING = False + class CustomAdapter(logging.LoggerAdapter): def process(self, msg, kwargs): # Include the extra context from the adapter into the log message extra_info = ' | '.join(f'{key}: {value}' for key, value in self.extra.items()) return f'{extra_info} | {msg}', kwargs -# Instantiates a client -logging_client = cloud_logging.Client() - -# Retrieves a Cloud Logging handler based on the environment -# you're running in and integrates the handler with the -# Python logging module. 
By default this captures all logs -# at INFO level and higher -handler = logging_client.get_default_handler() +if USE_CLOUD_LOGGING: + logging_client = cloud_logging.Client() + handler = logging_client.get_default_handler() +else: + handler = logging.StreamHandler() # Set up Python logging -logger = logging.getLogger("cloudLogger") +logger = logging.getLogger("myLogger") logger.setLevel(logging.INFO) logger.addHandler(handler) -adapter = CustomAdapter(logger, {'user': 'meetashah'}) +# adapter = CustomAdapter(logger, {'user': 'meetashah'}) # Example of logging # logger.info("This is an info message logged to GCP") From 83a72d716dd25f7a95d18a93ecef3ab37191b109 Mon Sep 17 00:00:00 2001 From: Meet-Shah Date: Thu, 3 Oct 2024 14:06:26 +0000 Subject: [PATCH 08/25] formatter changes --- .style.yapf | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 .style.yapf diff --git a/.style.yapf b/.style.yapf new file mode 100644 index 0000000..a72817d --- /dev/null +++ b/.style.yapf @@ -0,0 +1,4 @@ +[style] +based_on_style = google +indent_width = 2 +column_limit = 80 From eaab9b8207684ed1b5993a02e57dc797fec99302 Mon Sep 17 00:00:00 2001 From: Meet-Shah Date: Thu, 3 Oct 2024 14:07:13 +0000 Subject: [PATCH 09/25] more formatter changes --- .config/.isort.cfg | 5 + .style.yapf => .config/.style.yapf | 0 .config/setup.txt | 2 + .pre-commit-config.yaml | 23 + fvs_sight/fvs_api.py | 8 +- fvs_sight/fvs_sight_worker.py | 70 +- kokua-worker.py | 87 - py/helpers/cache/cache_factory.py | 40 +- py/helpers/cache/cache_gcs.py | 72 +- py/helpers/cache/cache_helper.py | 77 +- py/helpers/cache/cache_local.py | 77 +- py/helpers/cache/cache_none.py | 20 +- py/helpers/cache/cache_redis.py | 59 +- py/helpers/cache/tests/test_cache_gcs.py | 2 +- py/helpers/cache/tests/test_cache_local.py | 2 +- py/helpers/cache/tests/test_cache_none.py | 50 +- py/helpers/cache/tests/test_cache_redis.py | 62 +- py/helpers/logs/logs_handler.py | 17 +- py/sight/attribute.py | 1 - py/sight/block.py | 152 +- py/sight/data_structures.py | 77 +- py/sight/data_structures_test.py | 67 +- py/sight/demo/async_demo.py | 27 +- py/sight/demo/asyncq.py | 33 +- py/sight/demo/cartpole/demo_cartpole.py | 75 +- py/sight/demo/cartpole/demo_pendulum.py | 65 +- py/sight/demo/cartpole/driver_cartpole.py | 170 +- py/sight/demo/cartpole/driver_pendulum.py | 169 +- py/sight/demo/cartpole/gym_demo.py | 41 +- py/sight/demo/demo.py | 22 +- py/sight/demo/dummy.py | 104 +- py/sight/demo/fn_sphere.py | 90 +- py/sight/demo/fn_sphere_parallel.py | 465 +++-- py/sight/demo/fractal_demo.py | 17 +- py/sight/demo/gym_demo_env.py | 11 +- py/sight/demo/kokua_demo.py | 82 +- py/sight/demo/portfolio_demo.py | 163 +- py/sight/demo/propose_action.py | 299 ++-- py/sight/demo/search_optimization.py | 144 +- py/sight/demo/secret_find.py | 58 +- py/sight/demo/shower_demo_with_env.py | 3 +- py/sight/demo/shower_demo_without_env.py | 25 +- py/sight/demo/sir.py | 120 +- py/sight/demo/spawn_workers.py | 48 +- py/sight/demo/stream.py | 26 +- py/sight/demo/sweetness.py | 39 +- py/sight/demo/test_rpc.py | 20 +- py/sight/demo/volterra_lotka.py | 195 ++- py/sight/exception.py | 26 +- py/sight/gcs_utils.py | 120 +- py/sight/location.py | 3 +- py/sight/location_test.py | 18 +- py/sight/proto/example_pb2.py | 20 +- py/sight/proto/feature_pb2.py | 49 +- py/sight/proto/sight_pb2.py | 296 ++-- .../proto/widgets/pipeline/flume/flume_pb2.py | 53 +- py/sight/publish_log.py | 30 +- py/sight/service_utils.py | 770 ++++---- py/sight/sight.py | 1354 +++++++-------- py/sight/sight_test.py | 110 +- 
py/sight/trace.py | 76 +- py/sight/utility.py | 30 +- .../decision/acme/acme_optimizer_client.py | 677 ++++---- .../widgets/decision/acme/build_d4pg_actor.py | 4 +- .../widgets/decision/acme/build_dqn_actor.py | 15 +- .../widgets/decision/acme/build_mdqn_actor.py | 20 +- .../decision/acme/build_qrdqn_actor.py | 68 +- .../widgets/decision/acme/build_td3_actor.py | 15 +- py/sight/widgets/decision/acme/shower_env.py | 15 +- py/sight/widgets/decision/acme/sight_adder.py | 168 +- .../decision/acme/sight_variable_source.py | 31 +- .../decision/analyze_decision_outcomes.py | 664 ++++--- py/sight/widgets/decision/converse.py | 47 +- py/sight/widgets/decision/current_status.py | 43 +- py/sight/widgets/decision/decision.py | 1544 ++++++++--------- .../widgets/decision/decision_episode_fn.py | 370 ++-- py/sight/widgets/decision/env_driver.py | 40 +- py/sight/widgets/decision/get_outcome.py | 38 +- py/sight/widgets/decision/listen.py | 32 +- .../widgets/decision/llm_optimizer_client.py | 115 +- py/sight/widgets/decision/optimizer_client.py | 41 +- py/sight/widgets/decision/proposal.py | 104 +- py/sight/widgets/decision/resource_lock.py | 39 +- .../widgets/decision/shower_env_driver.py | 62 +- .../single_action_optimizer_client.py | 63 +- py/sight/widgets/decision/tell.py | 23 +- py/sight/widgets/decision/trials.py | 548 +++--- py/sight/widgets/decision/utils.py | 7 +- py/sight/widgets/numpy_sight/demo.py | 2 - py/sight/widgets/numpy_sight/numpy_sight.py | 181 +- .../widgets/numpy_sight/numpy_sight_test.py | 56 +- py/sight/widgets/pandas_sight/pandas_sight.py | 141 +- py/sight/widgets/simulation/analysis_utils.py | 836 +++++---- .../simulation/apply_lstm_surrogate.py | 80 +- py/sight/widgets/simulation/bulk_inference.py | 86 +- .../simulation/delta_across_time_steps.py | 213 +-- .../widgets/simulation/fine_tune_gemini.py | 96 +- .../simulation/generate_log_trans_dataset.py | 262 +-- .../simulation/load_log_time_series.py | 111 +- py/sight/widgets/simulation/run_trace.py | 585 +++---- py/sight/widgets/simulation/simulation.py | 197 ++- .../simulation/simulation_parameters.py | 144 +- .../widgets/simulation/simulation_state.py | 265 ++- .../simulation/simulation_time_step.py | 171 +- .../simulation/simulation_widget_state.py | 1 - .../simulation/split_log_time_series.py | 62 +- .../simulation/train_darts_surrogate.py | 41 +- .../simulation/train_lstm_surrogate.py | 71 +- .../widgets/simulation/train_surrogate.py | 68 +- py/sight/widgets/tensorflow_sight/demo.py | 16 +- .../tensorflow_sight/tensorflow_sight.py | 76 +- .../tensorflow_sight/tensorflow_sight_test.py | 58 +- py/tests/colorful_tests.py | 21 +- sight_service/acme_optimizer.py | 894 +++++----- sight_service/bayesian_opt.py | 242 ++- sight_service/build_d4pg_learner.py | 4 +- sight_service/build_dqn_learner.py | 6 +- sight_service/build_mdqn_learner.py | 20 +- sight_service/build_qrdqn_learner.py | 68 +- sight_service/build_td3_learner.py | 15 +- sight_service/exhaustive_search.py | 428 +++-- sight_service/genetic_algorithm.py | 718 ++++---- sight_service/llm.py | 1333 +++++++------- sight_service/nevergrad_opt.py | 540 +++--- sight_service/normalizer.py | 74 +- sight_service/optimizer_instance.py | 263 ++- sight_service/proto/numproto/numproto.py | 16 +- .../proto/numproto/protobuf/ndarray_pb2.py | 17 +- sight_service/proto/service_pb2.py | 674 ++++--- sight_service/proto/service_pb2_grpc.py | 951 +++++----- sight_service/sensitivity_analysis.py | 331 ++-- sight_service/service_root.py | 485 +++--- sight_service/service_utils.py | 284 ++- 
sight_service/single_action_optimizer.py | 19 +- sight_service/smc_py.py | 425 +++-- sight_service/vizier.py | 331 ++-- sight_service/worklist_scheduler_opt.py | 531 +++--- streamlit/streamlit_app.py | 143 +- 138 files changed, 11791 insertions(+), 12090 deletions(-) create mode 100644 .config/.isort.cfg rename .style.yapf => .config/.style.yapf (100%) create mode 100644 .config/setup.txt create mode 100644 .pre-commit-config.yaml delete mode 100644 kokua-worker.py diff --git a/.config/.isort.cfg b/.config/.isort.cfg new file mode 100644 index 0000000..6f96180 --- /dev/null +++ b/.config/.isort.cfg @@ -0,0 +1,5 @@ +[settings] +profile = google +use_parentheses = true +line_length = 80 +multi_line_output = 3 diff --git a/.style.yapf b/.config/.style.yapf similarity index 100% rename from .style.yapf rename to .config/.style.yapf diff --git a/.config/setup.txt b/.config/setup.txt new file mode 100644 index 0000000..0eb05a8 --- /dev/null +++ b/.config/setup.txt @@ -0,0 +1,2 @@ +yapf -ir -vv --style .config/.style.yapf . +isort . --settings-path .config/ -v diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..1258dd9 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,23 @@ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.0.1 + hooks: + - id: trailing-whitespace + exclude: ^.*\.patch$ + + - id: end-of-file-fixer + exclude: ^.*\.patch$ + + - id: check-yaml + + - repo: https://github.com/pre-commit/mirrors-yapf + rev: v0.32.0 + hooks: + - id: yapf + args: ['--style', '.config/.style.yapf'] + + - repo: https://github.com/pre-commit/mirrors-isort + rev: v5.10.1 + hooks: + - id: isort + args: ['--settings-path', '.config/.isort.cfg'] diff --git a/fvs_sight/fvs_api.py b/fvs_sight/fvs_api.py index 48f28eb..4cd8eca 100644 --- a/fvs_sight/fvs_api.py +++ b/fvs_sight/fvs_api.py @@ -1,4 +1,5 @@ from typing import Any + from sight.proto import sight_pb2 FVS_PARAMS = { @@ -26,8 +27,8 @@ def create_attr_props( """ return { key: (sight_pb2.DecisionConfigurationStart.AttrProps() if value - is not None else sight_pb2.DecisionConfigurationStart.AttrProps()) - for key, value in config_dict.items() + is not None else sight_pb2.DecisionConfigurationStart.AttrProps() + ) for key, value in config_dict.items() } @@ -58,6 +59,7 @@ def get_action_attrs(): action_config.update(expand_params_for_cycles(fvs_params=FVS_PARAMS)) return create_attr_props(action_config) + # action_attrs = { # "a1": # sight_pb2.DecisionConfigurationStart.AttrProps( @@ -77,13 +79,13 @@ def get_action_attrs(): # } - def get_outcome_attrs(): """Returns the outcome attributes for the FVS outcome. 
""" outcome_config = {'time_series': None} return create_attr_props(outcome_config) + # outcome_attrs = { # "time_series": # sight_pb2.DecisionConfigurationStart.AttrProps( diff --git a/fvs_sight/fvs_sight_worker.py b/fvs_sight/fvs_sight_worker.py index 9384813..57e7204 100644 --- a/fvs_sight/fvs_sight_worker.py +++ b/fvs_sight/fvs_sight_worker.py @@ -1,29 +1,29 @@ - -import yaml +import json +import os +from typing import Sequence from absl import app from absl import flags -from sight.widgets.decision import decision_episode_fn # from fvs_sight.fvs_api import action_attrs, outcome_attrs from fvs_sight import fvs_api - +import pandas as pd from sight import data_structures from sight.proto import sight_pb2 from sight.sight import Sight from sight.widgets.decision import decision -from typing import Sequence -import os -import pandas as pd -import json +from sight.widgets.decision import decision_episode_fn import yaml -def simulate_fvs(sight,params_dict): - print('here params_dict is :', params_dict) - mitigation_list = [227.6, 273.4, 273.3, 248.6, 165.3, 130.6, 106.4, 92.1, 81.7, 62.8] - sim_stream = pd.Series(mitigation_list) - # print(sim_stream) - return sim_stream +def simulate_fvs(sight, params_dict): + print('here params_dict is :', params_dict) + mitigation_list = [ + 227.6, 273.4, 273.3, 248.6, 165.3, 130.6, 106.4, 92.1, 81.7, 62.8 + ] + sim_stream = pd.Series(mitigation_list) + # print(sim_stream) + return sim_stream + def driver_fn(sight): @@ -34,9 +34,9 @@ def driver_fn(sight): # return None # raise SystemError - sim_stream = simulate_fvs(sight,params_dict) + sim_stream = simulate_fvs(sight, params_dict) - outcome = {'time_series' : sim_stream} + outcome = {'time_series': sim_stream} print("outcome : ", outcome) decision.decision_outcome('outcome_label', sight, reward=0, outcome=outcome) @@ -45,32 +45,24 @@ def driver_fn(sight): #temporary created def get_sight_instance(): - params = sight_pb2.Params( - label="kokua_experiment", - bucket_name=f'{os.environ["PROJECT_ID"]}-sight', - ) - sight_obj = Sight(params) - return sight_obj + params = sight_pb2.Params( + label="kokua_experiment", + bucket_name=f'{os.environ["PROJECT_ID"]}-sight', + ) + sight_obj = Sight(params) + return sight_obj def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError("Too many command-line arguments.") - - with get_sight_instance() as sight: - decision.run( - driver_fn=driver_fn, - sight=sight, - action_attrs=fvs_api.get_action_attrs(), - outcome_attrs=fvs_api.get_outcome_attrs() - ) - -if __name__ == "__main__": - app.run(main) - - - - + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") + with get_sight_instance() as sight: + decision.run(driver_fn=driver_fn, + sight=sight, + action_attrs=fvs_api.get_action_attrs(), + outcome_attrs=fvs_api.get_outcome_attrs()) +if __name__ == "__main__": + app.run(main) diff --git a/kokua-worker.py b/kokua-worker.py deleted file mode 100644 index 549c23d..0000000 --- a/kokua-worker.py +++ /dev/null @@ -1,87 +0,0 @@ - -import yaml - -from absl import app -from absl import flags -from sight.widgets.decision import decision_episode_fn - -from sight import data_structures -from sight.proto import sight_pb2 -from sight.sight import Sight -from sight.widgets.decision import decision -from typing import Sequence -import os -import pandas as pd -import json -import yaml - - -def simulate_fvs(sight,params_dict): - print('here params_dict is :', params_dict) - mitigation_list = [101, 102, 103, 104, 105] - sim_stream = 
pd.Series(mitigation_list) - # print(sim_stream) - return sim_stream - -def driver_func(sight): - - params_dict = decision.decision_point("label", sight) - # params_dict = {'fvs_type':'managed','region':'BM','project_id':'ACR173','desc': 'fire_projectACR173', 'fire-SIMFIRE_27-1_cycle': 2028, 'fire-SIMFIRE_27-6_stand_area_burned': 10.0, 'fire-SIMFIRE_30-1_cycle': 2031, 'fire-SIMFIRE_30-6_stand_area_burned': 10.0, 'fire-SIMFIRE_31-1_cycle': 2032, 'fire-SIMFIRE_31-6_stand_area_burned': 10.0} - print('params_dict : ', params_dict) - # raise SystemError - - sim_stream = simulate_fvs(sight,params_dict) - - outcome = {'time_series' : sim_stream} - print("outcome : ", outcome) - - decision.decision_outcome(json.dumps(params_dict), sight, reward=0, outcome=outcome) - - -#temporary created -def get_sight_instance(): - params = sight_pb2.Params( - label="kokua_experiment", - bucket_name=f'{os.environ["PROJECT_ID"]}-sight', - ) - sight_obj = Sight(params) - return sight_obj - - -action_attrs = { - "a": - sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=0, - max_value=1, - ), -} - -outcome_attrs = { - "c": - sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=0, - max_value=1, - ), -} - -def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError("Too many command-line arguments.") - - with get_sight_instance() as sight: - decision.run( - driver_fn=driver_func, - sight=sight, - action_attrs=action_attrs, - outcome_attrs=outcome_attrs - ) - -if __name__ == "__main__": - app.run(main) - - - - - - - diff --git a/py/helpers/cache/cache_factory.py b/py/helpers/cache/cache_factory.py index e1f4c4d..d6f4847 100644 --- a/py/helpers/cache/cache_factory.py +++ b/py/helpers/cache/cache_factory.py @@ -1,27 +1,27 @@ from .cache_gcs import GCSCache +from .cache_interface import CacheInterface from .cache_local import LocalCache -from .cache_redis import RedisCache from .cache_none import NoneCache -from .cache_interface import CacheInterface +from .cache_redis import RedisCache class CacheFactory: - @staticmethod - def get_cache(cache_type='local', - config={}, - with_redis=None) -> CacheInterface | None: - if cache_type == 'local_with_redis': - return LocalCache(config, with_redis) - elif cache_type == 'gcs_with_redis': - return GCSCache(config, with_redis) - elif cache_type == 'local': - return LocalCache(config) - elif cache_type == 'gcs': - return GCSCache(config) - elif cache_type == 'redis': - return RedisCache(config) - elif cache_type == 'none': - return NoneCache(config) - else: - raise ValueError(f"Unknown cache type: {cache_type}") + @staticmethod + def get_cache(cache_type='local', + config={}, + with_redis=None) -> CacheInterface | None: + if cache_type == 'local_with_redis': + return LocalCache(config, with_redis) + elif cache_type == 'gcs_with_redis': + return GCSCache(config, with_redis) + elif cache_type == 'local': + return LocalCache(config) + elif cache_type == 'gcs': + return GCSCache(config) + elif cache_type == 'redis': + return RedisCache(config) + elif cache_type == 'none': + return NoneCache(config) + else: + raise ValueError(f"Unknown cache type: {cache_type}") diff --git a/py/helpers/cache/cache_gcs.py b/py/helpers/cache/cache_gcs.py index 8916787..dfaf7aa 100644 --- a/py/helpers/cache/cache_gcs.py +++ b/py/helpers/cache/cache_gcs.py @@ -1,8 +1,8 @@ import json from pathlib import Path -from helpers.logs.logs_handler import logger as logging from google.cloud import storage +from helpers.logs.logs_handler import logger as logging from 
.cache_interface import CacheInterface from .cache_redis import RedisCache @@ -10,40 +10,38 @@ class GCSCache(CacheInterface): - def __init__(self, config={}, with_redis_client: RedisCache | None = None): - gcs_client = storage.Client() - bucket_name = config.get('gcs_bucket', 'cameltrain-sight') - self.bucket = gcs_client.bucket(bucket_name=bucket_name) - self.redis_client = with_redis_client - self.gcs_base_dir = config.get("gcs_base_dir", "sight_cache") - - def _gcs_cache_path(self, key: str): - return f"{self.gcs_base_dir}/{Path(key).with_suffix('.json')}" - - def json_get(self, key): - if self.redis_client: - try: - value = self.redis_client.json_get(key=key) - if value: - return value - except Exception as e: - logging.warning("GOT THE ISSUE IN REDIS", e) - return None - blob = self.bucket.blob( - self._gcs_cache_path(key=key.replace(':', '/'))) - if blob.exists(): - value = json.loads(blob.download_as_text()) - if self.redis_client: - self.redis_client.json_set(key=key, value=value) - return value + def __init__(self, config={}, with_redis_client: RedisCache | None = None): + gcs_client = storage.Client() + bucket_name = config.get('gcs_bucket', 'cameltrain-sight') + self.bucket = gcs_client.bucket(bucket_name=bucket_name) + self.redis_client = with_redis_client + self.gcs_base_dir = config.get("gcs_base_dir", "sight_cache") + + def _gcs_cache_path(self, key: str): + return f"{self.gcs_base_dir}/{Path(key).with_suffix('.json')}" + + def json_get(self, key): + if self.redis_client: + try: + value = self.redis_client.json_get(key=key) + if value: + return value + except Exception as e: + logging.warning("GOT THE ISSUE IN REDIS", e) return None - - def json_set(self, key, value): - if self.redis_client: - try: - self.redis_client.json_set(key=key, value=value) - except Exception as e: - logging.warning("GOT THE ISSUE IN REDIS", e) - blob = self.bucket.blob( - self._gcs_cache_path(key=key.replace(':', '/'))) - blob.upload_from_string(json.dumps(value)) + blob = self.bucket.blob(self._gcs_cache_path(key=key.replace(':', '/'))) + if blob.exists(): + value = json.loads(blob.download_as_text()) + if self.redis_client: + self.redis_client.json_set(key=key, value=value) + return value + return None + + def json_set(self, key, value): + if self.redis_client: + try: + self.redis_client.json_set(key=key, value=value) + except Exception as e: + logging.warning("GOT THE ISSUE IN REDIS", e) + blob = self.bucket.blob(self._gcs_cache_path(key=key.replace(':', '/'))) + blob.upload_from_string(json.dumps(value)) diff --git a/py/helpers/cache/cache_helper.py b/py/helpers/cache/cache_helper.py index 9f391b2..777ce68 100644 --- a/py/helpers/cache/cache_helper.py +++ b/py/helpers/cache/cache_helper.py @@ -1,29 +1,30 @@ import hashlib import json + from .cache_factory import CacheFactory def sort_nested_dict_or_list(d): - if isinstance(d, dict): - return {k: sort_nested_dict_or_list(v) for k, v in sorted(d.items())} - elif isinstance(d, list): - return [sort_nested_dict_or_list(x) for x in d] - else: - return d + if isinstance(d, dict): + return {k: sort_nested_dict_or_list(v) for k, v in sorted(d.items())} + elif isinstance(d, list): + return [sort_nested_dict_or_list(x) for x in d] + else: + return d class CacheConfig: - @staticmethod - def get_redis_instance(cache_type='none'): - if 'with_redis' in cache_type: - cache_redis = CacheFactory.get_cache(cache_type='redis', config={}) - if cache_redis.get_client() is None: - raise Exception( - 'Redis config maybe wrong or you havn\'t started the redis instance 
' - ) - return cache_redis - return None + @staticmethod + def get_redis_instance(cache_type='none'): + if 'with_redis' in cache_type: + cache_redis = CacheFactory.get_cache(cache_type='redis', config={}) + if cache_redis.get_client() is None: + raise Exception( + 'Redis config maybe wrong or you havn\'t started the redis instance ' + ) + return cache_redis + return None # @staticmethod @@ -37,25 +38,25 @@ def get_redis_instance(cache_type='none'): class CacheKeyMaker: - def __init__(self): - pass - - def _serialize(self, *args, **kwargs): - """Serialize arguments to create a unique key""" - combined = list(sort_nested_dict_or_list(list(args))) + list( - sort_nested_dict_or_list(list(kwargs.values()))) - serialized_combined = json.dumps(sort_nested_dict_or_list(combined), - sort_keys=True) - return serialized_combined - - def make_key(self, *args, **kwargs): - """Generate a cache key based on provided arguments""" - serialized = self._serialize(*args, **kwargs) - key = f"{hashlib.md5(serialized.encode()).hexdigest()}" - return key - - def make_custom_key(self, custom_part, *args, **kwargs): - """Generate a custom cache key with a specified part""" - serialized = self._serialize(*args, **kwargs) - key = f"{custom_part}:{hashlib.md5(serialized.encode()).hexdigest()}" - return key + def __init__(self): + pass + + def _serialize(self, *args, **kwargs): + """Serialize arguments to create a unique key""" + combined = list(sort_nested_dict_or_list(list(args))) + list( + sort_nested_dict_or_list(list(kwargs.values()))) + serialized_combined = json.dumps(sort_nested_dict_or_list(combined), + sort_keys=True) + return serialized_combined + + def make_key(self, *args, **kwargs): + """Generate a cache key based on provided arguments""" + serialized = self._serialize(*args, **kwargs) + key = f"{hashlib.md5(serialized.encode()).hexdigest()}" + return key + + def make_custom_key(self, custom_part, *args, **kwargs): + """Generate a custom cache key with a specified part""" + serialized = self._serialize(*args, **kwargs) + key = f"{custom_part}:{hashlib.md5(serialized.encode()).hexdigest()}" + return key diff --git a/py/helpers/cache/cache_local.py b/py/helpers/cache/cache_local.py index 753a3b3..1bed28e 100644 --- a/py/helpers/cache/cache_local.py +++ b/py/helpers/cache/cache_local.py @@ -1,8 +1,8 @@ import json import os -from helpers.logs.logs_handler import logger as logging from pathlib import Path +from helpers.logs.logs_handler import logger as logging from redis import StrictRedis from .cache_interface import CacheInterface @@ -10,43 +10,42 @@ class LocalCache(CacheInterface): - def __init__(self, - config: dict = {}, - with_redis_client: StrictRedis | None = None): - base_dir = config.get("local_base_dir", "./.cache_local_data") - self.redis_client = with_redis_client - self.current_script_path = os.path.dirname(os.path.abspath(__file__)) - self.base_dir = os.path.join(self.current_script_path, - f"../../{base_dir}") - - def _local_cache_path(self, key: str): - return Path(self.base_dir) / Path(key).with_suffix(".json") - - def json_get(self, key: str): - if self.redis_client: - try: - value = self.redis_client.json_get(key=key) - if value: - return value - except Exception as e: - logging.warning("GOT THE ISSUE IN REDIS", e) - return None - path = self._local_cache_path(key.replace(":", "/")) - if path.exists(): - with open(path, "r") as file: - value = json.load(file) - if self.redis_client: - self.redis_client.json_set(key, value) - return value + def __init__(self, + config: dict = {}, + 
with_redis_client: StrictRedis | None = None): + base_dir = config.get("local_base_dir", "./.cache_local_data") + self.redis_client = with_redis_client + self.current_script_path = os.path.dirname(os.path.abspath(__file__)) + self.base_dir = os.path.join(self.current_script_path, f"../../{base_dir}") + + def _local_cache_path(self, key: str): + return Path(self.base_dir) / Path(key).with_suffix(".json") + + def json_get(self, key: str): + if self.redis_client: + try: + value = self.redis_client.json_get(key=key) + if value: + return value + except Exception as e: + logging.warning("GOT THE ISSUE IN REDIS", e) return None - - def json_set(self, key, value): + path = self._local_cache_path(key.replace(":", "/")) + if path.exists(): + with open(path, "r") as file: + value = json.load(file) if self.redis_client: - try: - self.redis_client.json_set(key=key, value=value) - except Exception as e: - logging.warning("GOT THE ISSUE IN REDIS", e) - path = self._local_cache_path(key.replace(":", "/")) - path.parent.mkdir(parents=True, exist_ok=True) - with open(path, "w") as file: - json.dump(value, file) + self.redis_client.json_set(key, value) + return value + return None + + def json_set(self, key, value): + if self.redis_client: + try: + self.redis_client.json_set(key=key, value=value) + except Exception as e: + logging.warning("GOT THE ISSUE IN REDIS", e) + path = self._local_cache_path(key.replace(":", "/")) + path.parent.mkdir(parents=True, exist_ok=True) + with open(path, "w") as file: + json.dump(value, file) diff --git a/py/helpers/cache/cache_none.py b/py/helpers/cache/cache_none.py index 6fce380..482fd11 100644 --- a/py/helpers/cache/cache_none.py +++ b/py/helpers/cache/cache_none.py @@ -1,20 +1,20 @@ import json import os +from typing import Any + from helpers.logs.logs_handler import logger as logging + from .cache_interface import CacheInterface -from typing import Any class NoneCache(CacheInterface): - def __init__(self, - config: dict = {}, - with_redis_client: Any | None = None): - logging.warning('CACHE-TYPE-NONE -init # cache-ignore') + def __init__(self, config: dict = {}, with_redis_client: Any | None = None): + logging.warning('CACHE-TYPE-NONE -init # cache-ignore') - def json_get(self, key: str) -> None: - logging.warning('CACHE-TYPE-NONE -trying to get # cache-ignore') - return None + def json_get(self, key: str) -> None: + logging.warning('CACHE-TYPE-NONE -trying to get # cache-ignore') + return None - def json_set(self, key, value): - logging.warning('CACHE-TYPE-NONE -trying to set # cache-ignore') + def json_set(self, key, value): + logging.warning('CACHE-TYPE-NONE -trying to set # cache-ignore') diff --git a/py/helpers/cache/cache_redis.py b/py/helpers/cache/cache_redis.py index 9f99cd2..5c4e367 100644 --- a/py/helpers/cache/cache_redis.py +++ b/py/helpers/cache/cache_redis.py @@ -1,4 +1,5 @@ import json + from helpers.logs.logs_handler import logger as logging import redis from redis.commands.json.path import Path @@ -8,32 +9,32 @@ class RedisCache(CacheInterface): - def __init__(self, config={}): - try: - self.redis_client = redis.StrictRedis( - host=config.get("redis_host", "localhost"), - port=config.get("redis_port", 1234), - password=config.get("password", ""), - db=config.get("redis_db", 0), - ) - self.redis_client.ping() - except (redis.ConnectionError, redis.TimeoutError) as e: - logging.warning(f"RediConnection failed: {e}") - self.redis_client = None - - def get_client(self): - return self.redis_client - - def json_get(self, key): - if self.redis_client is None: 
- # logging.error('redis client not found..!!') - raise Exception("redis client not found , check connection !!") - value = self.redis_client.json().get(key) - return value if value else None - - def json_set(self, key, value): - if self.redis_client is None: - # logging.error('redis client not found..!!') - raise Exception("redis client not found , check connection !!") - - self.redis_client.json().set(key, Path.root_path(), value) + def __init__(self, config={}): + try: + self.redis_client = redis.StrictRedis( + host=config.get("redis_host", "localhost"), + port=config.get("redis_port", 1234), + password=config.get("password", ""), + db=config.get("redis_db", 0), + ) + self.redis_client.ping() + except (redis.ConnectionError, redis.TimeoutError) as e: + logging.warning(f"RediConnection failed: {e}") + self.redis_client = None + + def get_client(self): + return self.redis_client + + def json_get(self, key): + if self.redis_client is None: + # logging.error('redis client not found..!!') + raise Exception("redis client not found , check connection !!") + value = self.redis_client.json().get(key) + return value if value else None + + def json_set(self, key, value): + if self.redis_client is None: + # logging.error('redis client not found..!!') + raise Exception("redis client not found , check connection !!") + + self.redis_client.json().set(key, Path.root_path(), value) diff --git a/py/helpers/cache/tests/test_cache_gcs.py b/py/helpers/cache/tests/test_cache_gcs.py index 9fe0ad0..51f3952 100644 --- a/py/helpers/cache/tests/test_cache_gcs.py +++ b/py/helpers/cache/tests/test_cache_gcs.py @@ -1,8 +1,8 @@ import unittest -from tests.colorful_tests import ColorfulTestRunner from helpers.cache.cache_factory import GCSCache from helpers.cache.cache_factory import RedisCache +from tests.colorful_tests import ColorfulTestRunner class CacheGCSTest(unittest.TestCase): diff --git a/py/helpers/cache/tests/test_cache_local.py b/py/helpers/cache/tests/test_cache_local.py index 3330f34..317efb0 100644 --- a/py/helpers/cache/tests/test_cache_local.py +++ b/py/helpers/cache/tests/test_cache_local.py @@ -1,9 +1,9 @@ import unittest -from tests.colorful_tests import ColorfulTestRunner from helpers.cache.cache_factory import LocalCache from helpers.cache.cache_factory import RedisCache from helpers.cache.cache_helper import CacheKeyMaker +from tests.colorful_tests import ColorfulTestRunner class CacheLocalTest(unittest.TestCase): diff --git a/py/helpers/cache/tests/test_cache_none.py b/py/helpers/cache/tests/test_cache_none.py index 513358b..5c7b8ef 100644 --- a/py/helpers/cache/tests/test_cache_none.py +++ b/py/helpers/cache/tests/test_cache_none.py @@ -1,41 +1,41 @@ import unittest -from tests.colorful_tests import ColorfulTestRunner from helpers.cache.cache_factory import CacheFactory from helpers.cache.cache_helper import CacheKeyMaker +from tests.colorful_tests import ColorfulTestRunner class CacheNoneTest(unittest.TestCase): - @staticmethod - def test_none_cache(): - # Initialize the cache - cache = CacheFactory.get_cache(cache_type='none') + @staticmethod + def test_none_cache(): + # Initialize the cache + cache = CacheFactory.get_cache(cache_type='none') - key_maker = CacheKeyMaker() + key_maker = CacheKeyMaker() - key = key_maker.make_custom_key( - custom_part=":".join(["ACR203", "FVS", "fire"]), - managed_sample={ - "fire": "20%", - "base": None - }, - ) + key = key_maker.make_custom_key( + custom_part=":".join(["ACR203", "FVS", "fire"]), + managed_sample={ + "fire": "20%", + "base": None + }, + ) - # 
Set data in the cache - cache.json_set( - key, - {"Fire": [2023, 2034, 3004]}, - ) + # Set data in the cache + cache.json_set( + key, + {"Fire": [2023, 2034, 3004]}, + ) - # Retrieve data from the cache - result = cache.json_get(key) + # Retrieve data from the cache + result = cache.json_get(key) - # Assert the retrieved data is correct - expected_result = None - assert (result == expected_result - ), f"Expected {expected_result}, but got {result}" + # Assert the retrieved data is correct + expected_result = None + assert (result == expected_result + ), f"Expected {expected_result}, but got {result}" if __name__ == "__main__": - unittest.main(testRunner=ColorfulTestRunner()) + unittest.main(testRunner=ColorfulTestRunner()) diff --git a/py/helpers/cache/tests/test_cache_redis.py b/py/helpers/cache/tests/test_cache_redis.py index 57c60e5..ee3c87c 100644 --- a/py/helpers/cache/tests/test_cache_redis.py +++ b/py/helpers/cache/tests/test_cache_redis.py @@ -1,48 +1,48 @@ import unittest -from tests.colorful_tests import ColorfulTestRunner -from helpers.cache.cache_redis import RedisCache from helpers.cache.cache_factory import CacheFactory +from helpers.cache.cache_redis import RedisCache +from tests.colorful_tests import ColorfulTestRunner class CacheRedisTest(unittest.TestCase): - def test_redis_cache(self): - # Configuration for the Redis cache - config = {'redis_host': 'localhost', 'redis_port': 1234, 'redis_db': 0} + def test_redis_cache(self): + # Configuration for the Redis cache + config = {'redis_host': 'localhost', 'redis_port': 1234, 'redis_db': 0} - # Initialize the Redis cache - cache = RedisCache(config=config) + # Initialize the Redis cache + cache = RedisCache(config=config) - self.assertIsNotNone( - cache.get_client(), - 'Cache client is not found , check your redis connection !!') + self.assertIsNotNone( + cache.get_client(), + 'Cache client is not found , check your redis connection !!') - # Set data in the Redis cache - cache.json_set("ACR203:2013:FVS:MANAGED:FIRE_0001011100", - {"Fire": [2023, 2034, 3004]}) + # Set data in the Redis cache + cache.json_set("ACR203:2013:FVS:MANAGED:FIRE_0001011100", + {"Fire": [2023, 2034, 3004]}) - # Retrieve data from the Redis cache - result = cache.json_get("ACR203:2013:FVS:MANAGED:FIRE_0001011100") + # Retrieve data from the Redis cache + result = cache.json_get("ACR203:2013:FVS:MANAGED:FIRE_0001011100") - # Assert the retrieved data is correct - expected_result = {"Fire": [2023, 2034, 3004]} - self.assertEqual(result, expected_result, - f"Expected {expected_result}, but got {result}") + # Assert the retrieved data is correct + expected_result = {"Fire": [2023, 2034, 3004]} + self.assertEqual(result, expected_result, + f"Expected {expected_result}, but got {result}") - def test_redis_via_factory(self): + def test_redis_via_factory(self): - client = CacheFactory.get_cache('redis', { - 'redis_host': 'localhost', - 'redis_port': 1234, - 'redis_db': 0 - }) - client.json_set('KEY', {'welcome': 'bhai'}) - self.assertTrue({'welcome': 'bhai'} == client.json_get('KEY')) - # with self.assertRaises(Exception) as context: - # client.json_set('KEY', {}) - # self.assertTrue('redis client not found , check connection !!' 
in str(context.exception)) + client = CacheFactory.get_cache('redis', { + 'redis_host': 'localhost', + 'redis_port': 1234, + 'redis_db': 0 + }) + client.json_set('KEY', {'welcome': 'bhai'}) + self.assertTrue({'welcome': 'bhai'} == client.json_get('KEY')) + # with self.assertRaises(Exception) as context: + # client.json_set('KEY', {}) + # self.assertTrue('redis client not found , check connection !!' in str(context.exception)) if __name__ == "__main__": - unittest.main(testRunner=ColorfulTestRunner()) + unittest.main(testRunner=ColorfulTestRunner()) diff --git a/py/helpers/logs/logs_handler.py b/py/helpers/logs/logs_handler.py index 0473866..f5ea7f8 100644 --- a/py/helpers/logs/logs_handler.py +++ b/py/helpers/logs/logs_handler.py @@ -7,21 +7,22 @@ # logging.warning("warning") # logging.error("error") - - - - import logging + from google.cloud import logging as cloud_logging # Set this to True for Cloud logging USE_CLOUD_LOGGING = False + class CustomAdapter(logging.LoggerAdapter): - def process(self, msg, kwargs): - # Include the extra context from the adapter into the log message - extra_info = ' | '.join(f'{key}: {value}' for key, value in self.extra.items()) - return f'{extra_info} | {msg}', kwargs + + def process(self, msg, kwargs): + # Include the extra context from the adapter into the log message + extra_info = ' | '.join( + f'{key}: {value}' for key, value in self.extra.items()) + return f'{extra_info} | {msg}', kwargs + if USE_CLOUD_LOGGING: logging_client = cloud_logging.Client() diff --git a/py/sight/attribute.py b/py/sight/attribute.py index b137083..919e5d0 100644 --- a/py/sight/attribute.py +++ b/py/sight/attribute.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Hierarchical attribute annotations in the sight log.""" from sight.sight import Sight diff --git a/py/sight/block.py b/py/sight/block.py index e6cf96c..7b00db3 100644 --- a/py/sight/block.py +++ b/py/sight/block.py @@ -15,6 +15,7 @@ import inspect from typing import Dict, Optional, Text + from helpers.logs.logs_handler import logger as logging from sight.exception import exception from sight.location import Location @@ -23,35 +24,34 @@ class Block(object): - """Encapsulates start and stop points where a Sight log block is active.""" - - def __init__(self, *args): - if len(args) == 2: - (label, sight) = args - self.create_label(label, sight) - return - if len(args) == 3 and isinstance(args[2], dict): - (label, sight, attributes) = args - self.create_label(label, sight, attributes) - return - - if len(args) == 3: - (key, value, sight) = args - self.create_label('%s=%s' % (key, value), sight, - {str(key): str(value)}) - return - (key, value, sight, attributes) = args - full_attributes = attributes.copy() - full_attributes[key] = value - self.create_label('%s=%s' % (key, value), sight, full_attributes) - - def create_label( - self, - label: str, - sight: Sight, - attributes: Optional[Dict[Text, Text]] = None, - ) -> Optional[Location]: - """Creates and enters a block with a given label and attributes. 
+ """Encapsulates start and stop points where a Sight log block is active.""" + + def __init__(self, *args): + if len(args) == 2: + (label, sight) = args + self.create_label(label, sight) + return + if len(args) == 3 and isinstance(args[2], dict): + (label, sight, attributes) = args + self.create_label(label, sight, attributes) + return + + if len(args) == 3: + (key, value, sight) = args + self.create_label('%s=%s' % (key, value), sight, {str(key): str(value)}) + return + (key, value, sight, attributes) = args + full_attributes = attributes.copy() + full_attributes[key] = value + self.create_label('%s=%s' % (key, value), sight, full_attributes) + + def create_label( + self, + label: str, + sight: Sight, + attributes: Optional[Dict[Text, Text]] = None, + ) -> Optional[Location]: + """Creates and enters a block with a given label and attributes. Args: label: The label that identifies this block. @@ -62,50 +62,50 @@ def create_label( Returns: The starting location of this block. """ - self.sight = sight - if sight is None: - logging.info('<<< %s', label) - return None - - if not self.sight.is_logging_enabled(): - return None - - self.label = label - if attributes: - self.attributes = attributes - else: - self.attributes = dict() - for key in sorted(self.attributes.keys()): - self.sight.set_attribute(key, self.attributes.get(key)) - # pytype: disable=attribute-error - return self.sight.enter_block(self.label, sight_pb2.Object(), - inspect.currentframe().f_back.f_back) - # pytype: enable=attribute-error - - def __enter__(self): - return self - - def __exit__(self, exc_type, value, traceback): - if not self.sight: - return - - if not self.sight.is_logging_enabled(): - return - - if exc_type is not None: - # pytype: disable=attribute-error - exception(exc_type, value, traceback, self.sight, - inspect.currentframe().f_back) - # pytype: enable=attribute-error - - if self.sight is None: - logging.info('>>> %s', self.label) - return - - # pytype: disable=attribute-error - self.sight.exit_block(self.label, sight_pb2.Object(), - inspect.currentframe().f_back) - # pytype: enable=attribute-error - - for key in sorted(self.attributes.keys(), reverse=True): - self.sight.unset_attribute(key) + self.sight = sight + if sight is None: + logging.info('<<< %s', label) + return None + + if not self.sight.is_logging_enabled(): + return None + + self.label = label + if attributes: + self.attributes = attributes + else: + self.attributes = dict() + for key in sorted(self.attributes.keys()): + self.sight.set_attribute(key, self.attributes.get(key)) + # pytype: disable=attribute-error + return self.sight.enter_block(self.label, sight_pb2.Object(), + inspect.currentframe().f_back.f_back) + # pytype: enable=attribute-error + + def __enter__(self): + return self + + def __exit__(self, exc_type, value, traceback): + if not self.sight: + return + + if not self.sight.is_logging_enabled(): + return + + if exc_type is not None: + # pytype: disable=attribute-error + exception(exc_type, value, traceback, self.sight, + inspect.currentframe().f_back) + # pytype: enable=attribute-error + + if self.sight is None: + logging.info('>>> %s', self.label) + return + + # pytype: disable=attribute-error + self.sight.exit_block(self.label, sight_pb2.Object(), + inspect.currentframe().f_back) + # pytype: enable=attribute-error + + for key in sorted(self.attributes.keys(), reverse=True): + self.sight.unset_attribute(key) diff --git a/py/sight/data_structures.py b/py/sight/data_structures.py index 037fa99..86372d7 100644 --- 
a/py/sight/data_structures.py +++ b/py/sight/data_structures.py @@ -11,38 +11,42 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Logging for data structures and base types.""" import inspect +import math from typing import Any, List, Optional, Sequence +import warnings -import math import numpy as np import pandas as pd from sight.location import Location -# import tensorflow as tf - from sight.proto import sight_pb2 from sight.widgets.decision import decision from sight.widgets.numpy_sight import numpy_sight from sight.widgets.pandas_sight import pandas_sight + +# import tensorflow as tf + # from py.widgets.simulation import simulation_state # from py.widgets.tensorflow_sight import tensorflow_sight -import warnings # Suppress FutureWarning messages warnings.simplefilter(action='ignore', category=FutureWarning) import warnings + # Suppress FutureWarning messages warnings.simplefilter(action='ignore', category=FutureWarning) + class File: - def __init__(self, path: str, mime_type: str, binary: bool=True): + + def __init__(self, path: str, mime_type: str, binary: bool = True): self.path = path self.mime_type = mime_type + def sanitize_dict(d) -> dict: """ preprocess data which can't be handle by the AVRO file format. Args: @@ -65,9 +69,11 @@ def sanitize_dict(d) -> dict: sanitized[k] = v return sanitized -def log_var( - name: str, obj_to_log: Any, sight: Any, frame: Optional[Any] = None -) -> Optional[Location]: + +def log_var(name: str, + obj_to_log: Any, + sight: Any, + frame: Optional[Any] = None) -> Optional[Location]: """Documents a named Python object, if Sight is being used. Args: @@ -111,9 +117,9 @@ def log_var( sight.exit_block(name, end_obj) -def log( - obj_to_log: Any, sight: Any, frame: Optional[Any] = None -) -> Optional[Location]: +def log(obj_to_log: Any, + sight: Any, + frame: Optional[Any] = None) -> Optional[Location]: """Documents any Python object, if Sight is being used. 
Args: @@ -178,11 +184,8 @@ def log( #sight_obj.value.bytes_value = f.read() sight_obj.value.mime_type = obj_to_log.mime_type sight.log_object(sight_obj, True) - elif ( - isinstance(obj_to_log, list) - or isinstance(obj_to_log, tuple) - or isinstance(obj_to_log, set) - ): + elif (isinstance(obj_to_log, list) or isinstance(obj_to_log, tuple) or + isinstance(obj_to_log, set)): sight_obj.sub_type = sight_pb2.Object.SubType.ST_BLOCK_START sight_obj.block_start.sub_type = sight_pb2.BlockStart.ST_LIST sight_obj.block_start.list.sub_type = sight_pb2.ListStart.ST_HETEROGENEOUS @@ -200,15 +203,11 @@ def log( end_obj = sight_pb2.Object(sub_type=sight_pb2.Object.SubType.ST_BLOCK_END) sight.exit_block(label, end_obj) - elif ( - isinstance(obj_to_log, np.int64) - or isinstance(obj_to_log, np.float64) - or isinstance(obj_to_log, bool) - ): + elif (isinstance(obj_to_log, np.int64) or + isinstance(obj_to_log, np.float64) or isinstance(obj_to_log, bool)): numpy_sight.log('np scalar', obj_to_log, sight, frame) elif isinstance(obj_to_log, np.ndarray) or isinstance( - obj_to_log, numpy_sight.LabeledNpArray - ): + obj_to_log, numpy_sight.LabeledNpArray): numpy_sight.log('np array', obj_to_log, sight, frame) # elif isinstance(obj_to_log, tf.Tensor): # tensorflow_sight.log('TensorFlow Tensor', obj_to_log, sight, frame) @@ -216,7 +215,7 @@ def log( pandas_sight.log('pandas.DataFrame', obj_to_log, sight, frame) elif isinstance(obj_to_log, dict) or hasattr(obj_to_log, '__dict__'): if isinstance(obj_to_log, dict): - if(sight.params.file_format == '.avro'): + if (sight.params.file_format == '.avro'): # need to handle inf/NaN values in dictionary for avro obj_dict = sanitize_dict(obj_to_log) else: @@ -243,8 +242,7 @@ def log( item_end_obj = sight_pb2.Object( sub_type=sight_pb2.Object.SubType.ST_BLOCK_END, block_start=sight_pb2.BlockStart( - sub_type=sight_pb2.BlockStart.ST_LIST - ), + sub_type=sight_pb2.BlockStart.ST_LIST), ) sight.exit_block('map_entry', item_end_obj) @@ -262,8 +260,7 @@ def log( def get_full_sublog_of_first_element( - log_segment: Sequence[sight_pb2.Object], -) -> List[sight_pb2.Object]: + log_segment: Sequence[sight_pb2.Object],) -> List[sight_pb2.Object]: """Returns the section of an ordered log that corresponds to the first element. 
If the first element is a BlockStart, this function searches the log for the @@ -288,7 +285,7 @@ def get_full_sublog_of_first_element( elif log_segment[j].sub_type == sight_pb2.Object.ST_BLOCK_END: depth -= 1 if depth == 0: - return list(log_segment[0 : j + 1]) + return list(log_segment[0:j + 1]) return list(log_segment[0:1]) @@ -319,16 +316,14 @@ def from_ordered_log(log_segment: List[sight_pb2.Object]) -> Any: if start.value.sub_type == sight_pb2.Value.ST_NONE: return None elif start.sub_type == sight_pb2.Object.SubType.ST_BLOCK_START: - sub_log = log_segment[1 : len(log_segment) - 1] + sub_log = log_segment[1:len(log_segment) - 1] if start.block_start.sub_type == sight_pb2.BlockStart.ST_LIST: - if ( - start.block_start.list.sub_type - == sight_pb2.ListStart.ST_HETEROGENEOUS - ): + if (start.block_start.list.sub_type == + sight_pb2.ListStart.ST_HETEROGENEOUS): list_objects = [] i = 0 while i < len(sub_log): - cur = get_full_sublog_of_first_element(sub_log[i : len(sub_log)]) + cur = get_full_sublog_of_first_element(sub_log[i:len(sub_log)]) list_objects.append(from_ordered_log(cur)) i += len(cur) @@ -339,20 +334,16 @@ def from_ordered_log(log_segment: List[sight_pb2.Object]) -> Any: elif start.block_start.label == 'set': return set(list_objects) elif start.block_start.list.sub_type == sight_pb2.ListStart.ST_MAP_ENTRY: - key_sub_log = get_full_sublog_of_first_element( - sub_log[0 : len(sub_log)] - ) + key_sub_log = get_full_sublog_of_first_element(sub_log[0:len(sub_log)]) value_sub_log = get_full_sublog_of_first_element( - sub_log[len(key_sub_log) : len(sub_log)] - ) + sub_log[len(key_sub_log):len(sub_log)]) return (from_ordered_log(key_sub_log), from_ordered_log(value_sub_log)) elif start.block_start.list.sub_type == sight_pb2.ListStart.ST_MAP: map_object = {} i = 0 while i < len(sub_log) - 1: key_value_sub_log = get_full_sublog_of_first_element( - sub_log[i : len(sub_log)] - ) + sub_log[i:len(sub_log)]) (key, value) = from_ordered_log(key_value_sub_log) map_object[key] = value i += len(key_value_sub_log) diff --git a/py/sight/data_structures_test.py b/py/sight/data_structures_test.py index a17e0eb..873e38e 100644 --- a/py/sight/data_structures_test.py +++ b/py/sight/data_structures_test.py @@ -11,19 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- """Tests for py.sight.""" import inspect from absl.testing import absltest +from google3.analysis.dremel.core.capacitor.public.python import ( + pywrap_record_reader +) import numpy as np from sight.proto import sight_pb2 from sight.py import data_structures from sight.sight import Sight -from google3.analysis.dremel.core.capacitor.public.python import pywrap_record_reader - class DataStructuresTest(absltest.TestCase): @@ -31,8 +31,7 @@ class DataStructuresTest(absltest.TestCase): def _read_capacitor_file(file_path: str): protos = [] record_reader = pywrap_record_reader.RecordReader.CreateFromPath( - file_path, ['*'], 60.0 - ) + file_path, ['*'], 60.0) protos.extend(record_reader.IterRecords()) return sorted(protos, key=lambda x: x.index) @@ -56,9 +55,8 @@ def testBaseTypeLogging(self): data_structures.log(None, sight, frame) # ASSERT - actual_log = self._read_capacitor_file( - params.log_dir_path + '/testBaseTypeLogging.capacitor' - ) + actual_log = self._read_capacitor_file(params.log_dir_path + + '/testBaseTypeLogging.capacitor') self.assertEqual('abc', data_structures.from_log([actual_log[0]])) self.assertEqual(b'xyz', data_structures.from_log([actual_log[1]])) @@ -84,9 +82,8 @@ def testSequenceLogging(self): data_structures.log({'abc', b'xyz', 5, 2.5, True, None}, sight, frame) # ASSERT - actual_log = self._read_capacitor_file( - params.log_dir_path + '/testSequenceLogging.capacitor' - ) + actual_log = self._read_capacitor_file(params.log_dir_path + + '/testSequenceLogging.capacitor') self.assertEqual( ['abc', b'xyz', 5, 2.5, True, None], @@ -116,12 +113,15 @@ def testDictLogging(self): data_structures.log({'abc': b'xyz', 5: 2.5, True: None}, sight, frame) # ASSERT - actual_log = self._read_capacitor_file( - params.log_dir_path + '/testDictLogging.capacitor' - ) + actual_log = self._read_capacitor_file(params.log_dir_path + + '/testDictLogging.capacitor') self.assertEqual( - {'abc': b'xyz', 5: 2.5, True: None}, + { + 'abc': b'xyz', + 5: 2.5, + True: None + }, data_structures.from_log(actual_log), ) @@ -137,17 +137,22 @@ def testNamedValueDictLogging(self): # ACT frame = inspect.currentframe() with Sight(params) as sight: - data_structures.log_var( - 'map', {'abc': b'xyz', 5: 2.5, True: None}, sight, frame - ) + data_structures.log_var('map', { + 'abc': b'xyz', + 5: 2.5, + True: None + }, sight, frame) # ASSERT actual_log = self._read_capacitor_file( - params.log_dir_path + '/testNamedValueDictLogging.capacitor' - ) + params.log_dir_path + '/testNamedValueDictLogging.capacitor') self.assertEqual( - ('map', {'abc': b'xyz', 5: 2.5, True: None}), + ('map', { + 'abc': b'xyz', + 5: 2.5, + True: None + }), data_structures.from_log(actual_log), ) @@ -166,13 +171,11 @@ def testTensorInt64Logging(self): data_structures.log(np.array([[1, 2], [3, 4], [5, 6]]), sight, frame) # ASSERT - actual_log = self._read_capacitor_file( - params.log_dir_path + '/testTensorInt64Logging.capacitor' - ) + actual_log = self._read_capacitor_file(params.log_dir_path + + '/testTensorInt64Logging.capacitor') - np.testing.assert_array_almost_equal( - np.array([[1, 2], [3, 4], [5, 6]]), data_structures.from_log(actual_log) - ) + np.testing.assert_array_almost_equal(np.array([[1, 2], [3, 4], [5, 6]]), + data_structures.from_log(actual_log)) def testTensorDoubleLogging(self): # SETUP @@ -186,14 +189,12 @@ def testTensorDoubleLogging(self): # ACT frame = inspect.currentframe() with Sight(params) as sight: - data_structures.log( - np.array([[1.1, 2.2], [3.3, 4.4], [5.5, 6.6]]), sight, frame - ) + 
data_structures.log(np.array([[1.1, 2.2], [3.3, 4.4], [5.5, 6.6]]), sight, + frame) # ASSERT - actual_log = self._read_capacitor_file( - params.log_dir_path + '/testTensorDoubleLogging.capacitor' - ) + actual_log = self._read_capacitor_file(params.log_dir_path + + '/testTensorDoubleLogging.capacitor') np.testing.assert_array_almost_equal( np.array([[1.1, 2.2], [3.3, 4.4], [5.5, 6.6]]), diff --git a/py/sight/demo/async_demo.py b/py/sight/demo/async_demo.py index ae50331..099bc7a 100644 --- a/py/sight/demo/async_demo.py +++ b/py/sight/demo/async_demo.py @@ -1,4 +1,3 @@ - # Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,13 +11,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Demo for the python bindings to the Sight logging library.""" +import asyncio import os +from typing import Sequence + from absl import app from absl import flags -import asyncio import numpy as np import pandas as pd from sight import data_structures @@ -26,7 +26,6 @@ from sight.block import Block from sight.proto import sight_pb2 from sight.sight import Sight -from typing import Sequence FLAGS = flags.FLAGS @@ -46,16 +45,18 @@ def main(argv): async def coroutine_one(sight) -> None: - with Block("1", sight): - print('1 sight.location=', sight.location.get()) - await asyncio.sleep(2) - sight.text('Coroutine one finished') + with Block("1", sight): + print('1 sight.location=', sight.location.get()) + await asyncio.sleep(2) + sight.text('Coroutine one finished') + async def coroutine_two(sight) -> None: - with Block("2", sight): - print('2 sight.location=', sight.location.get()) - await asyncio.sleep(1) - sight.text('Coroutine two finished') + with Block("2", sight): + print('2 sight.location=', sight.location.get()) + await asyncio.sleep(1) + sight.text('Coroutine two finished') + async def main(argv: Sequence[str]) -> None: with get_sight_instance() as sight: @@ -68,8 +69,10 @@ async def main(argv: Sequence[str]) -> None: await task1 await task2 + def main_wrapper(argv: Sequence[str]): asyncio.run(main(argv)) + if __name__ == "__main__": app.run(main_wrapper) diff --git a/py/sight/demo/asyncq.py b/py/sight/demo/asyncq.py index 669a407..5a63ba8 100644 --- a/py/sight/demo/asyncq.py +++ b/py/sight/demo/asyncq.py @@ -1,32 +1,34 @@ -from absl import app -from absl import flags import asyncio import itertools as it import os +import random +import time +from typing import Sequence + +from absl import app +from absl import flags from sight import data_structures from sight.attribute import Attribute from sight.block import Block from sight.proto import sight_pb2 from sight.sight import Sight -import random -import time -from typing import Sequence - _NPROD = flags.DEFINE_integer('nprod', 1, 'Number of producers.') _NCON = flags.DEFINE_integer('ncon', 1, 'Number of consumers.') async def makeitem(size: int = 5) -> str: - return os.urandom(size).hex() + return os.urandom(size).hex() + async def randsleep(caller, sight: Sight) -> None: # i = random.randint(0, 10) - i=.1 + i = .1 if caller: sight.text(f"{caller} sleeping for {i} seconds.") await asyncio.sleep(i) + async def produce(name: int, q: asyncio.Queue, sight: Sight) -> None: n = random.randint(0, 10) # n=5 @@ -40,6 +42,7 @@ async def produce(name: int, q: asyncio.Queue, sight: Sight) -> None: sight.text(f"Producer {name} added <{i}> to queue.") print(f"Producer {name} added <{i}> to 
queue.") + async def consume(name: int, q: asyncio.Queue, sight: Sight) -> None: with Block('consumer', name, sight): while True: @@ -47,11 +50,12 @@ async def consume(name: int, q: asyncio.Queue, sight: Sight) -> None: i, t = await q.get() now = time.perf_counter() sight.text(f"Consumer {name} got element <{i}>" - f" in {now-t:0.5f} seconds.") + f" in {now-t:0.5f} seconds.") print(f"Consumer {name} got element <{i}>" f" in {now-t:0.5f} seconds.") q.task_done() + def get_sight_instance(): params = sight_pb2.Params( label='demo file', @@ -60,13 +64,18 @@ def get_sight_instance(): sight_obj = Sight(params) return sight_obj + async def main(argv: Sequence[str]) -> None: with get_sight_instance() as sight: start = time.perf_counter() q = asyncio.Queue() - producers = [sight.create_task(produce(n, q, sight)) for n in range(_NPROD.value)] - consumers = [sight.create_task(consume(n, q, sight)) for n in range(_NCON.value)] + producers = [ + sight.create_task(produce(n, q, sight)) for n in range(_NPROD.value) + ] + consumers = [ + sight.create_task(consume(n, q, sight)) for n in range(_NCON.value) + ] await asyncio.gather(*producers) await q.join() # Implicitly awaits consumers, too for c in consumers: @@ -75,8 +84,10 @@ async def main(argv: Sequence[str]) -> None: elapsed = time.perf_counter() - start sight.text(f"Program completed in {elapsed:0.5f} seconds.") + def main_wrapper(argv: Sequence[str]): asyncio.run(main(argv)) + if __name__ == "__main__": app.run(main_wrapper) diff --git a/py/sight/demo/cartpole/demo_cartpole.py b/py/sight/demo/cartpole/demo_cartpole.py index e0e7e7b..53fb7eb 100644 --- a/py/sight/demo/cartpole/demo_cartpole.py +++ b/py/sight/demo/cartpole/demo_cartpole.py @@ -17,85 +17,84 @@ def warn(*args, **kwargs): - pass + pass warnings.warn = warn import os -import gym -from helpers.logs.logs_handler import logger as logging - -import numpy as np from typing import Sequence + from absl import app from absl import flags from acme import wrappers - +import gym +from helpers.logs.logs_handler import logger as logging +import numpy as np +from sight.demo.cartpole.driver_cartpole import driver_fn from sight.proto import sight_pb2 from sight.sight import Sight from sight.widgets.decision import decision -from sight.demo.cartpole.driver_cartpole import driver_fn FLAGS = flags.FLAGS def get_sight_instance(): - params = sight_pb2.Params( - label='cartpole_experiment', - bucket_name=f'{os.environ["PROJECT_ID"]}-sight', - ) - sight_obj = Sight(params) - return sight_obj + params = sight_pb2.Params( + label='cartpole_experiment', + bucket_name=f'{os.environ["PROJECT_ID"]}-sight', + ) + sight_obj = Sight(params) + return sight_obj def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError("Too many command-line arguments.") - - with get_sight_instance() as sight: - # decision.run(sight=sight, env=wrappers.GymWrapper(gym.make("CartPole-v1"))) - decision.run( - state_attrs={ - "Cart Position": + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") + + with get_sight_instance() as sight: + # decision.run(sight=sight, env=wrappers.GymWrapper(gym.make("CartPole-v1"))) + decision.run( + state_attrs={ + "Cart Position": sight_pb2.DecisionConfigurationStart.AttrProps( min_value=-4.8, max_value=4.8, ), - "Cart Velocity": + "Cart Velocity": sight_pb2.DecisionConfigurationStart.AttrProps( min_value=-3.40, max_value=3.40, ), - "Pole Angle": + "Pole Angle": sight_pb2.DecisionConfigurationStart.AttrProps( min_value=-0.418, max_value=0.418, ), - "Pole Angular 
Velocity": + "Pole Angular Velocity": sight_pb2.DecisionConfigurationStart.AttrProps( min_value=-3.40, max_value=3.40, ), - }, - action_attrs={ - # "direction": sight_pb2.DecisionConfigurationStart.AttrProps( - # valid_int_values=[0,1], - # ), - "direction": + }, + action_attrs={ + # "direction": sight_pb2.DecisionConfigurationStart.AttrProps( + # valid_int_values=[0,1], + # ), + "direction": sight_pb2.DecisionConfigurationStart.AttrProps( min_value=0, max_value=1, # step_size=1 ), - }, - # env=wrappers.GymWrapper(gym.make('CartPole-v1')), - driver_fn=driver_fn, - sight=sight, - ) + }, + # env=wrappers.GymWrapper(gym.make('CartPole-v1')), + driver_fn=driver_fn, + sight=sight, + ) if __name__ == "__main__": - # logging.basicConfig(level=logging.DEBUG, ) - # print(logging.getLogger(__name__)) - app.run(main) + # logging.basicConfig(level=logging.DEBUG, ) + # print(logging.getLogger(__name__)) + app.run(main) diff --git a/py/sight/demo/cartpole/demo_pendulum.py b/py/sight/demo/cartpole/demo_pendulum.py index 618f2e6..caa6acc 100644 --- a/py/sight/demo/cartpole/demo_pendulum.py +++ b/py/sight/demo/cartpole/demo_pendulum.py @@ -17,76 +17,75 @@ def warn(*args, **kwargs): - pass + pass warnings.warn = warn import os -import gym -from helpers.logs.logs_handler import logger as logging - -import numpy as np from typing import Sequence + from absl import app from absl import flags from acme import wrappers - +import gym +from helpers.logs.logs_handler import logger as logging +import numpy as np +from sight.demo.cartpole.driver_pendulum import driver_fn from sight.proto import sight_pb2 from sight.sight import Sight from sight.widgets.decision import decision -from sight.demo.cartpole.driver_pendulum import driver_fn FLAGS = flags.FLAGS def get_sight_instance(): - params = sight_pb2.Params( - label='cartpole_experiment', - bucket_name=f'{os.environ["PROJECT_ID"]}-sight', - ) - sight_obj = Sight(params) - return sight_obj + params = sight_pb2.Params( + label='cartpole_experiment', + bucket_name=f'{os.environ["PROJECT_ID"]}-sight', + ) + sight_obj = Sight(params) + return sight_obj def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError("Too many command-line arguments.") - - with get_sight_instance() as sight: - # decision.run(sight=sight, env=wrappers.GymWrapper(gym.make("CartPole-v1"))) - decision.run( - state_attrs={ - "x": + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") + + with get_sight_instance() as sight: + # decision.run(sight=sight, env=wrappers.GymWrapper(gym.make("CartPole-v1"))) + decision.run( + state_attrs={ + "x": sight_pb2.DecisionConfigurationStart.AttrProps( min_value=-1.0, max_value=1.0, ), - "y": + "y": sight_pb2.DecisionConfigurationStart.AttrProps( min_value=-1.0, max_value=1.0, ), - "Angular Velocity": + "Angular Velocity": sight_pb2.DecisionConfigurationStart.AttrProps( min_value=-8.0, max_value=8.0, ), - }, - action_attrs={ - "Torque": + }, + action_attrs={ + "Torque": sight_pb2.DecisionConfigurationStart.AttrProps( min_value=-2.0, max_value=2.0, # step_size=1 ), - }, - driver_fn=driver_fn, - sight=sight, - ) + }, + driver_fn=driver_fn, + sight=sight, + ) if __name__ == "__main__": - # logging.basicConfig(level=logging.DEBUG, ) - # print(logging.getLogger(__name__)) - app.run(main) + # logging.basicConfig(level=logging.DEBUG, ) + # print(logging.getLogger(__name__)) + app.run(main) diff --git a/py/sight/demo/cartpole/driver_cartpole.py b/py/sight/demo/cartpole/driver_cartpole.py index af13f99..ccecf62 100644 --- 
a/py/sight/demo/cartpole/driver_cartpole.py +++ b/py/sight/demo/cartpole/driver_cartpole.py @@ -13,17 +13,17 @@ # limitations under the License. """Default Driver function to be used while training within the Sight log.""" -from helpers.logs.logs_handler import logger as logging +import math +from acme import specs +from acme import wrappers +import dm_env import gym +from gym.utils import seeding +from helpers.logs.logs_handler import logger as logging import numpy as np -import dm_env -import math -from acme import wrappers from sight import data_structures from sight.widgets.decision import decision -from gym.utils import seeding -from acme import specs import tree _file_name = "driver.py" @@ -49,97 +49,95 @@ def reset(): - global reset_next_step - global state - reset_next_step = False - np_random, seed = seeding.np_random() - low = -0.05 - high = 0.05 - state = np_random.uniform(low=low, high=high, size=(4, )) - observation = np.array(state, dtype=np.float32) - return dm_env.restart(observation) + global reset_next_step + global state + reset_next_step = False + np_random, seed = seeding.np_random() + low = -0.05 + high = 0.05 + state = np_random.uniform(low=low, high=high, size=(4,)) + observation = np.array(state, dtype=np.float32) + return dm_env.restart(observation) def step(action): - global reset_next_step - global state - if reset_next_step: - return reset() - - # observation, reward, done, info = self._environment.step(action) - - x, x_dot, theta, theta_dot = state - force = force_mag if action == 1 else -force_mag - costheta = math.cos(theta) - sintheta = math.sin(theta) - - temp = (force + polemass_length * theta_dot**2 * sintheta) / total_mass - thetaacc = (gravity * sintheta - costheta * temp) / ( - length * (4.0 / 3.0 - masspole * costheta**2 / total_mass)) - xacc = temp - polemass_length * thetaacc * costheta / total_mass - - x = x + tau * x_dot - x_dot = x_dot + tau * xacc - theta = theta + tau * theta_dot - theta_dot = theta_dot + tau * thetaacc - state = (x, x_dot, theta, theta_dot) - - terminated = bool(x < -x_threshold or x > x_threshold - or theta < -theta_threshold_radians - or theta > theta_threshold_radians) - if not terminated: - reward = 1.0 - else: - reward = 0.0 - - observation, reward, done, info = np.array( - state, dtype=np.float32), reward, terminated, {} - reset_next_step = done - - # Convert the type of the reward based on the spec, respecting the scalar or - # array property. 
-    reward = tree.map_structure(
-        lambda x, t: (  # pylint: disable=g-long-lambda
-            t.dtype.type(x)
-            if np.isscalar(x) else np.asarray(x, dtype=t.dtype)),
-        reward,
-        specs.Array(shape=(), dtype=float, name='reward'))
-
-    if done:
-        truncated = info.get('TimeLimit.truncated', False)
-        if truncated:
-            return dm_env.truncation(reward, observation)
-        return dm_env.termination(reward, observation)
-    return dm_env.transition(reward, observation)
+  global reset_next_step
+  global state
+  if reset_next_step:
+    return reset()
+
+  # observation, reward, done, info = self._environment.step(action)
+
+  x, x_dot, theta, theta_dot = state
+  force = force_mag if action == 1 else -force_mag
+  costheta = math.cos(theta)
+  sintheta = math.sin(theta)
+
+  temp = (force + polemass_length * theta_dot**2 * sintheta) / total_mass
+  thetaacc = (gravity * sintheta - costheta * temp) / (
+      length * (4.0 / 3.0 - masspole * costheta**2 / total_mass))
+  xacc = temp - polemass_length * thetaacc * costheta / total_mass
+
+  x = x + tau * x_dot
+  x_dot = x_dot + tau * xacc
+  theta = theta + tau * theta_dot
+  theta_dot = theta_dot + tau * thetaacc
+  state = (x, x_dot, theta, theta_dot)
+
+  terminated = bool(x < -x_threshold or x > x_threshold or
+                    theta < -theta_threshold_radians or
+                    theta > theta_threshold_radians)
+  if not terminated:
+    reward = 1.0
+  else:
+    reward = 0.0
+
+  observation, reward, done, info = np.array(
+      state, dtype=np.float32), reward, terminated, {}
+  reset_next_step = done
+
+  # Convert the type of the reward based on the spec, respecting the scalar or
+  # array property.
+  reward = tree.map_structure(
+      lambda x, t: (  # pylint: disable=g-long-lambda
+          t.dtype.type(x) if np.isscalar(x) else np.asarray(x, dtype=t.dtype)),
+      reward,
+      specs.Array(shape=(), dtype=float, name='reward'))
+
+  if done:
+    truncated = info.get('TimeLimit.truncated', False)
+    if truncated:
+      return dm_env.truncation(reward, observation)
+    return dm_env.termination(reward, observation)
+  return dm_env.transition(reward, observation)


 def driver_fn(sight) -> None:
-    """Executes the logic of searching for a value.
+  """Executes the logic of searching for a value.

  Args:
    env: The dm_env type env object used to call the reset and step methods.
    sight: The Sight logger object used to drive decisions.
""" - method_name = 'driver_fn' - logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name) + method_name = 'driver_fn' + logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name) + + timestep = reset() + + state_attrs = decision.get_state_attrs(sight) + for i in range(len(state_attrs)): + data_structures.log_var(state_attrs[i], timestep.observation[i], sight) - timestep = reset() + while not timestep.last(): + chosen_action = decision.decision_point("DP_label", sight) + timestep = step(chosen_action) - state_attrs = decision.get_state_attrs(sight) for i in range(len(state_attrs)): - data_structures.log_var(state_attrs[i], timestep.observation[i], sight) - - while not timestep.last(): - chosen_action = decision.decision_point("DP_label", sight) - timestep = step(chosen_action) - - for i in range(len(state_attrs)): - data_structures.log_var(state_attrs[i], timestep.observation[i], - sight) - - decision.decision_outcome( - "DO_label", - timestep.reward, - sight, - ) - logging.debug("<<<< Out %s of %s", method_name, _file_name) + data_structures.log_var(state_attrs[i], timestep.observation[i], sight) + + decision.decision_outcome( + "DO_label", + timestep.reward, + sight, + ) + logging.debug("<<<< Out %s of %s", method_name, _file_name) diff --git a/py/sight/demo/cartpole/driver_pendulum.py b/py/sight/demo/cartpole/driver_pendulum.py index 44788cc..f82dbc6 100644 --- a/py/sight/demo/cartpole/driver_pendulum.py +++ b/py/sight/demo/cartpole/driver_pendulum.py @@ -13,17 +13,17 @@ # limitations under the License. """Default Driver function to be used while training within the Sight log.""" -from helpers.logs.logs_handler import logger as logging +import math +from acme import specs +from acme import wrappers +import dm_env import gym +from gym.utils import seeding +from helpers.logs.logs_handler import logger as logging import numpy as np -import dm_env -import math -from acme import wrappers from sight import data_structures from sight.widgets.decision import decision -from gym.utils import seeding -from acme import specs import tree _file_name = "driver.py" @@ -45,100 +45,97 @@ def reset(): - global reset_next_step - global state - global elapsed_steps - elapsed_steps = 0 - reset_next_step = False - np_random, seed = seeding.np_random() - high = np.array([DEFAULT_X, DEFAULT_Y]) - low = -high - state = np_random.uniform(low=low, high=high) - theta, thetadot = state - observation = np.array( - [np.cos(theta), np.sin(theta), thetadot], dtype=np.float32) - return dm_env.restart(observation) + global reset_next_step + global state + global elapsed_steps + elapsed_steps = 0 + reset_next_step = False + np_random, seed = seeding.np_random() + high = np.array([DEFAULT_X, DEFAULT_Y]) + low = -high + state = np_random.uniform(low=low, high=high) + theta, thetadot = state + observation = np.array([np.cos(theta), np.sin(theta), thetadot], + dtype=np.float32) + return dm_env.restart(observation) def step(action): - # from gym_wrapper.py - global reset_next_step - global state - global elapsed_steps - # print("State : ", state) - if reset_next_step: - return reset() - - # step of pendulum - th, thdot = state - u = np.clip(action, -max_torque, max_torque)[0] - costs = angle_normalize(th)**2 + 0.1 * thdot**2 + 0.001 * (u**2) - - newthdot = thdot + (3 * g / (2 * l) * np.sin(th) + 3.0 / - (m * l**2) * u) * dt - newthdot = np.clip(newthdot, -max_speed, max_speed) - newth = th + newthdot * dt - - state = np.array([newth, newthdot]) - theta, thetadot = state - latest_state = np.array( - [np.cos(theta), 
np.sin(theta), thetadot], dtype=np.float32)
-
-    observation, reward, done, info = latest_state, -costs, False, {}
-    elapsed_steps += 1
-    if elapsed_steps >= max_episode_steps:
-        done = True
-
-    # from gym_wrapper.py
-    reset_next_step = done
-    # Convert the type of the reward based on the spec, respecting the scalar or
-    # array property.
-    reward = tree.map_structure(
-        lambda x, t: (  # pylint: disable=g-long-lambda
-            t.dtype.type(x)
-            if np.isscalar(x) else np.asarray(x, dtype=t.dtype)),
-        reward,
-        specs.Array(shape=(), dtype=float, name='reward'))
-
-    if done:
-        truncated = info.get('TimeLimit.truncated', False)
-        if truncated:
-            return dm_env.truncation(reward, observation)
-        return dm_env.termination(reward, observation)
-    return dm_env.transition(reward, observation)
+  # from gym_wrapper.py
+  global reset_next_step
+  global state
+  global elapsed_steps
+  # print("State : ", state)
+  if reset_next_step:
+    return reset()
+
+  # step of pendulum
+  th, thdot = state
+  u = np.clip(action, -max_torque, max_torque)[0]
+  costs = angle_normalize(th)**2 + 0.1 * thdot**2 + 0.001 * (u**2)
+
+  newthdot = thdot + (3 * g / (2 * l) * np.sin(th) + 3.0 / (m * l**2) * u) * dt
+  newthdot = np.clip(newthdot, -max_speed, max_speed)
+  newth = th + newthdot * dt
+
+  state = np.array([newth, newthdot])
+  theta, thetadot = state
+  latest_state = np.array(
+      [np.cos(theta), np.sin(theta), thetadot], dtype=np.float32)
+
+  observation, reward, done, info = latest_state, -costs, False, {}
+  elapsed_steps += 1
+  if elapsed_steps >= max_episode_steps:
+    done = True
+
+  # from gym_wrapper.py
+  reset_next_step = done
+  # Convert the type of the reward based on the spec, respecting the scalar or
+  # array property.
+  reward = tree.map_structure(
+      lambda x, t: (  # pylint: disable=g-long-lambda
+          t.dtype.type(x) if np.isscalar(x) else np.asarray(x, dtype=t.dtype)),
+      reward,
+      specs.Array(shape=(), dtype=float, name='reward'))
+
+  if done:
+    truncated = info.get('TimeLimit.truncated', False)
+    if truncated:
+      return dm_env.truncation(reward, observation)
+    return dm_env.termination(reward, observation)
+  return dm_env.transition(reward, observation)


 def angle_normalize(x):
-    return ((x + np.pi) % (2 * np.pi)) - np.pi
+  return ((x + np.pi) % (2 * np.pi)) - np.pi


 def driver_fn(sight) -> None:
-    """Executes the logic of searching for a value.
+  """Executes the logic of searching for a value.

  Args:
    env: The dm_env type env object used to call the reset and step methods.
    sight: The Sight logger object used to drive decisions.
""" - method_name = 'driver_fn' - logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name) + method_name = 'driver_fn' + logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name) + + timestep = reset() + + state_attrs = decision.get_state_attrs(sight) + for i in range(len(state_attrs)): + data_structures.log_var(state_attrs[i], timestep.observation[i], sight) - timestep = reset() + while not timestep.last(): + chosen_action = decision.decision_point("DP_label", sight) + timestep = step(chosen_action) - state_attrs = decision.get_state_attrs(sight) for i in range(len(state_attrs)): - data_structures.log_var(state_attrs[i], timestep.observation[i], sight) - - while not timestep.last(): - chosen_action = decision.decision_point("DP_label", sight) - timestep = step(chosen_action) - - for i in range(len(state_attrs)): - data_structures.log_var(state_attrs[i], timestep.observation[i], - sight) - - decision.decision_outcome( - "DO_label", - timestep.reward, - sight, - ) - logging.debug("<<<< Out %s of %s", method_name, _file_name) + data_structures.log_var(state_attrs[i], timestep.observation[i], sight) + + decision.decision_outcome( + "DO_label", + timestep.reward, + sight, + ) + logging.debug("<<<< Out %s of %s", method_name, _file_name) diff --git a/py/sight/demo/cartpole/gym_demo.py b/py/sight/demo/cartpole/gym_demo.py index 5899d15..caface0 100644 --- a/py/sight/demo/cartpole/gym_demo.py +++ b/py/sight/demo/cartpole/gym_demo.py @@ -17,48 +17,47 @@ def warn(*args, **kwargs): - pass + pass warnings.warn = warn import os -import gym -from helpers.logs.logs_handler import logger as logging - -# from helpers.logs.logs_handler import logger as logging - from typing import Sequence + from absl import app from absl import flags from acme import wrappers - +import gym +from helpers.logs.logs_handler import logger as logging from sight.proto import sight_pb2 from sight.sight import Sight from sight.widgets.decision import decision +# from helpers.logs.logs_handler import logger as logging + FLAGS = flags.FLAGS def get_sight_instance(): - params = sight_pb2.Params( - label='gym_experiment', - bucket_name=f'{os.environ["PROJECT_ID"]}-sight', - ) - sight_obj = Sight(params) - return sight_obj + params = sight_pb2.Params( + label='gym_experiment', + bucket_name=f'{os.environ["PROJECT_ID"]}-sight', + ) + sight_obj = Sight(params) + return sight_obj def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError("Too many command-line arguments.") + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") - with get_sight_instance() as sight: - decision.run(sight=sight, - env=wrappers.GymWrapper(gym.make(flags.FLAGS.env_name))) + with get_sight_instance() as sight: + decision.run(sight=sight, + env=wrappers.GymWrapper(gym.make(flags.FLAGS.env_name))) if __name__ == "__main__": - # logging.basicConfig(level=logging.DEBUG, ) - # print(logging.getLogger(__name__)) - app.run(main) + # logging.basicConfig(level=logging.DEBUG, ) + # print(logging.getLogger(__name__)) + app.run(main) diff --git a/py/sight/demo/demo.py b/py/sight/demo/demo.py index 7a2a4f9..9836a7f 100644 --- a/py/sight/demo/demo.py +++ b/py/sight/demo/demo.py @@ -11,11 +11,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- """Demo for the python bindings to the Sight logging library.""" -import os import inspect +import os + from absl import app from absl import flags import numpy as np @@ -44,15 +44,15 @@ def main(argv): with get_sight_instance() as sight: # if(FLAGS.parent_id): - # sight_obj = sight_pb2.Object() - # # sight_obj.log_uid = str(sight.id) - # # sight_obj.set_attribute('log_uid', sight.id) - # sight_obj.sub_type = sight_pb2.Object.SubType.ST_LINK - # sight_obj.link.linked_sight_id = FLAGS.parent_id - # sight_obj.link.link_type = sight_pb2.Link.LinkType.LT_CHILD_TO_PARENT - # frame = inspect.currentframe().f_back.f_back.f_back - # sight.set_object_code_loc(sight_obj, frame) - # sight.log_object(sight_obj, True) + # sight_obj = sight_pb2.Object() + # # sight_obj.log_uid = str(sight.id) + # # sight_obj.set_attribute('log_uid', sight.id) + # sight_obj.sub_type = sight_pb2.Object.SubType.ST_LINK + # sight_obj.link.linked_sight_id = FLAGS.parent_id + # sight_obj.link.link_type = sight_pb2.Link.LinkType.LT_CHILD_TO_PARENT + # frame = inspect.currentframe().f_back.f_back.f_back + # sight.set_object_code_loc(sight_obj, frame) + # sight.log_object(sight_obj, True) with Block("A-block", sight): sight.text("A preText") diff --git a/py/sight/demo/dummy.py b/py/sight/demo/dummy.py index cf0614e..06f8da1 100644 --- a/py/sight/demo/dummy.py +++ b/py/sight/demo/dummy.py @@ -2,26 +2,28 @@ def warn(*args, **kwargs): - pass + pass warnings.warn = warn -import os import json +import os + from absl import app from absl import flags -from sight.proto import sight_pb2 -from sight.sight import Sight from sight import service_utils as service from sight.proto import sight_pb2 -from sight_service.proto import service_pb2 +from sight.sight import Sight from sight.widgets.decision import decision +from sight_service.proto import service_pb2 FLAGS = flags.FLAGS + # Define the black box function to optimize. def black_box_function(args): - return sum(xi**2 for xi in args) + return sum(xi**2 for xi in args) + def driver(sight: Sight) -> None: """Executes the logic of searching for a value. 
@@ -33,10 +35,11 @@ def driver(sight: Sight) -> None:
     next_point = decision.decision_point("label", sight)
     reward = black_box_function(list(next_point.values()))
-    outcome = {'sum' : 30, 'avg' : 10}
+    outcome = {'sum': 30, 'avg': 10}
     decision.decision_outcome(json.dumps(next_point), sight, reward, outcome)

+
 def get_sight_instance():
   print('creating sight object')
   params = sight_pb2.Params(
@@ -52,42 +55,57 @@ def main(argv):
     raise app.UsageError("Too many command-line arguments.")

   with get_sight_instance() as sight:
-    decision.run(
-        driver_fn=driver,
-        state_attrs={
-            'state_1': sight_pb2.DecisionConfigurationStart.AttrProps(
-                min_value=0, max_value=100,
-            ),
-            'state_2': sight_pb2.DecisionConfigurationStart.AttrProps(
-                min_value=0, max_value=100,
-            ),
-            'state_3': sight_pb2.DecisionConfigurationStart.AttrProps(
-                min_value=0, max_value=100,
-            )
-        },
-        action_attrs={
-            'action_1': sight_pb2.DecisionConfigurationStart.AttrProps(
-                min_value=0, max_value=10,
-            ),
-            'action_2': sight_pb2.DecisionConfigurationStart.AttrProps(
-                min_value=0, max_value=10,
-            ),
-            'action_3': sight_pb2.DecisionConfigurationStart.AttrProps(
-                min_value=0, max_value=10,
-            )
-        },
-        sight=sight,
-        outcome_attrs={
-            'sum': sight_pb2.DecisionConfigurationStart.AttrProps(
-                min_value=0, max_value=10000,
-                description='The sum of choosen action params.',
-            ),
-            'avg': sight_pb2.DecisionConfigurationStart.AttrProps(
-                min_value=0, max_value=100,
-                description='The avg of choosen action params.',
-            )
-        }
-    )
+    decision.run(driver_fn=driver,
+                 state_attrs={
+                     'state_1':
+                         sight_pb2.DecisionConfigurationStart.AttrProps(
+                             min_value=0,
+                             max_value=100,
+                         ),
+                     'state_2':
+                         sight_pb2.DecisionConfigurationStart.AttrProps(
+                             min_value=0,
+                             max_value=100,
+                         ),
+                     'state_3':
+                         sight_pb2.DecisionConfigurationStart.AttrProps(
+                             min_value=0,
+                             max_value=100,
+                         )
+                 },
+                 action_attrs={
+                     'action_1':
+                         sight_pb2.DecisionConfigurationStart.AttrProps(
+                             min_value=0,
+                             max_value=10,
+                         ),
+                     'action_2':
+                         sight_pb2.DecisionConfigurationStart.AttrProps(
+                             min_value=0,
+                             max_value=10,
+                         ),
+                     'action_3':
+                         sight_pb2.DecisionConfigurationStart.AttrProps(
+                             min_value=0,
+                             max_value=10,
+                         )
+                 },
+                 sight=sight,
+                 outcome_attrs={
+                     'sum':
+                         sight_pb2.DecisionConfigurationStart.AttrProps(
+                             min_value=0,
+                             max_value=10000,
+                             description='The sum of chosen action params.',
+                         ),
+                     'avg':
+                         sight_pb2.DecisionConfigurationStart.AttrProps(
+                             min_value=0,
+                             max_value=100,
+                             description='The avg of chosen action params.',
+                         )
+                 })
+

 if __name__ == "__main__":
   app.run(main)
diff --git a/py/sight/demo/fn_sphere.py b/py/sight/demo/fn_sphere.py
index 8f904a9..f686b93 100644
--- a/py/sight/demo/fn_sphere.py
+++ b/py/sight/demo/fn_sphere.py
@@ -21,26 +21,26 @@ def warn(*args, **kwargs):

 warnings.warn = warn

+import inspect
+import json
 import os
 import random
 from typing import Sequence

 from absl import app
 from absl import flags
+import numpy as np
 from sight import data_structures
 from sight.proto import sight_pb2
 from sight.sight import Sight
 from sight.widgets.decision import decision
-import numpy as np
-import json
-import inspect

 FLAGS = flags.FLAGS


 # Define the black box function to optimize.
def black_box_function(args): - return sum(xi**2 for xi in args) + return sum(xi**2 for xi in args) def driver(sight: Sight) -> None: @@ -51,53 +51,53 @@ def driver(sight: Sight) -> None: """ for _ in range(1): - next_point = decision.decision_point("label", sight) - print('next_point : ', next_point) - reward = black_box_function(list(next_point.values())) - print('reward : ', reward) - decision.decision_outcome(json.dumps(next_point), sight, reward) + next_point = decision.decision_point("label", sight) + print('next_point : ', next_point) + reward = black_box_function(list(next_point.values())) + print('reward : ', reward) + decision.decision_outcome(json.dumps(next_point), sight, reward) def get_sight_instance(): - params = sight_pb2.Params( - label='benchmark_experiment', - bucket_name=f'{os.environ["PROJECT_ID"]}-sight', - ) - sight_obj = Sight(params) - return sight_obj + params = sight_pb2.Params( + label='benchmark_experiment', + bucket_name=f'{os.environ["PROJECT_ID"]}-sight', + ) + sight_obj = Sight(params) + return sight_obj def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError("Too many command-line arguments.") - - num_attributes = 10 - attr_range = (0, 5) - action_attrs={} - for i in range(num_attributes): - key = f"{i}" # Generate unique keys - action_attrs[key] = sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=attr_range[0], - max_value=attr_range[1], - ) - - with get_sight_instance() as sight: - - if(FLAGS.parent_id): - sight_obj = sight_pb2.Object() - sight_obj.sub_type = sight_pb2.Object.SubType.ST_LINK - sight_obj.link.linked_sight_id = FLAGS.parent_id - sight_obj.link.link_type = sight_pb2.Link.LinkType.LT_CHILD_TO_PARENT - frame = inspect.currentframe().f_back.f_back.f_back - sight.set_object_code_loc(sight_obj, frame) - sight.log_object(sight_obj, True) - - decision.run( - driver_fn=driver, - action_attrs=action_attrs, - sight=sight, - ) + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") + + num_attributes = 10 + attr_range = (0, 5) + action_attrs = {} + for i in range(num_attributes): + key = f"{i}" # Generate unique keys + action_attrs[key] = sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=attr_range[0], + max_value=attr_range[1], + ) + + with get_sight_instance() as sight: + + if (FLAGS.parent_id): + sight_obj = sight_pb2.Object() + sight_obj.sub_type = sight_pb2.Object.SubType.ST_LINK + sight_obj.link.linked_sight_id = FLAGS.parent_id + sight_obj.link.link_type = sight_pb2.Link.LinkType.LT_CHILD_TO_PARENT + frame = inspect.currentframe().f_back.f_back.f_back + sight.set_object_code_loc(sight_obj, frame) + sight.log_object(sight_obj, True) + + decision.run( + driver_fn=driver, + action_attrs=action_attrs, + sight=sight, + ) if __name__ == "__main__": - app.run(main) + app.run(main) diff --git a/py/sight/demo/fn_sphere_parallel.py b/py/sight/demo/fn_sphere_parallel.py index e1bae0c..82c48c0 100644 --- a/py/sight/demo/fn_sphere_parallel.py +++ b/py/sight/demo/fn_sphere_parallel.py @@ -16,29 +16,28 @@ def warn(*args, **kwargs): - pass + pass warnings.warn = warn +import inspect +import multiprocessing +import os +import re +import subprocess +import time from typing import Sequence from absl import app from absl import flags from helpers.logs.logs_handler import logger as logging -import multiprocessing -import subprocess -import re -from sight.proto import sight_pb2 -from sight.sight import Sight -import time -import os -import inspect - -from sight_service.proto import service_pb2 from 
sight import service_utils as service from sight.attribute import Attribute from sight.block import Block +from sight.proto import sight_pb2 +from sight.sight import Sight +from sight_service.proto import service_pb2 FLAGS = flags.FLAGS @@ -48,248 +47,244 @@ def warn(*args, **kwargs): def build_push_service_img(sight_id): - build_out = subprocess.run( - [ - 'docker', - 'build', - '-t', - f'gcr.io/{_PROJECT_ID}/{_SERVICE_PREFIX}' + sight_id, - '-f', - _DOCKER_FILE_PATH, - '.', - ], - check=True, - capture_output=True, - ) - # logging.info('build_out=%s', build_out) - - # Step 2: Retrieve an OAuth2 access token - access_token_cmd = ['gcloud', 'auth', 'print-access-token'] - access_token_result = subprocess.run(access_token_cmd, - capture_output=True, - text=True, - check=True) - access_token = access_token_result.stdout.strip() - - # Step 3: Authenticate with gcr.io using the access token - login_cmd = [ - 'docker', - 'login', - '-u', - 'oauth2accesstoken', - '-p', - access_token, - 'https://gcr.io', - ] - subprocess.run(login_cmd, check=True) - - # Step 4: push created image to gcr.io - push_out = subprocess.run( - [ - 'docker', 'push', - f'gcr.io/{_PROJECT_ID}/{_SERVICE_PREFIX}' + sight_id - ], - check=True, - capture_output=True, - ) - # logging.info('push_out=%s', push_out) - - return f'gcr.io/{_PROJECT_ID}/{_SERVICE_PREFIX}' + sight_id + build_out = subprocess.run( + [ + 'docker', + 'build', + '-t', + f'gcr.io/{_PROJECT_ID}/{_SERVICE_PREFIX}' + sight_id, + '-f', + _DOCKER_FILE_PATH, + '.', + ], + check=True, + capture_output=True, + ) + # logging.info('build_out=%s', build_out) + + # Step 2: Retrieve an OAuth2 access token + access_token_cmd = ['gcloud', 'auth', 'print-access-token'] + access_token_result = subprocess.run(access_token_cmd, + capture_output=True, + text=True, + check=True) + access_token = access_token_result.stdout.strip() + + # Step 3: Authenticate with gcr.io using the access token + login_cmd = [ + 'docker', + 'login', + '-u', + 'oauth2accesstoken', + '-p', + access_token, + 'https://gcr.io', + ] + subprocess.run(login_cmd, check=True) + + # Step 4: push created image to gcr.io + push_out = subprocess.run( + ['docker', 'push', f'gcr.io/{_PROJECT_ID}/{_SERVICE_PREFIX}' + sight_id], + check=True, + capture_output=True, + ) + # logging.info('push_out=%s', push_out) + + return f'gcr.io/{_PROJECT_ID}/{_SERVICE_PREFIX}' + sight_id def delete_service_img(sight_id): - print('deleting image : gcr.io/' + _PROJECT_ID + '/' + _SERVICE_PREFIX + - sight_id) - subprocess.run( - [ - 'gcloud', - 'container', - 'images', - 'delete', - f'gcr.io/{_PROJECT_ID}/{_SERVICE_PREFIX}{sight_id}', - '--quiet', - '--force-delete-tags', - ], - check=True, - ) + print('deleting image : gcr.io/' + _PROJECT_ID + '/' + _SERVICE_PREFIX + + sight_id) + subprocess.run( + [ + 'gcloud', + 'container', + 'images', + 'delete', + f'gcr.io/{_PROJECT_ID}/{_SERVICE_PREFIX}{sight_id}', + '--quiet', + '--force-delete-tags', + ], + check=True, + ) def delete_service(service_name): - print('deleting sight service') - sight_service_name = _SERVICE_PREFIX + service_name - cmd_args = [ - 'gcloud', 'run', 'services', 'delete', sight_service_name, '--quiet' - ] - result = subprocess.run(args=cmd_args, capture_output=True, text=True) - # print('result from deletion :', result) - if result.returncode == 0: - print( - f'Successfully deleted Cloud Run service: {_SERVICE_PREFIX}{service_name}' - ) - else: - print(f'Error deleting Cloud Run service: {result.stderr}') + print('deleting sight service') + sight_service_name = 
_SERVICE_PREFIX + service_name + cmd_args = [ + 'gcloud', 'run', 'services', 'delete', sight_service_name, '--quiet' + ] + result = subprocess.run(args=cmd_args, capture_output=True, text=True) + # print('result from deletion :', result) + if result.returncode == 0: + print( + f'Successfully deleted Cloud Run service: {_SERVICE_PREFIX}{service_name}' + ) + else: + print(f'Error deleting Cloud Run service: {result.stderr}') def run_experiment(sight_id, optimizer_value, image_id, table_queue): - cmd_args = [ - 'python', - 'py/sight/demo/fn_sphere.py', - '--decision_mode', - 'train', - '--deployment_mode', - 'distributed', - '--num_train_workers', - '1', - '--num_trials', - '10', - '--optimizer_type', - optimizer_value, - '--docker_image', - 'gcr.io/cameltrain/sight-worker', - # '--service_docker_file', 'sight_service/Dockerfile' - '--service_docker_img', - image_id, - '--parent_id', - sight_id - ] - result = subprocess.run(args=cmd_args, capture_output=True, text=True) - # print('here result is : ', result.stdout) - table_name = re.search(r'table generated\s*:\s*([^\s]+)', result.stdout) - service_name = re.search(r'_SERVICE_ID=\s*([0-9a-fA-F-]+)', result.stdout) - print('service_name : ', service_name.group(1)) - # raise SystemExit - - if (table_name and service_name): - # print(result.stdout) - table_queue.put( - (optimizer_value, table_name.group(1), service_name.group(1))) - # else: - print(f'whole log from {optimizer_value} : ', result.stderr) + cmd_args = [ + 'python', + 'py/sight/demo/fn_sphere.py', + '--decision_mode', + 'train', + '--deployment_mode', + 'distributed', + '--num_train_workers', + '1', + '--num_trials', + '10', + '--optimizer_type', + optimizer_value, + '--docker_image', + 'gcr.io/cameltrain/sight-worker', + # '--service_docker_file', 'sight_service/Dockerfile' + '--service_docker_img', + image_id, + '--parent_id', + sight_id + ] + result = subprocess.run(args=cmd_args, capture_output=True, text=True) + # print('here result is : ', result.stdout) + table_name = re.search(r'table generated\s*:\s*([^\s]+)', result.stdout) + service_name = re.search(r'_SERVICE_ID=\s*([0-9a-fA-F-]+)', result.stdout) + print('service_name : ', service_name.group(1)) + # raise SystemExit + + if (table_name and service_name): + # print(result.stdout) + table_queue.put( + (optimizer_value, table_name.group(1), service_name.group(1))) + # else: + print(f'whole log from {optimizer_value} : ', result.stderr) def get_sight_instance(): - params = sight_pb2.Params( - label='sphere_parallel', - bucket_name=f'{_PROJECT_ID}-sight', - ) - sight_obj = Sight(params) - return sight_obj + params = sight_pb2.Params( + label='sphere_parallel', + bucket_name=f'{_PROJECT_ID}-sight', + ) + sight_obj = Sight(params) + return sight_obj def check_exp_status(exp_sight_id, exp_service_id): - print('in check exp_status .........................') - print(exp_sight_id, exp_service_id) - os.environ['SIGHT_SERVICE_ID'] = exp_service_id - req = service_pb2.CurrentStatusRequest() - req.client_id = exp_sight_id - response = service.call( - lambda s, meta: s.CurrentStatus(req, 300, metadata=meta)) - print('response :', response.status) - if (response.status == service_pb2.CurrentStatusResponse.Status.SUCCESS): - return True - else: - return False + print('in check exp_status .........................') + print(exp_sight_id, exp_service_id) + os.environ['SIGHT_SERVICE_ID'] = exp_service_id + req = service_pb2.CurrentStatusRequest() + req.client_id = exp_sight_id + response = service.call( + lambda s, meta: 
s.CurrentStatus(req, 300, metadata=meta)) + print('response :', response.status) + if (response.status == service_pb2.CurrentStatusResponse.Status.SUCCESS): + return True + else: + return False def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError("Too many command-line arguments.") - - with get_sight_instance() as sight: - image_id = build_push_service_img(str(sight.id)) - print('image_id : ', image_id) - sight.text(image_id) - - # optimizer_values = ['ng_random_search', 'ng_pso', 'ng_cga'] - # optimizer_values = ['ng_random_search', 'ng_pso', 'ng_cga', 'ng_es', 'ng_dl_opo', 'ng_dde'] - optimizer_values = ['ng_cga'] - # optimizer_values = ['bayesian_opt'] - - # optimizer_values = [ - # 'ng_random_search', 'ng_pso', 'ng_de', 'ng_cga', 'ng_es', 'ng_dl_opo', 'ng_dde', - # 'ng_nmm', 'ng_tiny_spsa', 'ng_scr_hammersley_search', - # 'ng_two_points_de', 'ng_cma_small', 'ng_cma', 'ng_auto', 'ng_bo', - # 'ng_voronoi_de', 'bayesian_opt' - # ] - table_queue = multiprocessing.Queue() - processes = [] - - for optimizer_value in optimizer_values: - process = multiprocessing.Process(target=run_experiment, - args=(str(sight.id), - optimizer_value, image_id, - table_queue)) - processes.append(process) - process.start() - print('all process started.....') - - for process in processes: - process.join() - print('all process finished.....') - - delete_service_img(str(sight.id)) - - experiment_details = {} - while not table_queue.empty(): - optimizer_value, table_name, service_name = table_queue.get() - with Block("Superscript Experiment Details", sight): - with Attribute("optimizer", optimizer_value, sight): - sight_id_match = re.search(r'\.(.*?)_log$', table_name) - exp_sight_id = sight_id_match.group(1) - # with Attribute("sight_id", exp_sight_id, sight): - # with Attribute("table_name", table_name, sight): - # sight.text(f"{optimizer_value}:{exp_sight_id}") - sight_obj = sight_pb2.Object() - sight_obj.sub_type = sight_pb2.Object.SubType.ST_LINK - sight_obj.link.linked_sight_id = str(exp_sight_id) - sight_obj.link.link_type = sight_pb2.Link.LinkType.LT_PARENT_TO_CHILD - frame = inspect.currentframe().f_back.f_back.f_back - sight.set_object_code_loc(sight_obj, frame) - sight.log_object(sight_obj, True) - experiment_details[optimizer_value] = [ - exp_sight_id, table_name, service_name - ] - - print('experiment_details : ', experiment_details) - - print('waiting for all experiments to get completed.......') - completed_services = [] - while True: - print( - "checking if remaining experiments got compelted or not, to delete it's service" - ) - # completed_services = [] - # for k,v in experiment_details.items(): - # if check_exp_status(v[0], v[2]): - # service_name = v[2] - # sight_id = v[0] - # completed_services.append(service_name) - # del experiment_details[k] - # delete_service(service_name) - - for k in list(experiment_details.keys()): - v = experiment_details[k] - if check_exp_status(v[0], v[2]): - service_name = v[2] - # sight_id = v[0] - completed_services.append(service_name) - del experiment_details[k] - delete_service(service_name) - - # Check if all services have succeeded - print('completed_services : ', completed_services) - if len(completed_services) == len(optimizer_values): - # print() - break # All services have succeeded, exit loop - - # Wait for some time before polling again - print('going in sleep mode for 60 sec') - time.sleep(60) # Polling interval of 60 seconds - - logging.info( - 'Log GUI : https://streamlit-app-dq7fdwqgbq-uc.a.run.app/?' 
-        'log_id=%s', str(sight.id))
+  if len(argv) > 1:
+    raise app.UsageError("Too many command-line arguments.")
+
+  with get_sight_instance() as sight:
+    image_id = build_push_service_img(str(sight.id))
+    print('image_id : ', image_id)
+    sight.text(image_id)
+
+    # optimizer_values = ['ng_random_search', 'ng_pso', 'ng_cga']
+    # optimizer_values = ['ng_random_search', 'ng_pso', 'ng_cga', 'ng_es', 'ng_dl_opo', 'ng_dde']
+    optimizer_values = ['ng_cga']
+    # optimizer_values = ['bayesian_opt']
+
+    # optimizer_values = [
+    #     'ng_random_search', 'ng_pso', 'ng_de', 'ng_cga', 'ng_es', 'ng_dl_opo', 'ng_dde',
+    #     'ng_nmm', 'ng_tiny_spsa', 'ng_scr_hammersley_search',
+    #     'ng_two_points_de', 'ng_cma_small', 'ng_cma', 'ng_auto', 'ng_bo',
+    #     'ng_voronoi_de', 'bayesian_opt'
+    # ]
+    table_queue = multiprocessing.Queue()
+    processes = []
+
+    for optimizer_value in optimizer_values:
+      process = multiprocessing.Process(target=run_experiment,
+                                        args=(str(sight.id), optimizer_value,
+                                              image_id, table_queue))
+      processes.append(process)
+      process.start()
+    print('all process started.....')
+
+    for process in processes:
+      process.join()
+    print('all process finished.....')
+
+    delete_service_img(str(sight.id))
+
+    experiment_details = {}
+    while not table_queue.empty():
+      optimizer_value, table_name, service_name = table_queue.get()
+      with Block("Superscript Experiment Details", sight):
+        with Attribute("optimizer", optimizer_value, sight):
+          sight_id_match = re.search(r'\.(.*?)_log$', table_name)
+          exp_sight_id = sight_id_match.group(1)
+          # with Attribute("sight_id", exp_sight_id, sight):
+          #   with Attribute("table_name", table_name, sight):
+          #     sight.text(f"{optimizer_value}:{exp_sight_id}")
+          sight_obj = sight_pb2.Object()
+          sight_obj.sub_type = sight_pb2.Object.SubType.ST_LINK
+          sight_obj.link.linked_sight_id = str(exp_sight_id)
+          sight_obj.link.link_type = sight_pb2.Link.LinkType.LT_PARENT_TO_CHILD
+          frame = inspect.currentframe().f_back.f_back.f_back
+          sight.set_object_code_loc(sight_obj, frame)
+          sight.log_object(sight_obj, True)
+          experiment_details[optimizer_value] = [
+              exp_sight_id, table_name, service_name
+          ]
+
+    print('experiment_details : ', experiment_details)
+
+    print('waiting for all experiments to get completed.......')
+    completed_services = []
+    while True:
+      print(
+          "checking if remaining experiments got completed or not, to delete their service"
+      )
+      # completed_services = []
+      # for k,v in experiment_details.items():
+      #   if check_exp_status(v[0], v[2]):
+      #     service_name = v[2]
+      #     sight_id = v[0]
+      #     completed_services.append(service_name)
+      #     del experiment_details[k]
+      #     delete_service(service_name)
+
+      for k in list(experiment_details.keys()):
+        v = experiment_details[k]
+        if check_exp_status(v[0], v[2]):
+          service_name = v[2]
+          # sight_id = v[0]
+          completed_services.append(service_name)
+          del experiment_details[k]
+          delete_service(service_name)
+
+      # Check if all services have succeeded
+      print('completed_services : ', completed_services)
+      if len(completed_services) == len(optimizer_values):
+        # print()
+        break  # All services have succeeded, exit loop
+
+      # Wait for some time before polling again
+      print('going in sleep mode for 60 sec')
+      time.sleep(60)  # Polling interval of 60 seconds
+
+    logging.info(
+        'Log GUI : https://streamlit-app-dq7fdwqgbq-uc.a.run.app/?'
+ 'log_id=%s', str(sight.id)) if __name__ == "__main__": - app.run(main) + app.run(main) diff --git a/py/sight/demo/fractal_demo.py b/py/sight/demo/fractal_demo.py index f617c23..f2fe090 100644 --- a/py/sight/demo/fractal_demo.py +++ b/py/sight/demo/fractal_demo.py @@ -11,22 +11,25 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Demo of using the fractal environment.""" import os from typing import Sequence + from absl import app from absl import flags +from google3.googlex.fractal.library.rl.fourier_acme.fourier_environment import ( + fourier_environment +) +import numpy as np from sight import data_structures from sight.proto import sight_pb2 from sight.sight import Sight from sight.widgets.decision import decision -import numpy as np -from google3.googlex.fractal.library.rl.fourier_acme.fourier_environment import fourier_environment FLAGS = flags.FLAGS + def driver_fn(sim_env, sight: Sight) -> None: """Driver function to run the loop.""" initial_state = sim_env.reset() @@ -62,11 +65,9 @@ def main(argv: Sequence[str]) -> None: raise app.UsageError("Too many command-line arguments.") with get_sight_instance() as sight: - decision.run( - sight=sight, - driver_fn=driver_fn, - env=fourier_environment.get_fourier_rl_environment() - ) + decision.run(sight=sight, + driver_fn=driver_fn, + env=fourier_environment.get_fourier_rl_environment()) if __name__ == "__main__": diff --git a/py/sight/demo/gym_demo_env.py b/py/sight/demo/gym_demo_env.py index cc3acf2..4a4c002 100644 --- a/py/sight/demo/gym_demo_env.py +++ b/py/sight/demo/gym_demo_env.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Demo of using the Sight Decision API to train gym environment.""" import warnings @@ -24,18 +23,19 @@ def warn(*args, **kwargs): warnings.warn = warn import os -import gym from typing import Sequence + from absl import app from absl import flags from acme import wrappers - +import gym from sight.proto import sight_pb2 from sight.sight import Sight from sight.widgets.decision import decision FLAGS = flags.FLAGS + def get_sight_instance(): params = sight_pb2.Params( label='gym_experiment', @@ -50,9 +50,8 @@ def main(argv: Sequence[str]) -> None: raise app.UsageError("Too many command-line arguments.") with get_sight_instance() as sight: - decision.run( - sight=sight, env=wrappers.GymWrapper(gym.make(flags.FLAGS.env_name)) - ) + decision.run(sight=sight, + env=wrappers.GymWrapper(gym.make(flags.FLAGS.env_name))) if __name__ == "__main__": diff --git a/py/sight/demo/kokua_demo.py b/py/sight/demo/kokua_demo.py index c4a25b6..266e217 100644 --- a/py/sight/demo/kokua_demo.py +++ b/py/sight/demo/kokua_demo.py @@ -23,12 +23,11 @@ from sight.proto import sight_pb2 from sight.sight import Sight from sight.widgets.decision import decision - -from sklearn.svm import SVC from sklearn.datasets import load_breast_cancer -from sklearn.preprocessing import MinMaxScaler -from sklearn.model_selection import train_test_split from sklearn.metrics import roc_auc_score +from sklearn.model_selection import train_test_split +from sklearn.preprocessing import MinMaxScaler +from sklearn.svm import SVC FLAGS = flags.FLAGS @@ -48,66 +47,67 @@ # Define the black box function to optimize. 
def black_box_function(C, degree): - # C: SVC hyper parameter to optimize for. - model = SVC(C=C, degree=degree) - model.fit(X_train_scaled, y_train) - y_score = model.decision_function(X_test_scaled) - f = roc_auc_score(y_test, y_score) - return f + # C: SVC hyper parameter to optimize for. + model = SVC(C=C, degree=degree) + model.fit(X_train_scaled, y_train) + y_score = model.decision_function(X_test_scaled) + f = roc_auc_score(y_test, y_score) + return f def driver(sight: Sight) -> None: - """Executes the logic of searching for a value. + """Executes the logic of searching for a value. Args: sight: The Sight logger object used to drive decisions. """ - for _ in range(1): - next_point = decision.decision_point("label", sight) - next_point["degree"] = int(next_point["degree"]) - reward = black_box_function(next_point["C"], next_point["degree"]) + for _ in range(1): + next_point = decision.decision_point("label", sight) + next_point["degree"] = int(next_point["degree"]) + reward = black_box_function(next_point["C"], next_point["degree"]) - sight.text("C=%s, degree=%s, f(x)=%s" % ( - next_point["C"], - next_point["degree"], - reward, - )) - print("C : ", next_point["C"], ", degree : ", next_point["degree"], ", reward : ", reward) - decision.decision_outcome("target", reward, sight) + sight.text("C=%s, degree=%s, f(x)=%s" % ( + next_point["C"], + next_point["degree"], + reward, + )) + print("C : ", next_point["C"], ", degree : ", next_point["degree"], + ", reward : ", reward) + decision.decision_outcome("target", reward, sight) def get_sight_instance(): - params = sight_pb2.Params( - label='kokua_experiment', - bucket_name=f'{os.environ["PROJECT_ID"]}-sight', - ) - sight_obj = Sight(params) - return sight_obj + params = sight_pb2.Params( + label='kokua_experiment', + bucket_name=f'{os.environ["PROJECT_ID"]}-sight', + ) + sight_obj = Sight(params) + return sight_obj def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError("Too many command-line arguments.") - - with get_sight_instance() as sight: - decision.run( - driver_fn=driver, - action_attrs={ - "C": + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") + + with get_sight_instance() as sight: + decision.run( + driver_fn=driver, + action_attrs={ + "C": sight_pb2.DecisionConfigurationStart.AttrProps( min_value=0.1, max_value=10, ), - "degree": + "degree": sight_pb2.DecisionConfigurationStart.AttrProps( min_value=1, max_value=5, ), - }, - sight=sight, - ) + }, + sight=sight, + ) if __name__ == "__main__": - app.run(main) + app.run(main) diff --git a/py/sight/demo/portfolio_demo.py b/py/sight/demo/portfolio_demo.py index 81c24df..25c4ea6 100644 --- a/py/sight/demo/portfolio_demo.py +++ b/py/sight/demo/portfolio_demo.py @@ -18,7 +18,7 @@ def warn(*args, **kwargs): - pass + pass warnings.warn = warn @@ -26,20 +26,20 @@ def warn(*args, **kwargs): import asyncio import os import threading -from typing import Sequence, Any +from typing import Any, Sequence from absl import app from absl import flags +# from fvs_sight.fvs_api import action_attrs +# from fvs_sight.fvs_api import outcome_attrs +from fvs_sight import fvs_api import pandas as pd from sight.attribute import Attribute from sight.block import Block from sight.proto import sight_pb2 from sight.sight import Sight -# from fvs_sight.fvs_api import action_attrs -# from fvs_sight.fvs_api import outcome_attrs -from fvs_sight import fvs_api -from sight.widgets.decision import decision # from sight.widgets.decision.proposal import spawn_workers, 
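The kokua driver above treats the held-out ROC AUC of an SVC as the reward for a decision point. Pulled out of Sight, the objective is an ordinary function; a sketch assuming scikit-learn and the breast-cancer split that the elided part of this hunk sets up:

```python
from sklearn.datasets import load_breast_cancer
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import SVC

X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)


def black_box_function(C: float, degree: int) -> float:
  # Higher ROC AUC on the held-out split means a better (C, degree) choice.
  model = SVC(C=C, degree=degree)
  model.fit(X_train_scaled, y_train)
  y_score = model.decision_function(X_test_scaled)
  return roc_auc_score(y_test, y_score)


print(black_box_function(C=1.0, degree=3))
```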
launch_worklist_scheduler, propose_actions +from sight.widgets.decision import decision from sight.widgets.decision import proposal from sight.widgets.decision.resource_lock import RWLockDictWrapper @@ -155,89 +155,92 @@ def warn(*args, **kwargs): FLAGS = flags.FLAGS + def get_sight_instance(): - params = sight_pb2.Params( - label="kokua_experiment", - bucket_name=f'{os.environ["PROJECT_ID"]}-sight', - ) - sight_obj = Sight(params) - return sight_obj + params = sight_pb2.Params( + label="kokua_experiment", + bucket_name=f'{os.environ["PROJECT_ID"]}-sight', + ) + sight_obj = Sight(params) + return sight_obj + async def propose_actions(sight: Sight, base_project_config: dict[str, Any], treatments: dict[str, Any]) -> pd.Series: - treatment_project_config = treatments - tasks = [] - with Attribute("Managed", "0", sight): - # base_sim = decision.propose_actions(sight, - # action_dict=base_project_config) - # await proposal.push_message(sight.id, base_sim) - # unmanaged_task = sight.create_task( - # proposal.fetch_outcome(sight.id, base_sim)) - # tasks.append(unmanaged_task) - unmanaged_task = sight.create_task( - proposal.propose_actions(sight, action_dict=base_project_config)) - tasks.append(unmanaged_task) - with Attribute("Managed", "1", sight): - # treatment_sim = decision.propose_actions( - # sight, action_dict=treatment_project_config) - # await proposal.push_message(sight.id, treatment_sim) - # managed_task = sight.create_task( - # proposal.fetch_outcome(sight.id, treatment_sim)) - # tasks.append(managed_task) - managed_task = sight.create_task( - proposal.propose_actions(sight, action_dict=treatment_project_config)) - tasks.append(managed_task) - - [unmanaged_response, managed_response] = await asyncio.gather(*tasks) - return unmanaged_response, managed_response + treatment_project_config = treatments + tasks = [] + with Attribute("Managed", "0", sight): + # base_sim = decision.propose_actions(sight, + # action_dict=base_project_config) + # await proposal.push_message(sight.id, base_sim) + # unmanaged_task = sight.create_task( + # proposal.fetch_outcome(sight.id, base_sim)) + # tasks.append(unmanaged_task) + unmanaged_task = sight.create_task( + proposal.propose_actions(sight, action_dict=base_project_config)) + tasks.append(unmanaged_task) + with Attribute("Managed", "1", sight): + # treatment_sim = decision.propose_actions( + # sight, action_dict=treatment_project_config) + # await proposal.push_message(sight.id, treatment_sim) + # managed_task = sight.create_task( + # proposal.fetch_outcome(sight.id, treatment_sim)) + # tasks.append(managed_task) + managed_task = sight.create_task( + proposal.propose_actions(sight, action_dict=treatment_project_config)) + tasks.append(managed_task) + + [unmanaged_response, managed_response] = await asyncio.gather(*tasks) + return unmanaged_response, managed_response async def main(sight: Sight, argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError("Too many command-line arguments.") - - sample_list = [sample for i in range(FLAGS.num_trials)] - - # print('SIGHT ID => ',sight.id) - with Block("Propose actions", sight): - with Attribute("project_id", "APR107", sight): - tasks = [] - print("len(sample_list) : ", len(sample_list)) - for id in range(len(sample_list)): - with Attribute("sample_id", id, sight): - tasks.append( - sight.create_task( - # both base and treatment are considerred to be same dict here - propose_actions(sight, sample_list[id], sample_list[id]) - ) - ) - - print("waiting for all get outcome to finish.....") - 
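`propose_actions` above schedules one task per treatment arm and gathers both results. The underlying asyncio shape, independent of Sight (`evaluate` is a hypothetical coroutine standing in for `proposal.propose_actions`):

```python
import asyncio


async def evaluate(config: dict) -> dict:
  # Placeholder for an RPC or simulation round trip.
  await asyncio.sleep(0.1)
  return {'config': config, 'reward': sum(config.values())}


async def fan_out(configs: list) -> list:
  # Schedule everything first so the awaits overlap, then gather in order.
  tasks = [asyncio.create_task(evaluate(c)) for c in configs]
  return await asyncio.gather(*tasks)


results = asyncio.run(fan_out([{'a1': 1}, {'a1': 2}, {'a1': 3}]))
print(results)
```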
diff_time_series = await asyncio.gather(*tasks) - print("all get outcome are finished.....") - print(f'Combine Series : {diff_time_series}') + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") + + sample_list = [sample for i in range(FLAGS.num_trials)] + + # print('SIGHT ID => ',sight.id) + with Block("Propose actions", sight): + with Attribute("project_id", "APR107", sight): + tasks = [] + print("len(sample_list) : ", len(sample_list)) + for id in range(len(sample_list)): + with Attribute("sample_id", id, sight): + tasks.append( + sight.create_task( + # both base and treatment are considerred to be same dict here + propose_actions(sight, sample_list[id], sample_list[id]))) + + print("waiting for all get outcome to finish.....") + diff_time_series = await asyncio.gather(*tasks) + print("all get outcome are finished.....") + print(f'Combine Series : {diff_time_series}') + def main_wrapper(argv): - start_time = time.perf_counter() - with get_sight_instance() as sight: - decision.run(action_attrs=fvs_api.get_action_attrs(), - outcome_attrs=fvs_api.get_outcome_attrs(), - sight=sight) - asyncio.run(main(sight, argv)) - - end_time = time.perf_counter() - elapsed_time = end_time - start_time - print(f"Elapsed time: {elapsed_time} seconds") - hours, remainder = divmod(elapsed_time, 3600) - minutes, seconds = divmod(remainder, 60) - - if hours > 0: - print(f"Elapsed time: {int(hours)} hour(s), {int(minutes)} minute(s), {seconds:.2f} second(s)") - elif minutes > 0: - print(f"Elapsed time: {int(minutes)} minute(s), {seconds:.2f} second(s)") - else: - print(f"Elapsed time: {seconds:.2f} second(s)") + start_time = time.perf_counter() + with get_sight_instance() as sight: + decision.run(action_attrs=fvs_api.get_action_attrs(), + outcome_attrs=fvs_api.get_outcome_attrs(), + sight=sight) + asyncio.run(main(sight, argv)) + + end_time = time.perf_counter() + elapsed_time = end_time - start_time + print(f"Elapsed time: {elapsed_time} seconds") + hours, remainder = divmod(elapsed_time, 3600) + minutes, seconds = divmod(remainder, 60) + + if hours > 0: + print( + f"Elapsed time: {int(hours)} hour(s), {int(minutes)} minute(s), {seconds:.2f} second(s)" + ) + elif minutes > 0: + print(f"Elapsed time: {int(minutes)} minute(s), {seconds:.2f} second(s)") + else: + print(f"Elapsed time: {seconds:.2f} second(s)") if __name__ == "__main__": - app.run(main_wrapper) + app.run(main_wrapper) diff --git a/py/sight/demo/propose_action.py b/py/sight/demo/propose_action.py index 5b1bb1c..e5df312 100644 --- a/py/sight/demo/propose_action.py +++ b/py/sight/demo/propose_action.py @@ -21,7 +21,7 @@ def warn(*args, **kwargs): - pass + pass warnings.warn = warn @@ -31,20 +31,22 @@ def warn(*args, **kwargs): from absl import app from absl import flags +# from fvs_sight.fvs_api import action_attrs, outcome_attrs +from fvs_sight import fvs_api +import pandas as pd from sight import data_structures -from sight.proto import sight_pb2 -from sight.sight import Sight +from sight import service_utils as service from sight.attribute import Attribute from sight.block import Block +from sight.proto import sight_pb2 +from sight.sight import Sight from sight.widgets.decision import decision -import pandas as pd -from sight.widgets.decision.single_action_optimizer_client import SingleActionOptimizerClient from sight.widgets.decision import trials -# from fvs_sight.fvs_api import action_attrs, outcome_attrs -from fvs_sight import fvs_api -from sight_service.proto import service_pb2 -from sight import service_utils as 
service +from sight.widgets.decision.single_action_optimizer_client import ( + SingleActionOptimizerClient +) from sight_service.optimizer_instance import param_proto_to_dict +from sight_service.proto import service_pb2 _RUN_MODE = flags.DEFINE_enum( 'run_mode', @@ -56,165 +58,158 @@ def warn(*args, **kwargs): def get_sight_instance(): - params = sight_pb2.Params( - label="kokua_experiment", - bucket_name=f'{os.environ["PROJECT_ID"]}-sight', - ) - sight_obj = Sight(params) - return sight_obj + params = sight_pb2.Params( + label="kokua_experiment", + bucket_name=f'{os.environ["PROJECT_ID"]}-sight', + ) + sight_obj = Sight(params) + return sight_obj def launch_dummy_optimizer(sight): - optimizer_object = SingleActionOptimizerClient( - sight_pb2.DecisionConfigurationStart.OptimizerType. - OT_WORKLIST_SCHEDULER, sight) - decision_configuration = sight_pb2.DecisionConfigurationStart() - decision_configuration.optimizer_type = optimizer_object.optimizer_type() - - decision_configuration.num_trials = FLAGS.num_trials - decision_configuration.choice_config[sight.params.label].CopyFrom( - optimizer_object.create_config()) - # decision._attr_dict_to_proto(state_attrs, - # decision_configuration.state_attrs) - decision.attr_dict_to_proto(fvs_api.get_action_attrs(), - decision_configuration.action_attrs) - decision.attr_dict_to_proto(fvs_api.get_outcome_attrs(), - decision_configuration.outcome_attrs) - trials.launch( - optimizer_object, - decision_configuration, - FLAGS.num_train_workers, - sight, - ) + optimizer_object = SingleActionOptimizerClient( + sight_pb2.DecisionConfigurationStart.OptimizerType.OT_WORKLIST_SCHEDULER, + sight) + decision_configuration = sight_pb2.DecisionConfigurationStart() + decision_configuration.optimizer_type = optimizer_object.optimizer_type() + + decision_configuration.num_trials = FLAGS.num_trials + decision_configuration.choice_config[sight.params.label].CopyFrom( + optimizer_object.create_config()) + # decision._attr_dict_to_proto(state_attrs, + # decision_configuration.state_attrs) + decision.attr_dict_to_proto(fvs_api.get_action_attrs(), + decision_configuration.action_attrs) + decision.attr_dict_to_proto(fvs_api.get_outcome_attrs(), + decision_configuration.outcome_attrs) + trials.launch( + optimizer_object, + decision_configuration, + FLAGS.num_train_workers, + sight, + ) def simulate_fvs(sight, params_dict): - print('here params_dict is :', params_dict) - mitigation_list = [101, 102, 103, 104, 105] - sim_stream = pd.Series(mitigation_list) - # print(sim_stream) - return sim_stream + print('here params_dict is :', params_dict) + mitigation_list = [101, 102, 103, 104, 105] + sim_stream = pd.Series(mitigation_list) + # print(sim_stream) + return sim_stream def driver_func(sight): - params_dict = decision.decision_point("label", sight) - # params_dict = {'fvs_type':'managed','region':'BM','project_id':'ACR173','desc': 'fire_projectACR173', 'fire-SIMFIRE_27-1_cycle': 2028, 'fire-SIMFIRE_27-6_stand_area_burned': 10.0, 'fire-SIMFIRE_30-1_cycle': 2031, 'fire-SIMFIRE_30-6_stand_area_burned': 10.0, 'fire-SIMFIRE_31-1_cycle': 2032, 'fire-SIMFIRE_31-6_stand_area_burned': 10.0} - print('params_dict : ', params_dict) - # raise SystemError + params_dict = decision.decision_point("label", sight) + # params_dict = {'fvs_type':'managed','region':'BM','project_id':'ACR173','desc': 'fire_projectACR173', 'fire-SIMFIRE_27-1_cycle': 2028, 'fire-SIMFIRE_27-6_stand_area_burned': 10.0, 'fire-SIMFIRE_30-1_cycle': 2031, 'fire-SIMFIRE_30-6_stand_area_burned': 10.0, 
'fire-SIMFIRE_31-1_cycle': 2032, 'fire-SIMFIRE_31-6_stand_area_burned': 10.0} + print('params_dict : ', params_dict) + # raise SystemError - sim_stream = simulate_fvs(sight, params_dict) + sim_stream = simulate_fvs(sight, params_dict) - outcome = {'time_series': sim_stream} - print("outcome : ", outcome) + outcome = {'time_series': sim_stream} + print("outcome : ", outcome) - decision.decision_outcome('outcome_label', - sight, - reward=0, - outcome=outcome) + decision.decision_outcome('outcome_label', sight, reward=0, outcome=outcome) def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError("Too many command-line arguments.") - - with get_sight_instance() as sight: - - if (_RUN_MODE.value == 'propose_action'): - launch_dummy_optimizer(sight) - - with Block("Propose actions", sight): - with Attribute("project_id", "APR107", sight): - with Attribute("sample_id", "Id-1", sight): - with Attribute("managed", "1", sight): - # get actions containing fvs params from the fire model - actions_dict = {'a1': 1, 'a2': 1} - unique_action_id = decision.propose_actions( - sight, actions_dict) - # actions_dict = {'a1': 3, 'a2': 3} - # unique_action_id = decision.propose_actions( - # sight, actions_dict) - # actions_dict = {'a1': 5, 'a2': 5} - # unique_action_id = decision.propose_actions( - # sight, actions_dict) - # print("unique_action_id : ", unique_action_id) - with Attribute("managed", "0", sight): - # get actions containing fvs params from the fire model - actions_dict = {'a1': 2, 'a2': 2} - unique_action_id = decision.propose_actions( - sight, actions_dict) - # actions_dict = {'a1': 4, 'a2': 4} - # unique_action_id = decision.propose_actions( - # sight, actions_dict) - # actions_dict = {'a1': 6, 'a2': 6} - # unique_action_id = decision.propose_actions( - # sight, actions_dict) - - # print("unique_action_id : ", unique_action_id) - - # # spawn workers - # trials.start_jobs( - # num_train_workers=1, - # num_trials=1, - # binary_path='fvs_sight/fvs_worker.py', - # optimizer_type='worklist_scheduler', - # docker_image='gcr.io/cameltrain/sight-portfolio-worker', - # decision_mode='train', - # deployment_mode='worker_mode', - # worker_mode='dsub_cloud_worker', - # sight=sight, - # ) - - elif (_RUN_MODE.value == 'get_outcome'): - if (not FLAGS.sight_log_id): - raise ValueError( - "sight_log_id have to be passed from the proposed action run for get outcome" - ) - - request = service_pb2.GetOutcomeRequest() - request.client_id = str(FLAGS.sight_log_id) - action_list = [1] #, 3, 4, 5, 6 - # request.unique_ids.append(1) - request.unique_ids.extend(action_list) - response = service.call( - lambda s, meta: s.GetOutcome(request, 300, metadata=meta)) - - outcome_list = [] - id = 0 - for outcome in response.outcome: - if (outcome.status == - service_pb2.GetOutcomeResponse.Outcome.Status.PENDING): - print(f"action id : {action_list[id]} is pending...") - elif (outcome.status == - service_pb2.GetOutcomeResponse.Outcome.Status.ACTIVE): - print(f"action id : {action_list[id]} is active...") - elif (outcome.status == - service_pb2.GetOutcomeResponse.Outcome.Status.NOT_EXIST): - print(f"action id : {action_list[id]} does not exist...") - elif (outcome.status == - service_pb2.GetOutcomeResponse.Outcome.Status.COMPLETED): - print(f"action id : {action_list[id]} is completed...") - outcome_dict = {} - outcome_dict['action_id'] = outcome.action_id - outcome_dict['reward'] = outcome.reward - outcome_dict['action'] = param_proto_to_dict( - outcome.action_attrs) - outcome_dict['outcome'] = 
param_proto_to_dict( - outcome.outcome_attrs) - print('here : ', outcome_dict['outcome']['time_series'], type(outcome_dict['outcome']['time_series'])) - outcome_dict['attributes'] = param_proto_to_dict( - outcome.attributes) - outcome_list.append(outcome_dict) - - id += 1 - - print('outcome_list : ', outcome_list) - - else: - raise ValueError( - "run_mode have to be passed for this script to run either as propse_action or get_outcome" - ) + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") + + with get_sight_instance() as sight: + + if (_RUN_MODE.value == 'propose_action'): + launch_dummy_optimizer(sight) + + with Block("Propose actions", sight): + with Attribute("project_id", "APR107", sight): + with Attribute("sample_id", "Id-1", sight): + with Attribute("managed", "1", sight): + # get actions containing fvs params from the fire model + actions_dict = {'a1': 1, 'a2': 1} + unique_action_id = decision.propose_actions(sight, actions_dict) + # actions_dict = {'a1': 3, 'a2': 3} + # unique_action_id = decision.propose_actions( + # sight, actions_dict) + # actions_dict = {'a1': 5, 'a2': 5} + # unique_action_id = decision.propose_actions( + # sight, actions_dict) + # print("unique_action_id : ", unique_action_id) + with Attribute("managed", "0", sight): + # get actions containing fvs params from the fire model + actions_dict = {'a1': 2, 'a2': 2} + unique_action_id = decision.propose_actions(sight, actions_dict) + # actions_dict = {'a1': 4, 'a2': 4} + # unique_action_id = decision.propose_actions( + # sight, actions_dict) + # actions_dict = {'a1': 6, 'a2': 6} + # unique_action_id = decision.propose_actions( + # sight, actions_dict) + + # print("unique_action_id : ", unique_action_id) + + # # spawn workers + # trials.start_jobs( + # num_train_workers=1, + # num_trials=1, + # binary_path='fvs_sight/fvs_worker.py', + # optimizer_type='worklist_scheduler', + # docker_image='gcr.io/cameltrain/sight-portfolio-worker', + # decision_mode='train', + # deployment_mode='worker_mode', + # worker_mode='dsub_cloud_worker', + # sight=sight, + # ) + + elif (_RUN_MODE.value == 'get_outcome'): + if (not FLAGS.sight_log_id): + raise ValueError( + "sight_log_id have to be passed from the proposed action run for get outcome" + ) + + request = service_pb2.GetOutcomeRequest() + request.client_id = str(FLAGS.sight_log_id) + action_list = [1] #, 3, 4, 5, 6 + # request.unique_ids.append(1) + request.unique_ids.extend(action_list) + response = service.call( + lambda s, meta: s.GetOutcome(request, 300, metadata=meta)) + + outcome_list = [] + id = 0 + for outcome in response.outcome: + if (outcome.status == + service_pb2.GetOutcomeResponse.Outcome.Status.PENDING): + print(f"action id : {action_list[id]} is pending...") + elif (outcome.status == + service_pb2.GetOutcomeResponse.Outcome.Status.ACTIVE): + print(f"action id : {action_list[id]} is active...") + elif (outcome.status == + service_pb2.GetOutcomeResponse.Outcome.Status.NOT_EXIST): + print(f"action id : {action_list[id]} does not exist...") + elif (outcome.status == + service_pb2.GetOutcomeResponse.Outcome.Status.COMPLETED): + print(f"action id : {action_list[id]} is completed...") + outcome_dict = {} + outcome_dict['action_id'] = outcome.action_id + outcome_dict['reward'] = outcome.reward + outcome_dict['action'] = param_proto_to_dict(outcome.action_attrs) + outcome_dict['outcome'] = param_proto_to_dict(outcome.outcome_attrs) + print('here : ', outcome_dict['outcome']['time_series'], + type(outcome_dict['outcome']['time_series'])) + 
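The if/elif ladder over `GetOutcomeResponse.Outcome.Status` in this hunk can also be table-driven, which keeps the per-status messages in one place; a sketch reusing the enum members named above:

```python
from sight_service.proto import service_pb2

Status = service_pb2.GetOutcomeResponse.Outcome.Status
STATUS_MESSAGES = {
    Status.PENDING: 'is pending...',
    Status.ACTIVE: 'is active...',
    Status.NOT_EXIST: 'does not exist...',
    Status.COMPLETED: 'is completed...',
}


def describe(action_id: int, status) -> str:
  # Fall back to the raw enum value for statuses the table does not know.
  return f'action id : {action_id} {STATUS_MESSAGES.get(status, status)}'
```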
outcome_dict['attributes'] = param_proto_to_dict(outcome.attributes) + outcome_list.append(outcome_dict) + + id += 1 + + print('outcome_list : ', outcome_list) + + else: + raise ValueError( + "run_mode have to be passed for this script to run either as propse_action or get_outcome" + ) if __name__ == "__main__": - app.run(main) + app.run(main) diff --git a/py/sight/demo/search_optimization.py b/py/sight/demo/search_optimization.py index bd29670..65ff164 100644 --- a/py/sight/demo/search_optimization.py +++ b/py/sight/demo/search_optimization.py @@ -26,95 +26,93 @@ def diff_abs(x) -> float: - """Differentiable variant of the absolute value function.""" - return math.sqrt(x * x + 0.1) + """Differentiable variant of the absolute value function.""" + return math.sqrt(x * x + 0.1) def driver(sight: Sight) -> None: - """Executes the logic of searching for a value. + """Executes the logic of searching for a value. Args: sight: The Sight logger object used to drive decisions. """ - for i in range(1): - target = random.randrange(0, 1000) - current = random.randrange(0, 1000) - - step = 0 - data_structures.log_var('current', current, sight) - data_structures.log_var('target', target, sight) - - step += 1 - while current != target and step < 100: - decision.decision_outcome('distance', -diff_abs(target - current), - sight) - - data_structures.log_var('current', current, sight) - choice = decision.decision_point( - 'move', - sight, - lambda: { - 'go1': (random.randrange( - current // 2 if current < target else target // 2, - current * 2 if current > target else target * 2, - ) - current), - # 'go2': - # random.randrange( - # current // 2 - # if current < target else target // 2, current * 2 - # if current > target else target * 2) - current, - # f'{math.ceil((target - current)/2) if target > current else math.floor((target - current)/2)}' - }, - ) - logging.info('choice=%s', choice) - - current += int( - choice['go1']) # + choice['go2']) #int((choice*2 - 1)*100) - logging.info( - '%d: %d: amount=%s, current=%s, target=%s', - i, - step, - int(choice['go1']), - # int(choice['go2']), - current, - target, - ) - step += 1 - decision.decision_outcome('distance', -diff_abs(target - current), - sight) + for i in range(1): + target = random.randrange(0, 1000) + current = random.randrange(0, 1000) + + step = 0 + data_structures.log_var('current', current, sight) + data_structures.log_var('target', target, sight) + + step += 1 + while current != target and step < 100: + decision.decision_outcome('distance', -diff_abs(target - current), sight) + + data_structures.log_var('current', current, sight) + choice = decision.decision_point( + 'move', + sight, + lambda: { + 'go1': (random.randrange( + current // 2 if current < target else target // 2, + current * 2 if current > target else target * 2, + ) - current), + # 'go2': + # random.randrange( + # current // 2 + # if current < target else target // 2, current * 2 + # if current > target else target * 2) - current, + # f'{math.ceil((target - current)/2) if target > current else math.floor((target - current)/2)}' + }, + ) + logging.info('choice=%s', choice) + + current += int( + choice['go1']) # + choice['go2']) #int((choice*2 - 1)*100) + logging.info( + '%d: %d: amount=%s, current=%s, target=%s', + i, + step, + int(choice['go1']), + # int(choice['go2']), + current, + target, + ) + step += 1 + decision.decision_outcome('distance', -diff_abs(target - current), sight) def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError('Too many 
command-line arguments.') - - params = sight_pb2.Params( - label='SearchOptimization', - log_owner='bronevet@google.com', - # local=True, - capacitor_output=True, - log_dir_path='/tmp/', - ) - - with Sight(params) as sight: - decision.run( - driver_fn=driver, - state_attrs={ - 'current': + if len(argv) > 1: + raise app.UsageError('Too many command-line arguments.') + + params = sight_pb2.Params( + label='SearchOptimization', + log_owner='bronevet@google.com', + # local=True, + capacitor_output=True, + log_dir_path='/tmp/', + ) + + with Sight(params) as sight: + decision.run( + driver_fn=driver, + state_attrs={ + 'current': sight_pb2.DecisionConfigurationStart.AttrProps(min_value=0, max_value=1000), - 'target': + 'target': sight_pb2.DecisionConfigurationStart.AttrProps(min_value=0, max_value=1000), - }, - action_attrs={ - 'go1': + }, + action_attrs={ + 'go1': sight_pb2.DecisionConfigurationStart.AttrProps(min_value=0, max_value=100), - }, - sight=sight, - ) + }, + sight=sight, + ) if __name__ == '__main__': - app.run(main) + app.run(main) diff --git a/py/sight/demo/secret_find.py b/py/sight/demo/secret_find.py index 07af7cf..9da3b31 100644 --- a/py/sight/demo/secret_find.py +++ b/py/sight/demo/secret_find.py @@ -28,51 +28,51 @@ def driver(sight: Sight) -> None: - """Executes the logic of searching for a value. + """Executes the logic of searching for a value. Args: sight: The Sight logger object used to drive decisions. """ - secret_num = random.randrange(0, 1000) - logging.info('secret_num=%s', secret_num) - choice = decision.decision_point('move', sight) - logging.info('choice=%s, error=%s', choice, choice['guess'] - secret_num) + secret_num = random.randrange(0, 1000) + logging.info('secret_num=%s', secret_num) + choice = decision.decision_point('move', sight) + logging.info('choice=%s, error=%s', choice, choice['guess'] - secret_num) - decision.decision_outcome('distance', -abs(choice['guess'] - secret_num), - sight) + decision.decision_outcome('distance', -abs(choice['guess'] - secret_num), + sight) - proposed_guess = secret_num + (choice['guess'] - secret_num) / 2 - logging.info('proposed_guess=%s', proposed_guess) - decision.propose_action(-abs(choice['guess'] - secret_num) / 2, - {'guess': proposed_guess}, sight) + proposed_guess = secret_num + (choice['guess'] - secret_num) / 2 + logging.info('proposed_guess=%s', proposed_guess) + decision.propose_action(-abs(choice['guess'] - secret_num) / 2, + {'guess': proposed_guess}, sight) def get_sight_instance(): - params = sight_pb2.Params( - label='secret_find_experiment', - bucket_name=f'{os.environ["PROJECT_ID"]}-sight', - ) - sight_obj = Sight(params) - return sight_obj + params = sight_pb2.Params( + label='secret_find_experiment', + bucket_name=f'{os.environ["PROJECT_ID"]}-sight', + ) + sight_obj = Sight(params) + return sight_obj def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError('Too many command-line arguments.') + if len(argv) > 1: + raise app.UsageError('Too many command-line arguments.') - with get_sight_instance() as sight: - decision.run( - driver_fn=driver, - state_attrs={}, - action_attrs={ - 'guess': + with get_sight_instance() as sight: + decision.run( + driver_fn=driver, + state_attrs={}, + action_attrs={ + 'guess': sight_pb2.DecisionConfigurationStart.AttrProps(min_value=0, max_value=1000, step_size=10), - }, - sight=sight, - ) + }, + sight=sight, + ) if __name__ == '__main__': - app.run(main) + app.run(main) diff --git a/py/sight/demo/shower_demo_with_env.py 
b/py/sight/demo/shower_demo_with_env.py index a423d29..8b17953 100644 --- a/py/sight/demo/shower_demo_with_env.py +++ b/py/sight/demo/shower_demo_with_env.py @@ -11,11 +11,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Demo of using the environment created using dm_env to train shower maintenance system.""" import os from typing import Sequence + from absl import app from absl import flags from sight.proto import sight_pb2 @@ -25,6 +25,7 @@ FLAGS = flags.FLAGS + def get_sight_instance(): params = sight_pb2.Params( label='shower_experiment', diff --git a/py/sight/demo/shower_demo_without_env.py b/py/sight/demo/shower_demo_without_env.py index 4d3a302..91e044f 100644 --- a/py/sight/demo/shower_demo_without_env.py +++ b/py/sight/demo/shower_demo_without_env.py @@ -11,11 +11,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Demo of using the Sight Decision API to train custom shower env.""" import os from typing import Sequence + from absl import app from absl import flags from sight.proto import sight_pb2 @@ -25,6 +25,7 @@ FLAGS = flags.FLAGS + def get_sight_instance(): params = sight_pb2.Params( label='shower_experiment', @@ -43,18 +44,20 @@ def main(argv: Sequence[str]) -> None: sight=sight, driver_fn=driver_fn, state_attrs={ - "Temperature": sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=0, - max_value=100, - # step_size=1, - ), + "Temperature": + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=0, + max_value=100, + # step_size=1, + ), }, action_attrs={ - "Direction": sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=-2, - max_value=2, - # step_size=1, - ), + "Direction": + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=-2, + max_value=2, + # step_size=1, + ), }, ) diff --git a/py/sight/demo/sir.py b/py/sight/demo/sir.py index 287b05f..4efb783 100644 --- a/py/sight/demo/sir.py +++ b/py/sight/demo/sir.py @@ -13,6 +13,7 @@ # limitations under the License. 
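The shower demos above declare a Temperature state in [0, 100] and a Direction action in [-2, 2]; those bounds only make sense against the environment's step logic. A toy version of such a step function (the comfort band and noise are illustrative guesses, not the repo's `shower_env_driver`):

```python
import random


def shower_step(temperature: float, direction: float):
  # Apply the chosen adjustment plus a small disturbance.
  temperature += direction + random.uniform(-0.5, 0.5)
  # Reward comfort: +1 inside the comfortable band, -1 outside it.
  reward = 1.0 if 37.0 <= temperature <= 39.0 else -1.0
  return temperature, reward


temp = 38.0
for _ in range(5):
  temp, reward = shower_step(temp, random.choice([-2, -1, 0, 1, 2]))
  print(f'temperature={temp:.1f} reward={reward}')
```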
"""Simulation of the Susceptible Infected Recovered model using Sight.""" +import os from typing import Dict, Sequence from absl import app @@ -26,7 +27,6 @@ from sight.widgets.simulation.simulation import Simulation from sight.widgets.simulation.simulation_state import SimulationState from sight.widgets.simulation.simulation_time_step import SimulationTimeStep -import os _LAST_TS = flags.DEFINE_integer('last_ts', 10, 'The final day of the simulation.') @@ -39,65 +39,65 @@ def driver(sight: Sight) -> None: - """Solves Lotka-Volterra equations using explicit Euler method.""" - dt = .1 + """Solves Lotka-Volterra equations using explicit Euler method.""" + dt = .1 - # data_structures.log_var('S', S, sight) - # data_structures.log_var('I', I, sight) - # data_structures.log_var('R', R, sight) - action = decision.decision_point('init', sight) - print('dt=%s, action=%s' % (dt, action)) - I, R = 1, 0 - S = int(action['population']) - I - R + # data_structures.log_var('S', S, sight) + # data_structures.log_var('I', I, sight) + # data_structures.log_var('R', R, sight) + action = decision.decision_point('init', sight) + print('dt=%s, action=%s' % (dt, action)) + I, R = 1, 0 + S = int(action['population']) - I - R - hist = [] - for idx in range(int(int(action['num_days']) / dt) - 1): - dotS = -action['beta'] * S * I / int(action['population']) - dotI = action['beta'] * S * I / int( - action['population']) - action['gamma'] * I - dotR = action['gamma'] * I + hist = [] + for idx in range(int(int(action['num_days']) / dt) - 1): + dotS = -action['beta'] * S * I / int(action['population']) + dotI = action['beta'] * S * I / int( + action['population']) - action['gamma'] * I + dotR = action['gamma'] * I - S += dotS * dt - I += dotI * dt - R += dotR * dt + S += dotS * dt + I += dotI * dt + R += dotR * dt - print('%d: S=(%s/d%s), dotI=(%s/d%s), dotR=(%s/d%s)' % - (idx, S, dotS, I, dotI, R, dotR)) + print('%d: S=(%s/d%s), dotI=(%s/d%s), dotR=(%s/d%s)' % + (idx, S, dotS, I, dotI, R, dotR)) - # data_structures.log_var('S', S, sight) - # data_structures.log_var('I', I, sight) - # data_structures.log_var('R', R, sight) - hist.append([S, I, R]) - data_structures.log_var('time series', - pd.DataFrame(hist, columns=['S', 'I', 'R']), sight) - decision.decision_outcome('out', - sight, - reward=R, - outcome={ - 'S': S, - 'I': I, - 'R': R - }) + # data_structures.log_var('S', S, sight) + # data_structures.log_var('I', I, sight) + # data_structures.log_var('R', R, sight) + hist.append([S, I, R]) + data_structures.log_var('time series', + pd.DataFrame(hist, columns=['S', 'I', 'R']), sight) + decision.decision_outcome('out', + sight, + reward=R, + outcome={ + 'S': S, + 'I': I, + 'R': R + }) def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError('Too many command-line arguments.') + if len(argv) > 1: + raise app.UsageError('Too many command-line arguments.') - with Sight( - sight_pb2.Params( - label='SIR', - bucket_name=f'{os.environ["PROJECT_ID"]}-sight', - )) as sight: - decision.run( - driver_fn=driver, - description=''' + with Sight( + sight_pb2.Params( + label='SIR', + bucket_name=f'{os.environ["PROJECT_ID"]}-sight', + )) as sight: + decision.run( + driver_fn=driver, + description=''' I am building an SIR model to analyze the progress of Measles infections in Los Angeles during the summer of 2020. I need to configure this model's parameters based on data from the Los Angeles County Department of Public Health. 
''', - state_attrs={}, - action_attrs={ - 'population': + state_attrs={}, + action_attrs={ + 'population': sight_pb2.DecisionConfigurationStart.AttrProps( min_value=0, max_value=_MAX_POP.value, @@ -106,7 +106,7 @@ def main(argv: Sequence[str]) -> None: discrete_prob_dist=sight_pb2.DiscreteProbDist( uniform=sight_pb2.DiscreteProbDist.Uniform( min_val=0, max_val=_MAX_POP.value))), - 'num_days': + 'num_days': sight_pb2.DecisionConfigurationStart.AttrProps( min_value=0, max_value=_MAX_DAYS.value, @@ -115,7 +115,7 @@ def main(argv: Sequence[str]) -> None: discrete_prob_dist=sight_pb2.DiscreteProbDist( uniform=sight_pb2.DiscreteProbDist.Uniform( min_val=0, max_val=_MAX_DAYS.value))), - 'beta': + 'beta': sight_pb2.DecisionConfigurationStart.AttrProps( min_value=0, max_value=.2, @@ -123,7 +123,7 @@ def main(argv: Sequence[str]) -> None: continuous_prob_dist=sight_pb2.ContinuousProbDist( uniform=sight_pb2.ContinuousProbDist.Uniform( min_val=0, max_val=.2))), - 'gamma': + 'gamma': sight_pb2.DecisionConfigurationStart.AttrProps( min_value=0, max_value=.2, @@ -131,33 +131,33 @@ def main(argv: Sequence[str]) -> None: continuous_prob_dist=sight_pb2.ContinuousProbDist( uniform=sight_pb2.ContinuousProbDist.Uniform( min_val=0, max_val=.2))), - }, - outcome_attrs={ - 'S': + }, + outcome_attrs={ + 'S': sight_pb2.DecisionConfigurationStart.AttrProps( min_value=0, max_value=_MAX_POP.value, description= 'The number of people who are susceptible to the disease.', ), - 'I': + 'I': sight_pb2.DecisionConfigurationStart.AttrProps( min_value=0, max_value=_MAX_POP.value, description= 'The number of people who are infected by the disease.', ), - 'R': + 'R': sight_pb2.DecisionConfigurationStart.AttrProps( min_value=0, max_value=_MAX_POP.value, description= 'The number of people who have recovered from the disease.', ), - }, - sight=sight, - ) + }, + sight=sight, + ) if __name__ == '__main__': - app.run(main) + app.run(main) diff --git a/py/sight/demo/spawn_workers.py b/py/sight/demo/spawn_workers.py index 5ab5710..73dc8fe 100644 --- a/py/sight/demo/spawn_workers.py +++ b/py/sight/demo/spawn_workers.py @@ -13,10 +13,10 @@ # limitations under the License. 
"""Binary to spawn multiple workers with given file.""" -import os +from datetime import datetime import math +import os import subprocess -from datetime import datetime from typing import Any, Callable, Dict, Optional, Sequence, Text, Tuple from absl import app @@ -30,31 +30,31 @@ def get_sight_instance(): - print('creating sight object') - params = sight_pb2.Params( - label='original_demo', - bucket_name=f'{os.environ["PROJECT_ID"]}-sight', - ) - sight_obj = Sight(params) - return sight_obj + print('creating sight object') + params = sight_pb2.Params( + label='original_demo', + bucket_name=f'{os.environ["PROJECT_ID"]}-sight', + ) + sight_obj = Sight(params) + return sight_obj def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError("Too many command-line arguments.") - - with get_sight_instance() as sight: - trials.start_jobs( - num_train_workers=1, - binary_path='py/sight/demo/demo.py', - optimizer_type='worklist_scheduler', - docker_image='gcr.io/cameltrain/sight-portfolio-worker', - decision_mode='train', - deployment_mode='worker_mode', - worker_mode='dsub_cloud_worker', - sight=sight, - ) + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") + + with get_sight_instance() as sight: + trials.start_jobs( + num_train_workers=1, + binary_path='py/sight/demo/demo.py', + optimizer_type='worklist_scheduler', + docker_image='gcr.io/cameltrain/sight-portfolio-worker', + decision_mode='train', + deployment_mode='worker_mode', + worker_mode='dsub_cloud_worker', + sight=sight, + ) if __name__ == "__main__": - app.run(main) + app.run(main) diff --git a/py/sight/demo/stream.py b/py/sight/demo/stream.py index 6294598..e326ea2 100644 --- a/py/sight/demo/stream.py +++ b/py/sight/demo/stream.py @@ -11,13 +11,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Demo of an application that streams its output over time.""" +import math import os +import time + from absl import app from absl import flags -import math import numpy as np import pandas as pd from sight import data_structures @@ -25,27 +26,30 @@ from sight.block import Block from sight.proto import sight_pb2 from sight.sight import Sight -import time FLAGS = flags.FLAGS + def main(argv): if len(argv) > 1: raise app.UsageError("Too many command-line arguments.") - with Sight(sight_pb2.Params( - label='demo file', - bucket_name=f'{os.environ["PROJECT_ID"]}-sight', - )) as sight: + with Sight( + sight_pb2.Params( + label='demo file', + bucket_name=f'{os.environ["PROJECT_ID"]}-sight', + )) as sight: for ts in range(1000): print('ts=%s' % ts) with Block("Time step", '%08d' % ts, sight): sight.text('Time!') - data_structures.log_var('state', - pd.DataFrame( - {'Val': [math.sin(math.radians(ts + i)) for i in range(100)]}), - sight) + data_structures.log_var( + 'state', + pd.DataFrame( + {'Val': [math.sin(math.radians(ts + i)) for i in range(100)]}), + sight) time.sleep(5) + if __name__ == "__main__": app.run(main) diff --git a/py/sight/demo/sweetness.py b/py/sight/demo/sweetness.py index 145055f..2d3c812 100644 --- a/py/sight/demo/sweetness.py +++ b/py/sight/demo/sweetness.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- """Demo of using the Sight Decision API to train sweetness controller.""" import warnings @@ -35,6 +34,7 @@ def warn(*args, **kwargs): FLAGS = flags.FLAGS + def driver(sight: Sight) -> None: """Executes the logic of searching for a value. @@ -47,19 +47,17 @@ def driver(sight: Sight) -> None: for _ in range(1): choice = decision.decision_point("candy", sight) - sight.text( - "sweet_tooth=%s, choice=%s, joy=%s" - % ( - sweet_tooth, - choice["sweetness"], - float(choice["sweetness"]) * sweet_tooth, - ) - ) + sight.text("sweet_tooth=%s, choice=%s, joy=%s" % ( + sweet_tooth, + choice["sweetness"], + float(choice["sweetness"]) * sweet_tooth, + )) reward = float(choice["sweetness"]) * sweet_tooth decision.decision_outcome("joy", sight, reward) + def get_sight_instance(): params = sight_pb2.Params( label='sweetness_experiment', @@ -68,6 +66,7 @@ def get_sight_instance(): sight_obj = Sight(params) return sight_obj + def main(argv: Sequence[str]) -> None: if len(argv) > 1: raise app.UsageError("Too many command-line arguments.") @@ -76,18 +75,20 @@ def main(argv: Sequence[str]) -> None: decision.run( driver_fn=driver, state_attrs={ - "sweet_tooth": sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=0, - max_value=10, - step_size=1, - ), + "sweet_tooth": + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=0, + max_value=10, + step_size=1, + ), }, action_attrs={ - "sweetness": sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=0, - max_value=3, - step_size=1, - ), + "sweetness": + sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=0, + max_value=3, + step_size=1, + ), }, sight=sight, ) diff --git a/py/sight/demo/test_rpc.py b/py/sight/demo/test_rpc.py index 2125ec8..f00c28b 100644 --- a/py/sight/demo/test_rpc.py +++ b/py/sight/demo/test_rpc.py @@ -1,24 +1,20 @@ -from absl import app from typing import Sequence -from sight_service.proto import service_pb2 +from absl import app from sight import service_utils as service from sight.widgets.decision import decision - - +from sight_service.proto import service_pb2 def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError("Too many command-line arguments.") + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") - req = service_pb2.TestRequest() - response = service.call( - lambda s, meta: s.Test(req, 300, metadata=meta) - ) + req = service_pb2.TestRequest() + response = service.call(lambda s, meta: s.Test(req, 300, metadata=meta)) - print(response) + print(response) if __name__ == "__main__": - app.run(main) + app.run(main) diff --git a/py/sight/demo/volterra_lotka.py b/py/sight/demo/volterra_lotka.py index 88756be..9c53a67 100644 --- a/py/sight/demo/volterra_lotka.py +++ b/py/sight/demo/volterra_lotka.py @@ -13,21 +13,21 @@ # limitations under the License. 
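`test_rpc.py` above drives an RPC by handing `service.call` a lambda over a stub and metadata. A minimal sketch of what such a helper can look like with plain gRPC (the channel target, metadata, and stub class name are assumptions; the repo's `service_utils.call` adds auth and endpoint resolution on top of this):

```python
import grpc

from sight_service.proto import service_pb2
from sight_service.proto import service_pb2_grpc


def call(invoke):
  # Open a channel, build a stub, and let the caller drive the RPC.
  with grpc.insecure_channel('localhost:8080') as channel:
    stub = service_pb2_grpc.SightServiceStub(channel)  # stub name assumed
    metadata = [('sight-client', 'demo')]
    return invoke(stub, metadata)


# Same call shape as the demo: request, timeout in seconds, metadata.
response = call(lambda s, meta: s.Test(service_pb2.TestRequest(), 300,
                                       metadata=meta))
print(response)
```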
"""Simulation of the Lotka-Volterra equations using Sight.""" +import math +# from sight.widgets.simulation.simulation import Simulation +# from sight.widgets.simulation.simulation_state import SimulationState +# from sight.widgets.simulation.simulation_time_step import SimulationTimeStep +import os from typing import Dict, Sequence from absl import app from absl import flags from helpers.logs.logs_handler import logger as logging -import math import numpy as np from sight import data_structures from sight.proto import sight_pb2 from sight.sight import Sight from sight.widgets.decision import decision -# from sight.widgets.simulation.simulation import Simulation -# from sight.widgets.simulation.simulation_state import SimulationState -# from sight.widgets.simulation.simulation_time_step import SimulationTimeStep -import os _LAST_TS = flags.DEFINE_integer('last_ts', 10, 'The final day of the simulation.') @@ -45,92 +45,92 @@ def default_params() -> Dict[str, float]: - """Returns the run's default configuration parameters. + """Returns the run's default configuration parameters. These are used if the Decision API doesn't set them to something else while searching for a good configuration. """ - return { - 'R0': _R0.value, - 'F0': _F0.value, - 'alpha': _ALPHA.value, - 'beta': _BETA.value, - 'gamma': _GAMMA.value, - 'delta': _DELTA.value, - } + return { + 'R0': _R0.value, + 'F0': _F0.value, + 'alpha': _ALPHA.value, + 'beta': _BETA.value, + 'gamma': _GAMMA.value, + 'delta': _DELTA.value, + } def driver(sight: Sight) -> None: - """Solves Lotka-Volterra equations using explicit Euler method.""" - steps = np.linspace(0, _LAST_TS.value, _NUM_ITERS.value) - # logging.info('steps=%s', steps) - data_structures.log_var('R', 0, sight) - data_structures.log_var('F', 0, sight) - action = decision.decision_point('init', sight) #, default_params) - logging.info('action=%s', action) - print(len(steps)) - for idx in range(len(steps) - 1): - # with SimulationTimeStep( - # time_step_index=[idx], - # time_step=steps[idx], - # time_step_size=_LAST_TS.value / _NUM_ITERS.value, - # time_step_units=sight_pb2.SimulationTimeStepStart.TSU_UNKNOWN, - # sight=sight, - # ): - - if idx == 0: - r = action['R0'] - f = action['F0'] - alpha = action['alpha'] - beta = action['beta'] - gamma = action['gamma'] - delta = action['delta'] - - dt = steps[idx + 1] - steps[idx] - last_r = r - # logging.info('%s: dt=%s', idx, dt) - r = r * (1 + alpha * dt - gamma * dt * f) - f = f * (1 - beta * dt + delta * dt * last_r) - # logging.info('%s: r=%s, f=%s', idx, r, f) - - logging.info('r=%s', r) - if math.isinf(r): - decision.decision_outcome('prey_pop', -1000, sight) - else: - decision.decision_outcome('prey_pop', - r if r < 100 else 100 - 3 * (r - 100), sight) - - # with SimulationState({}, sight): - # data_structures.log_var('R', r, sight) - # data_structures.log_var('F', f, sight) + """Solves Lotka-Volterra equations using explicit Euler method.""" + steps = np.linspace(0, _LAST_TS.value, _NUM_ITERS.value) + # logging.info('steps=%s', steps) + data_structures.log_var('R', 0, sight) + data_structures.log_var('F', 0, sight) + action = decision.decision_point('init', sight) #, default_params) + logging.info('action=%s', action) + print(len(steps)) + for idx in range(len(steps) - 1): + # with SimulationTimeStep( + # time_step_index=[idx], + # time_step=steps[idx], + # time_step_size=_LAST_TS.value / _NUM_ITERS.value, + # time_step_units=sight_pb2.SimulationTimeStepStart.TSU_UNKNOWN, + # sight=sight, + # ): + + if idx == 0: + r = 
action['R0'] + f = action['F0'] + alpha = action['alpha'] + beta = action['beta'] + gamma = action['gamma'] + delta = action['delta'] + + dt = steps[idx + 1] - steps[idx] + last_r = r + # logging.info('%s: dt=%s', idx, dt) + r = r * (1 + alpha * dt - gamma * dt * f) + f = f * (1 - beta * dt + delta * dt * last_r) + # logging.info('%s: r=%s, f=%s', idx, r, f) + + logging.info('r=%s', r) + if math.isinf(r): + decision.decision_outcome('prey_pop', -1000, sight) + else: + decision.decision_outcome('prey_pop', r if r < 100 else 100 - 3 * (r - 100), + sight) + + # with SimulationState({}, sight): + # data_structures.log_var('R', r, sight) + # data_structures.log_var('F', f, sight) def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError('Too many command-line arguments.') - - with Sight( - sight_pb2.Params( - label='Volterra-Lotka', - bucket_name=f'{os.environ["PROJECT_ID"]}-sight', - text_output=True, - )) as sight: - # Simulation.run_decision_configuration( - # label='Volterra-Lotka', - # parameters={ - # 'LAST_TS': _LAST_TS.value, - # '_NUM_ITERS': _NUM_ITERS.value, - # 'R0': _R0.value, - # 'F0': _F0.value, - # 'alpha': _ALPHA.value, - # 'beta': _BETA.value, - # 'gamma': _GAMMA.value, - # 'delta': _DELTA.value, - # }, - # reference_trace_file_path=flags.FLAGS.reference_run_file, - decision.run( - driver_fn=driver, - description=''' + if len(argv) > 1: + raise app.UsageError('Too many command-line arguments.') + + with Sight( + sight_pb2.Params( + label='Volterra-Lotka', + bucket_name=f'{os.environ["PROJECT_ID"]}-sight', + text_output=True, + )) as sight: + # Simulation.run_decision_configuration( + # label='Volterra-Lotka', + # parameters={ + # 'LAST_TS': _LAST_TS.value, + # '_NUM_ITERS': _NUM_ITERS.value, + # 'R0': _R0.value, + # 'F0': _F0.value, + # 'alpha': _ALPHA.value, + # 'beta': _BETA.value, + # 'gamma': _GAMMA.value, + # 'delta': _DELTA.value, + # }, + # reference_trace_file_path=flags.FLAGS.reference_run_file, + decision.run( + driver_fn=driver, + description=''' The Lotka-Volterra equations, also known as the Lotka-Volterra predator-prey model, are a pair of first-order nonlinear differential equations, frequently used to describe the dynamics of biological systems in which two species interact, one as a predator and the other as prey. The prey are assumed to have an unlimited food supply and to reproduce exponentially, unless subject to predation; this exponential growth is represented in the equation above by the term αx. The rate of predation on the prey is assumed to be proportional to the rate at which the predators and the prey meet; this is represented above by βxy. If either x or y is zero, then there can be no predation. With these two terms the prey equation above can be interpreted as follows: the rate of change of the prey's population is given by its own growth rate minus the rate at which it is preyed upon. @@ -144,66 +144,65 @@ def main(argv: Sequence[str]) -> None: Predators have limitless appetite. Both populations can be described by a single variable. This amounts to assuming that the populations do not have a spatial or age distribution that contributes to the dynamics. 
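The pair of equations this description refers to, in standard form:

$$\frac{dx}{dt} = \alpha x - \beta x y, \qquad \frac{dy}{dt} = \delta x y - \gamma y,$$

with $x$ the prey and $y$ the predator population. Note that the explicit-Euler updates in `driver` above apply the predation term to the prey with `gamma` and the death term to the predators with `beta`, so relative to this standard naming (and to the action descriptions below) the roles of beta and gamma are swapped in the code.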
''', - state_attrs={ - 'R': + state_attrs={ + 'R': sight_pb2.DecisionConfigurationStart.AttrProps( min_value=0, max_value=100, - description='The number of prey animals in the population' - ), - 'F': + description='The number of prey animals in the population'), + 'F': sight_pb2.DecisionConfigurationStart.AttrProps( min_value=0, max_value=100, description= 'The number of predator animals in the population'), - }, - action_attrs={ - 'R0': + }, + action_attrs={ + 'R0': sight_pb2.DecisionConfigurationStart.AttrProps( min_value=0, max_value=20, description= 'The number of predator animals in the population at the start of the simulation.' ), - 'F0': + 'F0': sight_pb2.DecisionConfigurationStart.AttrProps( min_value=0, max_value=20, description= 'The number of prey animals in the population at the start of the simulation.' ), - 'alpha': + 'alpha': sight_pb2.DecisionConfigurationStart.AttrProps( min_value=0, max_value=20, description='The growth rate of the prey.', ), - 'beta': + 'beta': sight_pb2.DecisionConfigurationStart.AttrProps( min_value=0, max_value=20, description= 'The effect of the presence of predators on the prey growth rate, for example by predator eating the prey.' ), - 'gamma': + 'gamma': sight_pb2.DecisionConfigurationStart.AttrProps( min_value=0, max_value=20, description= 'The death rate of the predators independent of the prey.', ), - 'delta': + 'delta': sight_pb2.DecisionConfigurationStart.AttrProps( min_value=0, max_value=20, description= 'The effect of the presence of prey on the predator\'s growth rate, for example how the predator eating the prey affects the predator population.', ), - }, - sight=sight, - ) + }, + sight=sight, + ) if __name__ == '__main__': - app.run(main) + app.run(main) diff --git a/py/sight/exception.py b/py/sight/exception.py index f6549b1..c9a011e 100644 --- a/py/sight/exception.py +++ b/py/sight/exception.py @@ -18,7 +18,7 @@ def exception(exc_type, value, traceback, sight, frame): - """Documents an exception events in the Sight log if Sight is being used. + """Documents an exception events in the Sight log if Sight is being used. Args: exc_type: The exc_type of the exception that was thrown @@ -28,15 +28,15 @@ def exception(exc_type, value, traceback, sight, frame): not being used. frame: The call stack frame that contains the calling context information. 
""" - logging.exception( - 'Exception: exc_type=%s, value=%s, traceback=%s', - str(exc_type), - str(value), - str(traceback), - ) - if sight is not None: - sight.enter_block('Exception', sight_pb2.Object(), frame=frame) - sight.text_block('exc_type', str(exc_type), frame=frame) - sight.text_block('value', str(value), frame=frame) - sight.text_block('traceback', str(traceback), frame=frame) - sight.exit_block('Exception', sight_pb2.Object(), frame=frame) + logging.exception( + 'Exception: exc_type=%s, value=%s, traceback=%s', + str(exc_type), + str(value), + str(traceback), + ) + if sight is not None: + sight.enter_block('Exception', sight_pb2.Object(), frame=frame) + sight.text_block('exc_type', str(exc_type), frame=frame) + sight.text_block('value', str(value), frame=frame) + sight.text_block('traceback', str(traceback), frame=frame) + sight.exit_block('Exception', sight_pb2.Object(), frame=frame) diff --git a/py/sight/gcs_utils.py b/py/sight/gcs_utils.py index d9eebb6..edd739c 100644 --- a/py/sight/gcs_utils.py +++ b/py/sight/gcs_utils.py @@ -16,14 +16,14 @@ import os import subprocess -from helpers.logs.logs_handler import logger as logging from google.cloud import bigquery from google.cloud import storage +from helpers.logs.logs_handler import logger as logging from sight.proto import sight_pb2 def upload_blob_from_stream(bucket_name, gcp_path, file_obj, file_name, count): - """uploads given file to the bucket. + """uploads given file to the bucket. Args: bucket_name: name of the bucket to store the file @@ -32,20 +32,20 @@ def upload_blob_from_stream(bucket_name, gcp_path, file_obj, file_name, count): file_name: name given to file count: chunk number of file """ - storage_client = storage.Client() - bucket = storage_client.bucket(bucket_name) - if not bucket.exists(): - # logging.info(f"creating bucket {bucket_name}, as it didn't exist....") - bucket = storage_client.create_bucket(bucket_name) - - blob_name = gcp_path + '/' + file_name + '_' + str(count) + '.avro' - blob = bucket.blob(blob_name) - # Rewind the stream to the beginning. This step can be omitted if the input - # stream will always be at a correct position. - file_obj.seek(0) - # Upload data from the stream to your bucket. - blob.upload_from_file(file_obj) - # logging.info(f'Stream data uploaded to {blob_name} in bucket {bucket_name}.') + storage_client = storage.Client() + bucket = storage_client.bucket(bucket_name) + if not bucket.exists(): + # logging.info(f"creating bucket {bucket_name}, as it didn't exist....") + bucket = storage_client.create_bucket(bucket_name) + + blob_name = gcp_path + '/' + file_name + '_' + str(count) + '.avro' + blob = bucket.blob(blob_name) + # Rewind the stream to the beginning. This step can be omitted if the input + # stream will always be at a correct position. + file_obj.seek(0) + # Upload data from the stream to your bucket. + blob.upload_from_file(file_obj) + # logging.info(f'Stream data uploaded to {blob_name} in bucket {bucket_name}.') def create_table( @@ -55,7 +55,7 @@ def create_table( external_file_format, external_file_uri, ): - """Create BigQuery external table mapping to file in GCS bucket. + """Create BigQuery external table mapping to file in GCS bucket. Args: project_id: GCP projectId. 
@@ -69,56 +69,56 @@ def create_table( Returns: """ - try: - # Check if the dataset exists - client = bigquery.Client(project_id) - dataset = client.get_dataset(dataset_name) - # logging.info(f"Dataset {dataset_name} already exists.") - except Exception as e: - # If the dataset does not exist, create a new dataset - dataset = bigquery.Dataset(f"{project_id}.{dataset_name}") - dataset = client.create_dataset(dataset) - # logging.info(f"Dataset {dataset_name} created.") - - # logging.info( - # 'Creating external table %s mapping to : %s.', - # table_name, - # external_file_uri, - # ) - try: - client = bigquery.Client(project_id) - dataset_ref = client.dataset(dataset_name) - table_ref = bigquery.TableReference(dataset_ref, table_name) - table = bigquery.Table(table_ref) - - external_config = bigquery.ExternalConfig(external_file_format) - external_config.source_uris = [external_file_uri] - table.external_data_configuration = external_config - client.create_table(table) - # logging.info('%s table successfully created.', table_name) - except Exception as e: - logging.info(f"Error creating table: {e}") + try: + # Check if the dataset exists + client = bigquery.Client(project_id) + dataset = client.get_dataset(dataset_name) + # logging.info(f"Dataset {dataset_name} already exists.") + except Exception as e: + # If the dataset does not exist, create a new dataset + dataset = bigquery.Dataset(f"{project_id}.{dataset_name}") + dataset = client.create_dataset(dataset) + # logging.info(f"Dataset {dataset_name} created.") + + # logging.info( + # 'Creating external table %s mapping to : %s.', + # table_name, + # external_file_uri, + # ) + try: + client = bigquery.Client(project_id) + dataset_ref = client.dataset(dataset_name) + table_ref = bigquery.TableReference(dataset_ref, table_name) + table = bigquery.Table(table_ref) + + external_config = bigquery.ExternalConfig(external_file_format) + external_config.source_uris = [external_file_uri] + table.external_data_configuration = external_config + client.create_table(table) + # logging.info('%s table successfully created.', table_name) + except Exception as e: + logging.info(f"Error creating table: {e}") def create_external_bq_table(params: sight_pb2.Params, file_name: str, client_id: int): - """create external table in BigQuery from avro files using URI, located in the bucket. + """create external table in BigQuery from avro files using URI, located in the bucket. Args: params: sight parameters to get details of the files file_name: name of the file client_id: sight client id """ - external_file_uri = ( - params.external_file_uri + params.bucket_name + '/' + params.gcp_path + - '/' - # + '/client_' - + params.label + '_' + str(client_id) + '/' + '*' + params.file_format) - if 'PARENT_LOG_ID' not in os.environ: - create_table( - os.environ["PROJECT_ID"], - params.dataset_name, - file_name, - params.external_file_format, - external_file_uri, - ) + external_file_uri = ( + params.external_file_uri + params.bucket_name + '/' + params.gcp_path + + '/' + # + '/client_' + + params.label + '_' + str(client_id) + '/' + '*' + params.file_format) + if 'PARENT_LOG_ID' not in os.environ: + create_table( + os.environ["PROJECT_ID"], + params.dataset_name, + file_name, + params.external_file_format, + external_file_uri, + ) diff --git a/py/sight/location.py b/py/sight/location.py index ec4b44e..bada768 100644 --- a/py/sight/location.py +++ b/py/sight/location.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
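`create_table` above maps Avro files in GCS into BigQuery as an external table rather than loading them. The core BigQuery calls, reduced to their essentials (project, dataset, and URI are placeholders):

```python
from google.cloud import bigquery

client = bigquery.Client('my-project')      # placeholder project
dataset_ref = client.dataset('sight_logs')  # placeholder dataset
table = bigquery.Table(bigquery.TableReference(dataset_ref, 'demo_log'))

# Point the table at files in GCS instead of loading their contents.
external_config = bigquery.ExternalConfig('AVRO')
external_config.source_uris = ['gs://my-project-sight/sight-logs/*.avro']
table.external_data_configuration = external_config
client.create_table(table)
```

Queries against `sight_logs.demo_log` then read the Avro files at query time, so newly uploaded chunks become visible without reloading the table.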
# See the License for the specific language governing permissions and # limitations under the License. - """Unique IDs for locations within the Sight log.""" @@ -21,7 +20,7 @@ class Location(object): def __init__(self): self.id = [0] - def clone(self):# -> Location: + def clone(self): # -> Location: new_loc = Location() new_loc.id = list(self.id) return new_loc diff --git a/py/sight/location_test.py b/py/sight/location_test.py index 4454333..749c931 100644 --- a/py/sight/location_test.py +++ b/py/sight/location_test.py @@ -11,11 +11,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Tests for google3.googlex.cortex.sight.py.location.""" -from sight import Location from absl.testing import absltest +from sight import Location class LocationTest(absltest.TestCase): @@ -56,9 +55,8 @@ def testEnterExitLocation(self): self.assertEqual(loc.pos(), i) # ASSERT - self.assertEqual( - str(loc), '0000000000:0000000001:0000000002:0000000003:0000000004' - ) + self.assertEqual(str(loc), + '0000000000:0000000001:0000000002:0000000003:0000000004') for i in range(4, 0, -1): # ACT @@ -85,9 +83,8 @@ def testNextLocation(self): self.assertEqual(loc.pos(), i + j) # ASSERT - self.assertEqual( - str(loc), '0000000000:0000000005:0000000006:0000000007:0000000008' - ) + self.assertEqual(str(loc), + '0000000000:0000000005:0000000006:0000000007:0000000008') def testNextAllLocation(self): # SETUP @@ -100,9 +97,8 @@ def testNextAllLocation(self): loc.next_all() # ASSERT - self.assertEqual( - str(loc), '0000000001:0000000002:0000000003:0000000004:0000000005' - ) + self.assertEqual(str(loc), + '0000000001:0000000002:0000000003:0000000004:0000000005') for i in range(4, 0, -1): # ACT diff --git a/py/sight/proto/example_pb2.py b/py/sight/proto/example_pb2.py index 77e646c..5697c86 100644 --- a/py/sight/proto/example_pb2.py +++ b/py/sight/proto/example_pb2.py @@ -2,28 +2,30 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
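Reviewer note before the generated `*_pb2.py` churn below: a small sketch of the `Location` behavior that `location_test.py` pins down. Only `clone()`'s list copy is visible verbatim in the hunk above; the rendered-string format is inferred from the test assertions.

```python
from sight import Location

# A fresh Location holds a single component; per the assertions in
# location_test.py, components render as ten-digit zero-padded integers
# joined by ':' (so str(loc) here would be '0000000000').
loc = Location()

# clone() copies the component list (new_loc.id = list(self.id)), so the
# clone can advance independently of the original.
child = loc.clone()
assert child.id == loc.id
assert child.id is not loc.id  # distinct list objects
```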
# source: sight/proto/example.proto """Generated protocol buffer code.""" -from google.protobuf.internal import builder as _builder from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() - from sight.proto import feature_pb2 as sight_dot_proto_dot_feature__pb2 - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19sight/proto/example.proto\x12\rsight.x.proto\x1a\x19sight/proto/feature.proto\"4\n\x07\x45xample\x12)\n\x08\x66\x65\x61tures\x18\x01 \x01(\x0b\x32\x17.sight.x.proto.Features\"o\n\x0fSequenceExample\x12(\n\x07\x63ontext\x18\x01 \x01(\x0b\x32\x17.sight.x.proto.Features\x12\x32\n\rfeature_lists\x18\x02 \x01(\x0b\x32\x1b.sight.x.proto.FeatureListsB\x81\x01\n\x16org.tensorflow.exampleB\rExampleProtosP\x01ZSgithub.com/tensorflow/tensorflow/tensorflow/go/core/example/example_protos_go_proto\xf8\x01\x01\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( + b'\n\x19sight/proto/example.proto\x12\rsight.x.proto\x1a\x19sight/proto/feature.proto\"4\n\x07\x45xample\x12)\n\x08\x66\x65\x61tures\x18\x01 \x01(\x0b\x32\x17.sight.x.proto.Features\"o\n\x0fSequenceExample\x12(\n\x07\x63ontext\x18\x01 \x01(\x0b\x32\x17.sight.x.proto.Features\x12\x32\n\rfeature_lists\x18\x02 \x01(\x0b\x32\x1b.sight.x.proto.FeatureListsB\x81\x01\n\x16org.tensorflow.exampleB\rExampleProtosP\x01ZSgithub.com/tensorflow/tensorflow/tensorflow/go/core/example/example_protos_go_proto\xf8\x01\x01\x62\x06proto3' +) _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sight.proto.example_pb2', globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sight.proto.example_pb2', + globals()) if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None DESCRIPTOR._serialized_options = b'\n\026org.tensorflow.exampleB\rExampleProtosP\001ZSgithub.com/tensorflow/tensorflow/tensorflow/go/core/example/example_protos_go_proto\370\001\001' - _EXAMPLE._serialized_start=71 - _EXAMPLE._serialized_end=123 - _SEQUENCEEXAMPLE._serialized_start=125 - _SEQUENCEEXAMPLE._serialized_end=236 + _EXAMPLE._serialized_start = 71 + _EXAMPLE._serialized_end = 123 + _SEQUENCEEXAMPLE._serialized_start = 125 + _SEQUENCEEXAMPLE._serialized_end = 236 # @@protoc_insertion_point(module_scope) diff --git a/py/sight/proto/feature_pb2.py b/py/sight/proto/feature_pb2.py index 9520dd7..50670f3 100644 --- a/py/sight/proto/feature_pb2.py +++ b/py/sight/proto/feature_pb2.py @@ -2,21 +2,22 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: sight/proto/feature.proto """Generated protocol buffer code.""" -from google.protobuf.internal import builder as _builder from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() - - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19sight/proto/feature.proto\x12\rsight.x.proto\"\x1a\n\tBytesList\x12\r\n\x05value\x18\x01 \x03(\x0c\"\x1e\n\tFloatList\x12\x11\n\x05value\x18\x01 \x03(\x02\x42\x02\x10\x01\"\x1e\n\tInt64List\x12\x11\n\x05value\x18\x01 \x03(\x03\x42\x02\x10\x01\"\xa1\x01\n\x07\x46\x65\x61ture\x12.\n\nbytes_list\x18\x01 \x01(\x0b\x32\x18.sight.x.proto.BytesListH\x00\x12.\n\nfloat_list\x18\x02 \x01(\x0b\x32\x18.sight.x.proto.FloatListH\x00\x12.\n\nint64_list\x18\x03 \x01(\x0b\x32\x18.sight.x.proto.Int64ListH\x00\x42\x06\n\x04kind\"\x89\x01\n\x08\x46\x65\x61tures\x12\x35\n\x07\x66\x65\x61ture\x18\x01 \x03(\x0b\x32$.sight.x.proto.Features.FeatureEntry\x1a\x46\n\x0c\x46\x65\x61tureEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.sight.x.proto.Feature:\x02\x38\x01\"6\n\x0b\x46\x65\x61tureList\x12\'\n\x07\x66\x65\x61ture\x18\x01 \x03(\x0b\x32\x16.sight.x.proto.Feature\"\xa2\x01\n\x0c\x46\x65\x61tureLists\x12\x42\n\x0c\x66\x65\x61ture_list\x18\x01 \x03(\x0b\x32,.sight.x.proto.FeatureLists.FeatureListEntry\x1aN\n\x10\x46\x65\x61tureListEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12)\n\x05value\x18\x02 \x01(\x0b\x32\x1a.sight.x.proto.FeatureList:\x02\x38\x01\x42\x81\x01\n\x16org.tensorflow.exampleB\rFeatureProtosP\x01ZSgithub.com/tensorflow/tensorflow/tensorflow/go/core/example/example_protos_go_proto\xf8\x01\x01\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( + b'\n\x19sight/proto/feature.proto\x12\rsight.x.proto\"\x1a\n\tBytesList\x12\r\n\x05value\x18\x01 \x03(\x0c\"\x1e\n\tFloatList\x12\x11\n\x05value\x18\x01 \x03(\x02\x42\x02\x10\x01\"\x1e\n\tInt64List\x12\x11\n\x05value\x18\x01 \x03(\x03\x42\x02\x10\x01\"\xa1\x01\n\x07\x46\x65\x61ture\x12.\n\nbytes_list\x18\x01 \x01(\x0b\x32\x18.sight.x.proto.BytesListH\x00\x12.\n\nfloat_list\x18\x02 \x01(\x0b\x32\x18.sight.x.proto.FloatListH\x00\x12.\n\nint64_list\x18\x03 \x01(\x0b\x32\x18.sight.x.proto.Int64ListH\x00\x42\x06\n\x04kind\"\x89\x01\n\x08\x46\x65\x61tures\x12\x35\n\x07\x66\x65\x61ture\x18\x01 \x03(\x0b\x32$.sight.x.proto.Features.FeatureEntry\x1a\x46\n\x0c\x46\x65\x61tureEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.sight.x.proto.Feature:\x02\x38\x01\"6\n\x0b\x46\x65\x61tureList\x12\'\n\x07\x66\x65\x61ture\x18\x01 \x03(\x0b\x32\x16.sight.x.proto.Feature\"\xa2\x01\n\x0c\x46\x65\x61tureLists\x12\x42\n\x0c\x66\x65\x61ture_list\x18\x01 \x03(\x0b\x32,.sight.x.proto.FeatureLists.FeatureListEntry\x1aN\n\x10\x46\x65\x61tureListEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12)\n\x05value\x18\x02 \x01(\x0b\x32\x1a.sight.x.proto.FeatureList:\x02\x38\x01\x42\x81\x01\n\x16org.tensorflow.exampleB\rFeatureProtosP\x01ZSgithub.com/tensorflow/tensorflow/tensorflow/go/core/example/example_protos_go_proto\xf8\x01\x01\x62\x06proto3' +) _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sight.proto.feature_pb2', globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sight.proto.feature_pb2', + globals()) if 
_descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None @@ -29,22 +30,22 @@ _FEATURES_FEATUREENTRY._serialized_options = b'8\001' _FEATURELISTS_FEATURELISTENTRY._options = None _FEATURELISTS_FEATURELISTENTRY._serialized_options = b'8\001' - _BYTESLIST._serialized_start=44 - _BYTESLIST._serialized_end=70 - _FLOATLIST._serialized_start=72 - _FLOATLIST._serialized_end=102 - _INT64LIST._serialized_start=104 - _INT64LIST._serialized_end=134 - _FEATURE._serialized_start=137 - _FEATURE._serialized_end=298 - _FEATURES._serialized_start=301 - _FEATURES._serialized_end=438 - _FEATURES_FEATUREENTRY._serialized_start=368 - _FEATURES_FEATUREENTRY._serialized_end=438 - _FEATURELIST._serialized_start=440 - _FEATURELIST._serialized_end=494 - _FEATURELISTS._serialized_start=497 - _FEATURELISTS._serialized_end=659 - _FEATURELISTS_FEATURELISTENTRY._serialized_start=581 - _FEATURELISTS_FEATURELISTENTRY._serialized_end=659 + _BYTESLIST._serialized_start = 44 + _BYTESLIST._serialized_end = 70 + _FLOATLIST._serialized_start = 72 + _FLOATLIST._serialized_end = 102 + _INT64LIST._serialized_start = 104 + _INT64LIST._serialized_end = 134 + _FEATURE._serialized_start = 137 + _FEATURE._serialized_end = 298 + _FEATURES._serialized_start = 301 + _FEATURES._serialized_end = 438 + _FEATURES_FEATUREENTRY._serialized_start = 368 + _FEATURES_FEATUREENTRY._serialized_end = 438 + _FEATURELIST._serialized_start = 440 + _FEATURELIST._serialized_end = 494 + _FEATURELISTS._serialized_start = 497 + _FEATURELISTS._serialized_end = 659 + _FEATURELISTS_FEATURELISTENTRY._serialized_start = 581 + _FEATURELISTS_FEATURELISTENTRY._serialized_end = 659 # @@protoc_insertion_point(module_scope) diff --git a/py/sight/proto/sight_pb2.py b/py/sight/proto/sight_pb2.py index 2935e48..def7fc7 100644 --- a/py/sight/proto/sight_pb2.py +++ b/py/sight/proto/sight_pb2.py @@ -2,23 +2,27 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: sight/proto/sight.proto """Generated protocol buffer code.""" -from google.protobuf.internal import builder as _builder from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() - from sight.proto import example_pb2 as sight_dot_proto_dot_example__pb2 -from sight.proto.widgets.pipeline.flume import flume_pb2 as sight_dot_proto_dot_widgets_dot_pipeline_dot_flume_dot_flume__pb2 - +from sight.proto.widgets.pipeline.flume import ( + flume_pb2 as sight_dot_proto_dot_widgets_dot_pipeline_dot_flume_dot_flume__pb2 +) -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17sight/proto/sight.proto\x12\rsight.x.proto\x1a\x19sight/proto/example.proto\x1a.sight/proto/widgets/pipeline/flume/flume.proto\"\'\n\tAttribute\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xb1\x0c\n\x06Object\x12\x10\n\x08location\x18\x01 \x01(\t\x12\r\n\x05index\x18\x02 \x01(\x03\x12\x0f\n\x07log_uid\x18\x18 \x01(\t\x12+\n\tattribute\x18\x03 \x03(\x0b\x32\x18.sight.x.proto.Attribute\x12/\n\x08sub_type\x18\x04 \x01(\x0e\x32\x1d.sight.x.proto.Object.SubType\x12#\n\x04text\x18\x05 \x01(\x0b\x32\x13.sight.x.proto.TextH\x00\x12\x30\n\x0b\x62lock_start\x18\x06 \x01(\x0b\x32\x19.sight.x.proto.BlockStartH\x00\x12,\n\tblock_end\x18\x07 \x01(\x0b\x32\x17.sight.x.proto.BlockEndH\x00\x12\x38\n\x0f\x61ttribute_start\x18\x08 \x01(\x0b\x32\x1d.sight.x.proto.AttributeStartH\x00\x12\x34\n\rattribute_end\x18\t \x01(\x0b\x32\x1b.sight.x.proto.AttributeEndH\x00\x12\x46\n\x10\x66lume_do_fn_emit\x18\x0e \x01(\x0b\x32*.sight.x.widgets.flume.proto.FlumeDoFnEmitH\x00\x12@\n\x0c\x66lume_depend\x18\x0f \x01(\x0b\x32(.sight.x.widgets.flume.proto.FlumeDependH\x00\x12%\n\x05value\x18\x10 \x01(\x0b\x32\x14.sight.x.proto.ValueH\x00\x12-\n\texception\x18\x11 \x01(\x0b\x32\x18.sight.x.proto.ExceptionH\x00\x12\'\n\x06tensor\x18\x14 \x01(\x0b\x32\x15.sight.x.proto.TensorH\x00\x12?\n\x13tensor_flow_example\x18\x15 \x01(\x0b\x32 .sight.x.proto.TensorFlowExampleH\x00\x12\x36\n\x0e\x64\x65\x63ision_point\x18\x16 \x01(\x0b\x32\x1c.sight.x.proto.DecisionPointH\x00\x12:\n\x10\x64\x65\x63ision_outcome\x18\x17 \x01(\x0b\x32\x1e.sight.x.proto.DecisionOutcomeH\x00\x12#\n\x04link\x18\x19 \x01(\x0b\x32\x13.sight.x.proto.LinkH\x00\x12\x36\n\x0epropose_action\x18\x1a \x01(\x0b\x32\x1c.sight.x.proto.ProposeActionH\x00\x12\x0c\n\x04\x66ile\x18\n \x01(\t\x12\x0c\n\x04line\x18\x0b \x01(\x05\x12\x0c\n\x04\x66unc\x18\x0c \x01(\t\x12\x1f\n\x17\x61ncestor_start_location\x18\r \x03(\t\x12.\n\x07metrics\x18\x12 \x01(\x0b\x32\x1d.sight.x.proto.Object.Metrics\x12*\n\x05order\x18\x13 \x01(\x0b\x32\x1b.sight.x.proto.Object.Order\x1aX\n\x07Metrics\x12%\n\x1dprocess_free_swap_space_bytes\x18\x01 \x01(\x03\x12&\n\x1eprocess_total_swap_space_bytes\x18\x02 \x01(\x03\x1a\x1d\n\x05Order\x12\x14\n\x0ctimestamp_ns\x18\x01 
\x01(\x03\"\xd2\x02\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x0b\n\x07ST_TEXT\x10\x01\x12\x12\n\x0eST_BLOCK_START\x10\x02\x12\x10\n\x0cST_BLOCK_END\x10\x03\x12\x16\n\x12ST_ATTRIBUTE_START\x10\x04\x12\x14\n\x10ST_ATTRIBUTE_END\x10\x05\x12\x17\n\x13ST_FLUME_DO_FN_EMIT\x10\x06\x12\x13\n\x0fST_FLUME_DEPEND\x10\x07\x12\x0c\n\x08ST_VALUE\x10\x08\x12\x10\n\x0cST_EXCEPTION\x10\t\x12\r\n\tST_TENSOR\x10\n\x12\x19\n\x15ST_TENSORFLOW_EXAMPLE\x10\x0c\x12\x15\n\x11ST_DECISION_POINT\x10\r\x12\x17\n\x13ST_DECISION_OUTCOME\x10\x0e\x12\n\n\x06ST_GAP\x10\x0b\x12\x0b\n\x07ST_LINK\x10\x0f\x12\x15\n\x11ST_PROPOSE_ACTION\x10\x10\x42\x12\n\x10sub_type_message\"\x88\x01\n\rProposeAction\x12\x32\n\x0c\x61\x63tion_attrs\x18\x01 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x30\n\nattributes\x18\x02 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x11\n\taction_id\x18\x03 \x01(\t\"\xec\x01\n\x12\x43onfigurationStart\x12;\n\x08sub_type\x18\x01 \x01(\x0e\x32).sight.x.proto.ConfigurationStart.SubType\x12K\n\x16\x64\x65\x63ision_configuration\x18\x02 \x01(\x0b\x32).sight.x.proto.DecisionConfigurationStartH\x00\"8\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x1d\n\x19ST_DECISION_CONFIGURATION\x10\x01\x42\x12\n\x10sub_type_message\";\n\tException\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\x12\x11\n\ttraceback\x18\x03 \x01(\t\"\xd7\x05\n\x06Tensor\x12/\n\x08sub_type\x18\x01 \x01(\x0e\x32\x1d.sight.x.proto.Tensor.SubType\x12\r\n\x05label\x18\x02 \x01(\t\x12\r\n\x05shape\x18\x03 \x03(\x03\x12\x11\n\tdim_label\x18\t \x03(\t\x12;\n\x0f\x64im_axis_values\x18\n \x03(\x0b\x32\".sight.x.proto.Tensor.StringValues\x12;\n\rstring_values\x18\x04 \x01(\x0b\x32\".sight.x.proto.Tensor.StringValuesH\x00\x12\x39\n\x0c\x62ytes_values\x18\x05 \x01(\x0b\x32!.sight.x.proto.Tensor.BytesValuesH\x00\x12\x39\n\x0cint64_values\x18\x06 \x01(\x0b\x32!.sight.x.proto.Tensor.Int64ValuesH\x00\x12;\n\rdouble_values\x18\x07 \x01(\x0b\x32\".sight.x.proto.Tensor.DoubleValuesH\x00\x12\x37\n\x0b\x62ool_values\x18\x08 \x01(\x0b\x32 .sight.x.proto.Tensor.BoolValuesH\x00\x1a\x1d\n\x0cStringValues\x12\r\n\x05value\x18\x01 \x03(\t\x1a\x1c\n\x0b\x42ytesValues\x12\r\n\x05value\x18\x01 \x03(\x0c\x1a\x1c\n\x0bInt64Values\x12\r\n\x05value\x18\x01 \x03(\x03\x1a\x1d\n\x0c\x44oubleValues\x12\r\n\x05value\x18\x01 \x03(\x01\x1a\x1b\n\nBoolValues\x12\r\n\x05value\x18\x01 \x03(\x08\"`\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\r\n\tST_STRING\x10\x01\x12\x0c\n\x08ST_BYTES\x10\x02\x12\x0c\n\x08ST_INT64\x10\x03\x12\r\n\tST_DOUBLE\x10\x04\x12\x0b\n\x07ST_BOOL\x10\x05\x42\x0c\n\nvalue_type\"\x9c\x01\n\x04Link\x12\x17\n\x0flinked_sight_id\x18\x01 \x01(\t\x12/\n\tlink_type\x18\x02 \x01(\x0e\x32\x1c.sight.x.proto.Link.LinkType\"J\n\x08LinkType\x12\x0e\n\nLT_UNKNOWN\x10\x00\x12\x16\n\x12LT_PARENT_TO_CHILD\x10\x01\x12\x16\n\x12LT_CHILD_TO_PARENT\x10\x02\"\x8e\x02\n\x11TensorFlowExample\x12/\n\rinput_example\x18\x01 \x01(\x0b\x32\x16.sight.x.proto.ExampleH\x00\x12@\n\x16input_sequence_example\x18\x02 \x01(\x0b\x32\x1e.sight.x.proto.SequenceExampleH\x00\x12\x30\n\x0eoutput_example\x18\x03 \x01(\x0b\x32\x16.sight.x.proto.ExampleH\x01\x12\x41\n\x17output_sequence_example\x18\x04 \x01(\x0b\x32\x1e.sight.x.proto.SequenceExampleH\x01\x42\x07\n\x05inputB\x08\n\x06output\")\n\x03Log\x12\"\n\x03obj\x18\x01 \x03(\x0b\x32\x15.sight.x.proto.Object\"x\n\x04Text\x12\x0c\n\x04text\x18\x01 \x01(\t\x12-\n\x08sub_type\x18\x02 
\x01(\x0e\x32\x1b.sight.x.proto.Text.SubType\"3\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x0b\n\x07ST_TEXT\x10\x01\x12\x0b\n\x07ST_HTML\x10\x02\"\xf4\x02\n\x05Value\x12.\n\x08sub_type\x18\x01 \x01(\x0e\x32\x1c.sight.x.proto.Value.SubType\x12\x16\n\x0cstring_value\x18\x02 \x01(\tH\x00\x12\x15\n\x0b\x62ytes_value\x18\x03 \x01(\x0cH\x00\x12\x15\n\x0bint64_value\x18\x04 \x01(\x03H\x00\x12\x16\n\x0c\x64ouble_value\x18\x05 \x01(\x01H\x00\x12\x14\n\nbool_value\x18\x06 \x01(\x08H\x00\x12\x14\n\nnone_value\x18\x07 \x01(\x08H\x00\x12\x14\n\njson_value\x18\x08 \x01(\tH\x00\x12\x11\n\tmime_type\x18\t \x01(\t\"z\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\r\n\tST_STRING\x10\x01\x12\x0c\n\x08ST_BYTES\x10\x02\x12\x0c\n\x08ST_INT64\x10\x03\x12\r\n\tST_DOUBLE\x10\x04\x12\x0b\n\x07ST_BOOL\x10\x05\x12\x0b\n\x07ST_NONE\x10\x06\x12\x0b\n\x07ST_JSON\x10\x07\x42\x0c\n\nvalue_type\"\xa8\n\n\nBlockStart\x12\r\n\x05label\x18\x01 \x01(\t\x12\x33\n\x08sub_type\x18\x02 \x01(\x0e\x32!.sight.x.proto.BlockStart.SubType\x12J\n\x12\x66lume_do_fn_create\x18\x03 \x01(\x0b\x32,.sight.x.widgets.flume.proto.FlumeDoFnCreateH\x00\x12M\n\x14\x66lume_do_fn_start_do\x18\x04 \x01(\x0b\x32-.sight.x.widgets.flume.proto.FlumeDoFnStartDoH\x00\x12T\n\x17\x66lume_compare_fn_create\x18\x05 \x01(\x0b\x32\x31.sight.x.widgets.flume.proto.FlumeCompareFnCreateH\x00\x12\x61\n\x1e\x66lume_compare_fn_start_compare\x18\x06 \x01(\x0b\x32\x37.sight.x.widgets.flume.proto.FlumeCompareFnStartCompareH\x00\x12(\n\x04list\x18\x07 \x01(\x0b\x32\x18.sight.x.proto.ListStartH\x00\x12\\\n tensor_flow_model_training_epoch\x18\x08 \x01(\x0b\x32\x30.sight.x.proto.TensorFlowModelTrainingEpochStartH\x00\x12:\n\x10simulation_start\x18\t \x01(\x0b\x32\x1e.sight.x.proto.SimulationStartH\x00\x12O\n\x1bsimulation_parameters_start\x18\n \x01(\x0b\x32(.sight.x.proto.SimulationParametersStartH\x00\x12L\n\x1asimulation_time_step_start\x18\x0b \x01(\x0b\x32&.sight.x.proto.SimulationTimeStepStartH\x00\x12:\n\rconfiguration\x18\x0c \x01(\x0b\x32!.sight.x.proto.ConfigurationStartH\x00\"\xce\x03\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x19\n\x15ST_FLUME_DO_FN_CREATE\x10\x01\x12\x1b\n\x17ST_FLUME_DO_FN_START_DO\x10\x02\x12\x1e\n\x1aST_FLUME_COMPARE_FN_CREATE\x10\x03\x12%\n!ST_FLUME_COMPARE_FN_START_COMPARE\x10\x04\x12\x12\n\x0eST_NAMED_VALUE\x10\x05\x12\x0b\n\x07ST_LIST\x10\x06\x12\x0c\n\x08ST_TABLE\x10\x07\x12#\n\x1fST_TENSORFLOW_MODEL_APPLICATION\x10\x08\x12&\n\"ST_TENSORFLOW_MODEL_TRAINING_EPOCH\x10\t\x12 \n\x1cST_TENSORFLOW_MODEL_TRAINING\x10\n\x12\x11\n\rST_SIMULATION\x10\x0b\x12\x1c\n\x18ST_SIMULATION_PARAMETERS\x10\x0c\x12\x17\n\x13ST_SIMULATION_STATE\x10\r\x12\x1b\n\x17ST_SIMULATION_TIME_STEP\x10\x0e\x12\x19\n\x15ST_CLUSTER_ASSIGNMENT\x10\x0f\x12\x14\n\x10ST_CONFIGURATION\x10\x10\x42\x12\n\x10sub_type_message\"\xa4\x08\n\x08\x42lockEnd\x12\r\n\x05label\x18\x01 \x01(\t\x12\x31\n\x08sub_type\x18\x06 \x01(\x0e\x32\x1f.sight.x.proto.BlockEnd.SubType\x12\x1f\n\x17location_of_block_start\x18\x02 \x01(\t\x12\x1b\n\x13num_direct_contents\x18\x03 \x01(\x03\x12\x1f\n\x17num_transitive_contents\x18\x04 \x01(\x03\x12N\n\x14\x66lume_do_fn_complete\x18\x07 \x01(\x0b\x32..sight.x.widgets.flume.proto.FlumeDoFnCompleteH\x00\x12I\n\x12\x66lume_do_fn_end_do\x18\x08 \x01(\x0b\x32+.sight.x.widgets.flume.proto.FlumeDoFnEndDoH\x00\x12X\n\x19\x66lume_compare_fn_complete\x18\t \x01(\x0b\x32\x33.sight.x.widgets.flume.proto.FlumeCompareFnCompleteH\x00\x12]\n\x1c\x66lume_compare_fn_end_compare\x18\n 
\x01(\x0b\x32\x35.sight.x.widgets.flume.proto.FlumeCompareFnEndCompareH\x00\x12\x30\n\x07metrics\x18\x0c \x01(\x0b\x32\x1f.sight.x.proto.BlockEnd.Metrics\x1a\"\n\x07Metrics\x12\x17\n\x0f\x65lapsed_time_ns\x18\x01 \x01(\x03\"\xb8\x03\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x1b\n\x17ST_FLUME_DO_FN_COMPLETE\x10\x01\x12\x19\n\x15ST_FLUME_DO_FN_END_DO\x10\x02\x12 \n\x1cST_FLUME_COMPARE_FN_COMPLETE\x10\x03\x12#\n\x1fST_FLUME_COMPARE_FN_END_COMPARE\x10\x04\x12\x12\n\x0eST_NAMED_VALUE\x10\x05\x12\x0b\n\x07ST_LIST\x10\x06\x12\x0c\n\x08ST_TABLE\x10\x07\x12#\n\x1fST_TENSORFLOW_MODEL_APPLICATION\x10\x08\x12&\n\"ST_TENSORFLOW_MODEL_TRAINING_EPOCH\x10\t\x12 \n\x1cST_TENSORFLOW_MODEL_TRAINING\x10\n\x12\x11\n\rST_SIMULATION\x10\x0b\x12\x1c\n\x18ST_SIMULATION_PARAMETERS\x10\x0c\x12\x17\n\x13ST_SIMULATION_STATE\x10\r\x12\x1b\n\x17ST_SIMULATION_TIME_STEP\x10\x0e\x12\x19\n\x15ST_CLUSTER_ASSIGNMENT\x10\x0f\x42\x12\n\x10sub_type_message\"\xaf\x01\n\tListStart\x12\x32\n\x08sub_type\x18\x01 \x01(\x0e\x32 .sight.x.proto.ListStart.SubType\"n\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x12\n\x0eST_HOMOGENEOUS\x10\x01\x12\x14\n\x10ST_HETEROGENEOUS\x10\x02\x12\n\n\x06ST_MAP\x10\x03\x12\x10\n\x0cST_MAP_ENTRY\x10\x04\x12\x0b\n\x07ST_DICT\x10\x05\"J\n!TensorFlowModelTrainingEpochStart\x12\x11\n\tepoch_num\x18\x01 \x01(\x03\x12\x12\n\nbatch_size\x18\x02 \x01(\x03\"=\n\x0e\x41ttributeStart\x12+\n\tattribute\x18\x01 \x01(\x0b\x32\x18.sight.x.proto.Attribute\"\x1b\n\x0c\x41ttributeEnd\x12\x0b\n\x03key\x18\x01 \x01(\t\"\xb2\x03\n\x06Params\x12\r\n\x05local\x18\x01 \x01(\x08\x12\x14\n\x0clog_dir_path\x18\x02 \x01(\t\x12\r\n\x05label\x18\x03 \x01(\t\x12\x13\n\x0btext_output\x18\x04 \x01(\x08\x12\x17\n\x0f\x63olumnio_output\x18\x05 \x01(\x08\x12\x18\n\x10\x63\x61pacitor_output\x18\x06 \x01(\x08\x12\x11\n\tlog_owner\x18\x07 \x01(\t\x12\x13\n\x0bpath_prefix\x18\x08 \x01(\t\x12\x1a\n\x12\x63ontainer_location\x18\t \x01(\t\x12\n\n\x02id\x18\n \x01(\x03\x12\x15\n\rsilent_logger\x18\x0b \x01(\x08\x12\x11\n\tin_memory\x18\x0c \x01(\x08\x12\x13\n\x0b\x61vro_output\x18\r \x01(\x08\x12\x12\n\nproject_id\x18\x0e \x01(\t\x12\x13\n\x0b\x62ucket_name\x18\x0f \x01(\t\x12\x10\n\x08gcp_path\x18\x10 \x01(\t\x12\x13\n\x0b\x66ile_format\x18\x11 \x01(\t\x12\x14\n\x0c\x64\x61taset_name\x18\x12 \x01(\t\x12\x1c\n\x14\x65xternal_file_format\x18\x13 \x01(\t\x12\x19\n\x11\x65xternal_file_uri\x18\x14 \x01(\t\"\x11\n\x0fSimulationStart\"\x1b\n\x19SimulationParametersStart\"\xb8\x02\n\x17SimulationTimeStepStart\x12\x17\n\x0ftime_step_index\x18\x01 \x03(\x03\x12\x11\n\ttime_step\x18\x02 \x01(\x02\x12\x16\n\x0etime_step_size\x18\x03 \x01(\x02\x12M\n\x0ftime_step_units\x18\x04 \x01(\x0e\x32\x34.sight.x.proto.SimulationTimeStepStart.TimeStepUnits\"\x89\x01\n\rTimeStepUnits\x12\x0f\n\x0bTSU_UNKNOWN\x10\x00\x12\x0e\n\nTSU_SECOND\x10\x01\x12\x0e\n\nTSU_MINUTE\x10\x02\x12\x0c\n\x08TSU_HOUR\x10\x03\x12\x0b\n\x07TSU_DAY\x10\x04\x12\r\n\tTSU_MONTH\x10\x05\x12\x0f\n\x0bTSU_QUARTER\x10\x06\x12\x0c\n\x08TSU_YEAR\x10\x07\"\xf0\x01\n\x12\x43ontinuousProbDist\x12>\n\x08gaussian\x18\x01 \x01(\x0b\x32*.sight.x.proto.ContinuousProbDist.GaussianH\x00\x12<\n\x07uniform\x18\x02 \x01(\x0b\x32).sight.x.proto.ContinuousProbDist.UniformH\x00\x1a\'\n\x08Gaussian\x12\x0c\n\x04mean\x18\x01 \x01(\x02\x12\r\n\x05stdev\x18\x02 \x01(\x02\x1a+\n\x07Uniform\x12\x0f\n\x07min_val\x18\x01 \x01(\x02\x12\x0f\n\x07max_val\x18\x02 \x01(\x02\x42\x06\n\x04\x64ist\"\x83\x01\n\x10\x44iscreteProbDist\x12:\n\x07uniform\x18\x01 
\x01(\x0b\x32\'.sight.x.proto.DiscreteProbDist.UniformH\x00\x1a+\n\x07Uniform\x12\x0f\n\x07min_val\x18\x01 \x01(\x03\x12\x0f\n\x07max_val\x18\x02 \x01(\x03\x42\x06\n\x04\x64ist\"\xa6\x1c\n\x1a\x44\x65\x63isionConfigurationStart\x12O\n\x0eoptimizer_type\x18\x01 \x01(\x0e\x32\x37.sight.x.proto.DecisionConfigurationStart.OptimizerType\x12R\n\rchoice_config\x18\x02 \x03(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.ChoiceConfigEntry\x12N\n\x0bstate_attrs\x18\x03 \x03(\x0b\x32\x39.sight.x.proto.DecisionConfigurationStart.StateAttrsEntry\x12P\n\x0c\x61\x63tion_attrs\x18\x04 \x03(\x0b\x32:.sight.x.proto.DecisionConfigurationStart.ActionAttrsEntry\x12R\n\routcome_attrs\x18\x05 \x03(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.OutcomeAttrsEntry\x12\x12\n\nnum_trials\x18\x06 \x01(\x03\x1a\x0e\n\x0cVizierConfig\x1a\xf1\x01\n\nAcmeConfig\x12R\n\nacme_agent\x18\x01 \x01(\x0e\x32>.sight.x.proto.DecisionConfigurationStart.AcmeConfig.AcmeAgent\"\x8e\x01\n\tAcmeAgent\x12\x0e\n\nAA_UNKNOWN\x10\x00\x12\n\n\x06\x41\x41_DQN\x10\x01\x12\x0b\n\x07\x41\x41_D4PG\x10\x02\x12\r\n\tAA_IMPALA\x10\x03\x12\x0b\n\x07\x41\x41_MDQN\x10\x04\x12\x0c\n\x08\x41\x41_QRDQN\x10\x05\x12\n\n\x06\x41\x41_PPO\x10\x06\x12\n\n\x06\x41\x41_MPO\x10\x07\x12\n\n\x06\x41\x41_SAC\x10\x08\x12\n\n\x06\x41\x41_TD3\x10\t\x1a\x35\n\x16GeneticAlgorithmConfig\x12\x1b\n\x13max_population_size\x18\x01 \x01(\x03\x1a\x18\n\x16\x45xhaustiveSearchConfig\x1a\xeb\x02\n\tLLMConfig\x12S\n\talgorithm\x18\x01 \x01(\x0e\x32@.sight.x.proto.DecisionConfigurationStart.LLMConfig.LLMAlgorithm\x12I\n\x04goal\x18\x02 \x01(\x0e\x32;.sight.x.proto.DecisionConfigurationStart.LLMConfig.LLMGoal\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\"W\n\x0cLLMAlgorithm\x12\x0e\n\nLA_UNKNOWN\x10\x00\x12\x11\n\rLA_TEXT_BISON\x10\x01\x12\x11\n\rLA_CHAT_BISON\x10\x02\x12\x11\n\rLA_GEMINI_PRO\x10\x03\"P\n\x07LLMGoal\x12\x0e\n\nLM_UNKNOWN\x10\x00\x12\x0f\n\x0bLM_OPTIMIZE\x10\x01\x12\x10\n\x0cLM_RECOMMEND\x10\x02\x12\x12\n\x0eLM_INTERACTIVE\x10\x03\x1a\x13\n\x11\x42\x61yesianOptConfig\x1a\x1b\n\x19SensitivityAnalysisConfig\x1a\x8e\x03\n\x0fNeverGradConfig\x12_\n\talgorithm\x18\x01 \x01(\x0e\x32L.sight.x.proto.DecisionConfigurationStart.NeverGradConfig.NeverGradAlgorithm\"\x99\x02\n\x12NeverGradAlgorithm\x12\x0e\n\nNG_UNKNOWN\x10\x00\x12\x0b\n\x07NG_AUTO\x10\x01\x12\t\n\x05NG_BO\x10\x02\x12\n\n\x06NG_CMA\x10\x03\x12\x12\n\x0eNG_TwoPointsDE\x10\x04\x12\x13\n\x0fNG_RandomSearch\x10\x05\x12\n\n\x06NG_PSO\x10\x06\x12\x1a\n\x16NG_ScrHammersleySearch\x10\x07\x12\t\n\x05NG_DE\x10\x08\x12\n\n\x06NG_CGA\x10\t\x12\t\n\x05NG_ES\x10\n\x12\r\n\tNG_DL_OPO\x10\x0b\x12\n\n\x06NG_DDE\x10\x0c\x12\n\n\x06NG_NMM\x10\r\x12\x10\n\x0cNG_TINY_SPSA\x10\x0e\x12\x11\n\rNG_VORONOI_DE\x10\x0f\x12\x10\n\x0cNG_CMA_SMALL\x10\x10\x1a\r\n\x0bSMCPyConfig\x1a\x19\n\x17WorklistSchedulerConfig\x1a\xac\x07\n\x0c\x43hoiceConfig\x12O\n\rvizier_config\x18\x01 \x01(\x0b\x32\x36.sight.x.proto.DecisionConfigurationStart.VizierConfigH\x00\x12K\n\x0b\x61\x63me_config\x18\x02 \x01(\x0b\x32\x34.sight.x.proto.DecisionConfigurationStart.AcmeConfigH\x00\x12\x64\n\x18genetic_algorithm_config\x18\x03 \x01(\x0b\x32@.sight.x.proto.DecisionConfigurationStart.GeneticAlgorithmConfigH\x00\x12\x64\n\x18\x65xhaustive_search_config\x18\x04 \x01(\x0b\x32@.sight.x.proto.DecisionConfigurationStart.ExhaustiveSearchConfigH\x00\x12I\n\nllm_config\x18\x05 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.LLMConfigH\x00\x12Z\n\x13\x62\x61yesian_opt_config\x18\x06 
\x01(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.BayesianOptConfigH\x00\x12j\n\x1bsensitivity_analysis_config\x18\x07 \x01(\x0b\x32\x43.sight.x.proto.DecisionConfigurationStart.SensitivityAnalysisConfigH\x00\x12V\n\x11never_grad_config\x18\x08 \x01(\x0b\x32\x39.sight.x.proto.DecisionConfigurationStart.NeverGradConfigH\x00\x12N\n\rsmc_py_config\x18\t \x01(\x0b\x32\x35.sight.x.proto.DecisionConfigurationStart.SMCPyConfigH\x00\x12\x66\n\x19worklist_scheduler_config\x18\n \x01(\x0b\x32\x41.sight.x.proto.DecisionConfigurationStart.WorklistSchedulerConfigH\x00\x42\x0f\n\rchoice_config\x1ak\n\x11\x43hoiceConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x45\n\x05value\x18\x02 \x01(\x0b\x32\x36.sight.x.proto.DecisionConfigurationStart.ChoiceConfig:\x02\x38\x01\x1a\x8d\x02\n\tAttrProps\x12\x11\n\tmin_value\x18\x01 \x01(\x02\x12\x11\n\tmax_value\x18\x02 \x01(\x02\x12\x11\n\tstep_size\x18\x03 \x01(\x02\x12\x1a\n\x12valid_float_values\x18\x04 \x03(\x02\x12\x18\n\x10valid_int_values\x18\x08 \x03(\x03\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\x12?\n\x14\x63ontinuous_prob_dist\x18\x06 \x01(\x0b\x32!.sight.x.proto.ContinuousProbDist\x12;\n\x12\x64iscrete_prob_dist\x18\x07 \x01(\x0b\x32\x1f.sight.x.proto.DiscreteProbDist\x1a\x66\n\x0fStateAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\x1ag\n\x10\x41\x63tionAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\x1ah\n\x11OutcomeAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\"\xea\x01\n\rOptimizerType\x12\x0e\n\nOT_UNKNOWN\x10\x00\x12\r\n\tOT_VIZIER\x10\x01\x12\x0b\n\x07OT_ACME\x10\x02\x12\x18\n\x14OT_GENETIC_ALGORITHM\x10\x03\x12\x18\n\x14OT_EXHAUSTIVE_SEARCH\x10\x04\x12\n\n\x06OT_LLM\x10\x05\x12\x13\n\x0fOT_BAYESIAN_OPT\x10\x06\x12\x1b\n\x17OT_SENSITIVITY_ANALYSIS\x10\x07\x12\x11\n\rOT_NEVER_GRAD\x10\x08\x12\r\n\tOT_SMC_PY\x10\t\x12\x19\n\x15OT_WORKLIST_SCHEDULER\x10\n\"U\n\x08\x44\x61taType\x12\r\n\tDT_UNKOWN\x10\x00\x12\x0c\n\x08\x44T_INT32\x10\x01\x12\x0c\n\x08\x44T_INT64\x10\x02\x12\x0e\n\nDT_FLOAT32\x10\x03\x12\x0e\n\nDT_FLOAT64\x10\x04\"A\n\rDecisionParam\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.sight.x.proto.Value\"\xa5\x01\n\rDecisionPoint\x12\x14\n\x0c\x63hoice_label\x18\x01 \x01(\t\x12\x15\n\rchosen_option\x18\x02 \x01(\t\x12\x33\n\rchoice_params\x18\x03 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x32\n\x0cstate_params\x18\x04 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\"\x80\x01\n\x0f\x44\x65\x63isionOutcome\x12\x15\n\routcome_label\x18\x01 \x01(\t\x12\x0e\n\x06reward\x18\x02 \x01(\x02\x12\x10\n\x08\x64iscount\x18\x03 \x01(\x02\x12\x34\n\x0eoutcome_params\x18\x04 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParamb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( + b'\n\x17sight/proto/sight.proto\x12\rsight.x.proto\x1a\x19sight/proto/example.proto\x1a.sight/proto/widgets/pipeline/flume/flume.proto\"\'\n\tAttribute\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xb1\x0c\n\x06Object\x12\x10\n\x08location\x18\x01 \x01(\t\x12\r\n\x05index\x18\x02 \x01(\x03\x12\x0f\n\x07log_uid\x18\x18 \x01(\t\x12+\n\tattribute\x18\x03 \x03(\x0b\x32\x18.sight.x.proto.Attribute\x12/\n\x08sub_type\x18\x04 \x01(\x0e\x32\x1d.sight.x.proto.Object.SubType\x12#\n\x04text\x18\x05 
\x01(\x0b\x32\x13.sight.x.proto.TextH\x00\x12\x30\n\x0b\x62lock_start\x18\x06 \x01(\x0b\x32\x19.sight.x.proto.BlockStartH\x00\x12,\n\tblock_end\x18\x07 \x01(\x0b\x32\x17.sight.x.proto.BlockEndH\x00\x12\x38\n\x0f\x61ttribute_start\x18\x08 \x01(\x0b\x32\x1d.sight.x.proto.AttributeStartH\x00\x12\x34\n\rattribute_end\x18\t \x01(\x0b\x32\x1b.sight.x.proto.AttributeEndH\x00\x12\x46\n\x10\x66lume_do_fn_emit\x18\x0e \x01(\x0b\x32*.sight.x.widgets.flume.proto.FlumeDoFnEmitH\x00\x12@\n\x0c\x66lume_depend\x18\x0f \x01(\x0b\x32(.sight.x.widgets.flume.proto.FlumeDependH\x00\x12%\n\x05value\x18\x10 \x01(\x0b\x32\x14.sight.x.proto.ValueH\x00\x12-\n\texception\x18\x11 \x01(\x0b\x32\x18.sight.x.proto.ExceptionH\x00\x12\'\n\x06tensor\x18\x14 \x01(\x0b\x32\x15.sight.x.proto.TensorH\x00\x12?\n\x13tensor_flow_example\x18\x15 \x01(\x0b\x32 .sight.x.proto.TensorFlowExampleH\x00\x12\x36\n\x0e\x64\x65\x63ision_point\x18\x16 \x01(\x0b\x32\x1c.sight.x.proto.DecisionPointH\x00\x12:\n\x10\x64\x65\x63ision_outcome\x18\x17 \x01(\x0b\x32\x1e.sight.x.proto.DecisionOutcomeH\x00\x12#\n\x04link\x18\x19 \x01(\x0b\x32\x13.sight.x.proto.LinkH\x00\x12\x36\n\x0epropose_action\x18\x1a \x01(\x0b\x32\x1c.sight.x.proto.ProposeActionH\x00\x12\x0c\n\x04\x66ile\x18\n \x01(\t\x12\x0c\n\x04line\x18\x0b \x01(\x05\x12\x0c\n\x04\x66unc\x18\x0c \x01(\t\x12\x1f\n\x17\x61ncestor_start_location\x18\r \x03(\t\x12.\n\x07metrics\x18\x12 \x01(\x0b\x32\x1d.sight.x.proto.Object.Metrics\x12*\n\x05order\x18\x13 \x01(\x0b\x32\x1b.sight.x.proto.Object.Order\x1aX\n\x07Metrics\x12%\n\x1dprocess_free_swap_space_bytes\x18\x01 \x01(\x03\x12&\n\x1eprocess_total_swap_space_bytes\x18\x02 \x01(\x03\x1a\x1d\n\x05Order\x12\x14\n\x0ctimestamp_ns\x18\x01 \x01(\x03\"\xd2\x02\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x0b\n\x07ST_TEXT\x10\x01\x12\x12\n\x0eST_BLOCK_START\x10\x02\x12\x10\n\x0cST_BLOCK_END\x10\x03\x12\x16\n\x12ST_ATTRIBUTE_START\x10\x04\x12\x14\n\x10ST_ATTRIBUTE_END\x10\x05\x12\x17\n\x13ST_FLUME_DO_FN_EMIT\x10\x06\x12\x13\n\x0fST_FLUME_DEPEND\x10\x07\x12\x0c\n\x08ST_VALUE\x10\x08\x12\x10\n\x0cST_EXCEPTION\x10\t\x12\r\n\tST_TENSOR\x10\n\x12\x19\n\x15ST_TENSORFLOW_EXAMPLE\x10\x0c\x12\x15\n\x11ST_DECISION_POINT\x10\r\x12\x17\n\x13ST_DECISION_OUTCOME\x10\x0e\x12\n\n\x06ST_GAP\x10\x0b\x12\x0b\n\x07ST_LINK\x10\x0f\x12\x15\n\x11ST_PROPOSE_ACTION\x10\x10\x42\x12\n\x10sub_type_message\"\x88\x01\n\rProposeAction\x12\x32\n\x0c\x61\x63tion_attrs\x18\x01 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x30\n\nattributes\x18\x02 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x11\n\taction_id\x18\x03 \x01(\t\"\xec\x01\n\x12\x43onfigurationStart\x12;\n\x08sub_type\x18\x01 \x01(\x0e\x32).sight.x.proto.ConfigurationStart.SubType\x12K\n\x16\x64\x65\x63ision_configuration\x18\x02 \x01(\x0b\x32).sight.x.proto.DecisionConfigurationStartH\x00\"8\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x1d\n\x19ST_DECISION_CONFIGURATION\x10\x01\x42\x12\n\x10sub_type_message\";\n\tException\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\x12\x11\n\ttraceback\x18\x03 \x01(\t\"\xd7\x05\n\x06Tensor\x12/\n\x08sub_type\x18\x01 \x01(\x0e\x32\x1d.sight.x.proto.Tensor.SubType\x12\r\n\x05label\x18\x02 \x01(\t\x12\r\n\x05shape\x18\x03 \x03(\x03\x12\x11\n\tdim_label\x18\t \x03(\t\x12;\n\x0f\x64im_axis_values\x18\n \x03(\x0b\x32\".sight.x.proto.Tensor.StringValues\x12;\n\rstring_values\x18\x04 \x01(\x0b\x32\".sight.x.proto.Tensor.StringValuesH\x00\x12\x39\n\x0c\x62ytes_values\x18\x05 \x01(\x0b\x32!.sight.x.proto.Tensor.BytesValuesH\x00\x12\x39\n\x0cint64_values\x18\x06 
\x01(\x0b\x32!.sight.x.proto.Tensor.Int64ValuesH\x00\x12;\n\rdouble_values\x18\x07 \x01(\x0b\x32\".sight.x.proto.Tensor.DoubleValuesH\x00\x12\x37\n\x0b\x62ool_values\x18\x08 \x01(\x0b\x32 .sight.x.proto.Tensor.BoolValuesH\x00\x1a\x1d\n\x0cStringValues\x12\r\n\x05value\x18\x01 \x03(\t\x1a\x1c\n\x0b\x42ytesValues\x12\r\n\x05value\x18\x01 \x03(\x0c\x1a\x1c\n\x0bInt64Values\x12\r\n\x05value\x18\x01 \x03(\x03\x1a\x1d\n\x0c\x44oubleValues\x12\r\n\x05value\x18\x01 \x03(\x01\x1a\x1b\n\nBoolValues\x12\r\n\x05value\x18\x01 \x03(\x08\"`\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\r\n\tST_STRING\x10\x01\x12\x0c\n\x08ST_BYTES\x10\x02\x12\x0c\n\x08ST_INT64\x10\x03\x12\r\n\tST_DOUBLE\x10\x04\x12\x0b\n\x07ST_BOOL\x10\x05\x42\x0c\n\nvalue_type\"\x9c\x01\n\x04Link\x12\x17\n\x0flinked_sight_id\x18\x01 \x01(\t\x12/\n\tlink_type\x18\x02 \x01(\x0e\x32\x1c.sight.x.proto.Link.LinkType\"J\n\x08LinkType\x12\x0e\n\nLT_UNKNOWN\x10\x00\x12\x16\n\x12LT_PARENT_TO_CHILD\x10\x01\x12\x16\n\x12LT_CHILD_TO_PARENT\x10\x02\"\x8e\x02\n\x11TensorFlowExample\x12/\n\rinput_example\x18\x01 \x01(\x0b\x32\x16.sight.x.proto.ExampleH\x00\x12@\n\x16input_sequence_example\x18\x02 \x01(\x0b\x32\x1e.sight.x.proto.SequenceExampleH\x00\x12\x30\n\x0eoutput_example\x18\x03 \x01(\x0b\x32\x16.sight.x.proto.ExampleH\x01\x12\x41\n\x17output_sequence_example\x18\x04 \x01(\x0b\x32\x1e.sight.x.proto.SequenceExampleH\x01\x42\x07\n\x05inputB\x08\n\x06output\")\n\x03Log\x12\"\n\x03obj\x18\x01 \x03(\x0b\x32\x15.sight.x.proto.Object\"x\n\x04Text\x12\x0c\n\x04text\x18\x01 \x01(\t\x12-\n\x08sub_type\x18\x02 \x01(\x0e\x32\x1b.sight.x.proto.Text.SubType\"3\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x0b\n\x07ST_TEXT\x10\x01\x12\x0b\n\x07ST_HTML\x10\x02\"\xf4\x02\n\x05Value\x12.\n\x08sub_type\x18\x01 \x01(\x0e\x32\x1c.sight.x.proto.Value.SubType\x12\x16\n\x0cstring_value\x18\x02 \x01(\tH\x00\x12\x15\n\x0b\x62ytes_value\x18\x03 \x01(\x0cH\x00\x12\x15\n\x0bint64_value\x18\x04 \x01(\x03H\x00\x12\x16\n\x0c\x64ouble_value\x18\x05 \x01(\x01H\x00\x12\x14\n\nbool_value\x18\x06 \x01(\x08H\x00\x12\x14\n\nnone_value\x18\x07 \x01(\x08H\x00\x12\x14\n\njson_value\x18\x08 \x01(\tH\x00\x12\x11\n\tmime_type\x18\t \x01(\t\"z\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\r\n\tST_STRING\x10\x01\x12\x0c\n\x08ST_BYTES\x10\x02\x12\x0c\n\x08ST_INT64\x10\x03\x12\r\n\tST_DOUBLE\x10\x04\x12\x0b\n\x07ST_BOOL\x10\x05\x12\x0b\n\x07ST_NONE\x10\x06\x12\x0b\n\x07ST_JSON\x10\x07\x42\x0c\n\nvalue_type\"\xa8\n\n\nBlockStart\x12\r\n\x05label\x18\x01 \x01(\t\x12\x33\n\x08sub_type\x18\x02 \x01(\x0e\x32!.sight.x.proto.BlockStart.SubType\x12J\n\x12\x66lume_do_fn_create\x18\x03 \x01(\x0b\x32,.sight.x.widgets.flume.proto.FlumeDoFnCreateH\x00\x12M\n\x14\x66lume_do_fn_start_do\x18\x04 \x01(\x0b\x32-.sight.x.widgets.flume.proto.FlumeDoFnStartDoH\x00\x12T\n\x17\x66lume_compare_fn_create\x18\x05 \x01(\x0b\x32\x31.sight.x.widgets.flume.proto.FlumeCompareFnCreateH\x00\x12\x61\n\x1e\x66lume_compare_fn_start_compare\x18\x06 \x01(\x0b\x32\x37.sight.x.widgets.flume.proto.FlumeCompareFnStartCompareH\x00\x12(\n\x04list\x18\x07 \x01(\x0b\x32\x18.sight.x.proto.ListStartH\x00\x12\\\n tensor_flow_model_training_epoch\x18\x08 \x01(\x0b\x32\x30.sight.x.proto.TensorFlowModelTrainingEpochStartH\x00\x12:\n\x10simulation_start\x18\t \x01(\x0b\x32\x1e.sight.x.proto.SimulationStartH\x00\x12O\n\x1bsimulation_parameters_start\x18\n \x01(\x0b\x32(.sight.x.proto.SimulationParametersStartH\x00\x12L\n\x1asimulation_time_step_start\x18\x0b \x01(\x0b\x32&.sight.x.proto.SimulationTimeStepStartH\x00\x12:\n\rconfiguration\x18\x0c 
\x01(\x0b\x32!.sight.x.proto.ConfigurationStartH\x00\"\xce\x03\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x19\n\x15ST_FLUME_DO_FN_CREATE\x10\x01\x12\x1b\n\x17ST_FLUME_DO_FN_START_DO\x10\x02\x12\x1e\n\x1aST_FLUME_COMPARE_FN_CREATE\x10\x03\x12%\n!ST_FLUME_COMPARE_FN_START_COMPARE\x10\x04\x12\x12\n\x0eST_NAMED_VALUE\x10\x05\x12\x0b\n\x07ST_LIST\x10\x06\x12\x0c\n\x08ST_TABLE\x10\x07\x12#\n\x1fST_TENSORFLOW_MODEL_APPLICATION\x10\x08\x12&\n\"ST_TENSORFLOW_MODEL_TRAINING_EPOCH\x10\t\x12 \n\x1cST_TENSORFLOW_MODEL_TRAINING\x10\n\x12\x11\n\rST_SIMULATION\x10\x0b\x12\x1c\n\x18ST_SIMULATION_PARAMETERS\x10\x0c\x12\x17\n\x13ST_SIMULATION_STATE\x10\r\x12\x1b\n\x17ST_SIMULATION_TIME_STEP\x10\x0e\x12\x19\n\x15ST_CLUSTER_ASSIGNMENT\x10\x0f\x12\x14\n\x10ST_CONFIGURATION\x10\x10\x42\x12\n\x10sub_type_message\"\xa4\x08\n\x08\x42lockEnd\x12\r\n\x05label\x18\x01 \x01(\t\x12\x31\n\x08sub_type\x18\x06 \x01(\x0e\x32\x1f.sight.x.proto.BlockEnd.SubType\x12\x1f\n\x17location_of_block_start\x18\x02 \x01(\t\x12\x1b\n\x13num_direct_contents\x18\x03 \x01(\x03\x12\x1f\n\x17num_transitive_contents\x18\x04 \x01(\x03\x12N\n\x14\x66lume_do_fn_complete\x18\x07 \x01(\x0b\x32..sight.x.widgets.flume.proto.FlumeDoFnCompleteH\x00\x12I\n\x12\x66lume_do_fn_end_do\x18\x08 \x01(\x0b\x32+.sight.x.widgets.flume.proto.FlumeDoFnEndDoH\x00\x12X\n\x19\x66lume_compare_fn_complete\x18\t \x01(\x0b\x32\x33.sight.x.widgets.flume.proto.FlumeCompareFnCompleteH\x00\x12]\n\x1c\x66lume_compare_fn_end_compare\x18\n \x01(\x0b\x32\x35.sight.x.widgets.flume.proto.FlumeCompareFnEndCompareH\x00\x12\x30\n\x07metrics\x18\x0c \x01(\x0b\x32\x1f.sight.x.proto.BlockEnd.Metrics\x1a\"\n\x07Metrics\x12\x17\n\x0f\x65lapsed_time_ns\x18\x01 \x01(\x03\"\xb8\x03\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x1b\n\x17ST_FLUME_DO_FN_COMPLETE\x10\x01\x12\x19\n\x15ST_FLUME_DO_FN_END_DO\x10\x02\x12 \n\x1cST_FLUME_COMPARE_FN_COMPLETE\x10\x03\x12#\n\x1fST_FLUME_COMPARE_FN_END_COMPARE\x10\x04\x12\x12\n\x0eST_NAMED_VALUE\x10\x05\x12\x0b\n\x07ST_LIST\x10\x06\x12\x0c\n\x08ST_TABLE\x10\x07\x12#\n\x1fST_TENSORFLOW_MODEL_APPLICATION\x10\x08\x12&\n\"ST_TENSORFLOW_MODEL_TRAINING_EPOCH\x10\t\x12 \n\x1cST_TENSORFLOW_MODEL_TRAINING\x10\n\x12\x11\n\rST_SIMULATION\x10\x0b\x12\x1c\n\x18ST_SIMULATION_PARAMETERS\x10\x0c\x12\x17\n\x13ST_SIMULATION_STATE\x10\r\x12\x1b\n\x17ST_SIMULATION_TIME_STEP\x10\x0e\x12\x19\n\x15ST_CLUSTER_ASSIGNMENT\x10\x0f\x42\x12\n\x10sub_type_message\"\xaf\x01\n\tListStart\x12\x32\n\x08sub_type\x18\x01 \x01(\x0e\x32 .sight.x.proto.ListStart.SubType\"n\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x12\n\x0eST_HOMOGENEOUS\x10\x01\x12\x14\n\x10ST_HETEROGENEOUS\x10\x02\x12\n\n\x06ST_MAP\x10\x03\x12\x10\n\x0cST_MAP_ENTRY\x10\x04\x12\x0b\n\x07ST_DICT\x10\x05\"J\n!TensorFlowModelTrainingEpochStart\x12\x11\n\tepoch_num\x18\x01 \x01(\x03\x12\x12\n\nbatch_size\x18\x02 \x01(\x03\"=\n\x0e\x41ttributeStart\x12+\n\tattribute\x18\x01 \x01(\x0b\x32\x18.sight.x.proto.Attribute\"\x1b\n\x0c\x41ttributeEnd\x12\x0b\n\x03key\x18\x01 \x01(\t\"\xb2\x03\n\x06Params\x12\r\n\x05local\x18\x01 \x01(\x08\x12\x14\n\x0clog_dir_path\x18\x02 \x01(\t\x12\r\n\x05label\x18\x03 \x01(\t\x12\x13\n\x0btext_output\x18\x04 \x01(\x08\x12\x17\n\x0f\x63olumnio_output\x18\x05 \x01(\x08\x12\x18\n\x10\x63\x61pacitor_output\x18\x06 \x01(\x08\x12\x11\n\tlog_owner\x18\x07 \x01(\t\x12\x13\n\x0bpath_prefix\x18\x08 \x01(\t\x12\x1a\n\x12\x63ontainer_location\x18\t \x01(\t\x12\n\n\x02id\x18\n \x01(\x03\x12\x15\n\rsilent_logger\x18\x0b \x01(\x08\x12\x11\n\tin_memory\x18\x0c \x01(\x08\x12\x13\n\x0b\x61vro_output\x18\r 
\x01(\x08\x12\x12\n\nproject_id\x18\x0e \x01(\t\x12\x13\n\x0b\x62ucket_name\x18\x0f \x01(\t\x12\x10\n\x08gcp_path\x18\x10 \x01(\t\x12\x13\n\x0b\x66ile_format\x18\x11 \x01(\t\x12\x14\n\x0c\x64\x61taset_name\x18\x12 \x01(\t\x12\x1c\n\x14\x65xternal_file_format\x18\x13 \x01(\t\x12\x19\n\x11\x65xternal_file_uri\x18\x14 \x01(\t\"\x11\n\x0fSimulationStart\"\x1b\n\x19SimulationParametersStart\"\xb8\x02\n\x17SimulationTimeStepStart\x12\x17\n\x0ftime_step_index\x18\x01 \x03(\x03\x12\x11\n\ttime_step\x18\x02 \x01(\x02\x12\x16\n\x0etime_step_size\x18\x03 \x01(\x02\x12M\n\x0ftime_step_units\x18\x04 \x01(\x0e\x32\x34.sight.x.proto.SimulationTimeStepStart.TimeStepUnits\"\x89\x01\n\rTimeStepUnits\x12\x0f\n\x0bTSU_UNKNOWN\x10\x00\x12\x0e\n\nTSU_SECOND\x10\x01\x12\x0e\n\nTSU_MINUTE\x10\x02\x12\x0c\n\x08TSU_HOUR\x10\x03\x12\x0b\n\x07TSU_DAY\x10\x04\x12\r\n\tTSU_MONTH\x10\x05\x12\x0f\n\x0bTSU_QUARTER\x10\x06\x12\x0c\n\x08TSU_YEAR\x10\x07\"\xf0\x01\n\x12\x43ontinuousProbDist\x12>\n\x08gaussian\x18\x01 \x01(\x0b\x32*.sight.x.proto.ContinuousProbDist.GaussianH\x00\x12<\n\x07uniform\x18\x02 \x01(\x0b\x32).sight.x.proto.ContinuousProbDist.UniformH\x00\x1a\'\n\x08Gaussian\x12\x0c\n\x04mean\x18\x01 \x01(\x02\x12\r\n\x05stdev\x18\x02 \x01(\x02\x1a+\n\x07Uniform\x12\x0f\n\x07min_val\x18\x01 \x01(\x02\x12\x0f\n\x07max_val\x18\x02 \x01(\x02\x42\x06\n\x04\x64ist\"\x83\x01\n\x10\x44iscreteProbDist\x12:\n\x07uniform\x18\x01 \x01(\x0b\x32\'.sight.x.proto.DiscreteProbDist.UniformH\x00\x1a+\n\x07Uniform\x12\x0f\n\x07min_val\x18\x01 \x01(\x03\x12\x0f\n\x07max_val\x18\x02 \x01(\x03\x42\x06\n\x04\x64ist\"\xa6\x1c\n\x1a\x44\x65\x63isionConfigurationStart\x12O\n\x0eoptimizer_type\x18\x01 \x01(\x0e\x32\x37.sight.x.proto.DecisionConfigurationStart.OptimizerType\x12R\n\rchoice_config\x18\x02 \x03(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.ChoiceConfigEntry\x12N\n\x0bstate_attrs\x18\x03 \x03(\x0b\x32\x39.sight.x.proto.DecisionConfigurationStart.StateAttrsEntry\x12P\n\x0c\x61\x63tion_attrs\x18\x04 \x03(\x0b\x32:.sight.x.proto.DecisionConfigurationStart.ActionAttrsEntry\x12R\n\routcome_attrs\x18\x05 \x03(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.OutcomeAttrsEntry\x12\x12\n\nnum_trials\x18\x06 \x01(\x03\x1a\x0e\n\x0cVizierConfig\x1a\xf1\x01\n\nAcmeConfig\x12R\n\nacme_agent\x18\x01 \x01(\x0e\x32>.sight.x.proto.DecisionConfigurationStart.AcmeConfig.AcmeAgent\"\x8e\x01\n\tAcmeAgent\x12\x0e\n\nAA_UNKNOWN\x10\x00\x12\n\n\x06\x41\x41_DQN\x10\x01\x12\x0b\n\x07\x41\x41_D4PG\x10\x02\x12\r\n\tAA_IMPALA\x10\x03\x12\x0b\n\x07\x41\x41_MDQN\x10\x04\x12\x0c\n\x08\x41\x41_QRDQN\x10\x05\x12\n\n\x06\x41\x41_PPO\x10\x06\x12\n\n\x06\x41\x41_MPO\x10\x07\x12\n\n\x06\x41\x41_SAC\x10\x08\x12\n\n\x06\x41\x41_TD3\x10\t\x1a\x35\n\x16GeneticAlgorithmConfig\x12\x1b\n\x13max_population_size\x18\x01 \x01(\x03\x1a\x18\n\x16\x45xhaustiveSearchConfig\x1a\xeb\x02\n\tLLMConfig\x12S\n\talgorithm\x18\x01 \x01(\x0e\x32@.sight.x.proto.DecisionConfigurationStart.LLMConfig.LLMAlgorithm\x12I\n\x04goal\x18\x02 \x01(\x0e\x32;.sight.x.proto.DecisionConfigurationStart.LLMConfig.LLMGoal\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\"W\n\x0cLLMAlgorithm\x12\x0e\n\nLA_UNKNOWN\x10\x00\x12\x11\n\rLA_TEXT_BISON\x10\x01\x12\x11\n\rLA_CHAT_BISON\x10\x02\x12\x11\n\rLA_GEMINI_PRO\x10\x03\"P\n\x07LLMGoal\x12\x0e\n\nLM_UNKNOWN\x10\x00\x12\x0f\n\x0bLM_OPTIMIZE\x10\x01\x12\x10\n\x0cLM_RECOMMEND\x10\x02\x12\x12\n\x0eLM_INTERACTIVE\x10\x03\x1a\x13\n\x11\x42\x61yesianOptConfig\x1a\x1b\n\x19SensitivityAnalysisConfig\x1a\x8e\x03\n\x0fNeverGradConfig\x12_\n\talgorithm\x18\x01 
\x01(\x0e\x32L.sight.x.proto.DecisionConfigurationStart.NeverGradConfig.NeverGradAlgorithm\"\x99\x02\n\x12NeverGradAlgorithm\x12\x0e\n\nNG_UNKNOWN\x10\x00\x12\x0b\n\x07NG_AUTO\x10\x01\x12\t\n\x05NG_BO\x10\x02\x12\n\n\x06NG_CMA\x10\x03\x12\x12\n\x0eNG_TwoPointsDE\x10\x04\x12\x13\n\x0fNG_RandomSearch\x10\x05\x12\n\n\x06NG_PSO\x10\x06\x12\x1a\n\x16NG_ScrHammersleySearch\x10\x07\x12\t\n\x05NG_DE\x10\x08\x12\n\n\x06NG_CGA\x10\t\x12\t\n\x05NG_ES\x10\n\x12\r\n\tNG_DL_OPO\x10\x0b\x12\n\n\x06NG_DDE\x10\x0c\x12\n\n\x06NG_NMM\x10\r\x12\x10\n\x0cNG_TINY_SPSA\x10\x0e\x12\x11\n\rNG_VORONOI_DE\x10\x0f\x12\x10\n\x0cNG_CMA_SMALL\x10\x10\x1a\r\n\x0bSMCPyConfig\x1a\x19\n\x17WorklistSchedulerConfig\x1a\xac\x07\n\x0c\x43hoiceConfig\x12O\n\rvizier_config\x18\x01 \x01(\x0b\x32\x36.sight.x.proto.DecisionConfigurationStart.VizierConfigH\x00\x12K\n\x0b\x61\x63me_config\x18\x02 \x01(\x0b\x32\x34.sight.x.proto.DecisionConfigurationStart.AcmeConfigH\x00\x12\x64\n\x18genetic_algorithm_config\x18\x03 \x01(\x0b\x32@.sight.x.proto.DecisionConfigurationStart.GeneticAlgorithmConfigH\x00\x12\x64\n\x18\x65xhaustive_search_config\x18\x04 \x01(\x0b\x32@.sight.x.proto.DecisionConfigurationStart.ExhaustiveSearchConfigH\x00\x12I\n\nllm_config\x18\x05 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.LLMConfigH\x00\x12Z\n\x13\x62\x61yesian_opt_config\x18\x06 \x01(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.BayesianOptConfigH\x00\x12j\n\x1bsensitivity_analysis_config\x18\x07 \x01(\x0b\x32\x43.sight.x.proto.DecisionConfigurationStart.SensitivityAnalysisConfigH\x00\x12V\n\x11never_grad_config\x18\x08 \x01(\x0b\x32\x39.sight.x.proto.DecisionConfigurationStart.NeverGradConfigH\x00\x12N\n\rsmc_py_config\x18\t \x01(\x0b\x32\x35.sight.x.proto.DecisionConfigurationStart.SMCPyConfigH\x00\x12\x66\n\x19worklist_scheduler_config\x18\n \x01(\x0b\x32\x41.sight.x.proto.DecisionConfigurationStart.WorklistSchedulerConfigH\x00\x42\x0f\n\rchoice_config\x1ak\n\x11\x43hoiceConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x45\n\x05value\x18\x02 \x01(\x0b\x32\x36.sight.x.proto.DecisionConfigurationStart.ChoiceConfig:\x02\x38\x01\x1a\x8d\x02\n\tAttrProps\x12\x11\n\tmin_value\x18\x01 \x01(\x02\x12\x11\n\tmax_value\x18\x02 \x01(\x02\x12\x11\n\tstep_size\x18\x03 \x01(\x02\x12\x1a\n\x12valid_float_values\x18\x04 \x03(\x02\x12\x18\n\x10valid_int_values\x18\x08 \x03(\x03\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\x12?\n\x14\x63ontinuous_prob_dist\x18\x06 \x01(\x0b\x32!.sight.x.proto.ContinuousProbDist\x12;\n\x12\x64iscrete_prob_dist\x18\x07 \x01(\x0b\x32\x1f.sight.x.proto.DiscreteProbDist\x1a\x66\n\x0fStateAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\x1ag\n\x10\x41\x63tionAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\x1ah\n\x11OutcomeAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 
\x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\"\xea\x01\n\rOptimizerType\x12\x0e\n\nOT_UNKNOWN\x10\x00\x12\r\n\tOT_VIZIER\x10\x01\x12\x0b\n\x07OT_ACME\x10\x02\x12\x18\n\x14OT_GENETIC_ALGORITHM\x10\x03\x12\x18\n\x14OT_EXHAUSTIVE_SEARCH\x10\x04\x12\n\n\x06OT_LLM\x10\x05\x12\x13\n\x0fOT_BAYESIAN_OPT\x10\x06\x12\x1b\n\x17OT_SENSITIVITY_ANALYSIS\x10\x07\x12\x11\n\rOT_NEVER_GRAD\x10\x08\x12\r\n\tOT_SMC_PY\x10\t\x12\x19\n\x15OT_WORKLIST_SCHEDULER\x10\n\"U\n\x08\x44\x61taType\x12\r\n\tDT_UNKOWN\x10\x00\x12\x0c\n\x08\x44T_INT32\x10\x01\x12\x0c\n\x08\x44T_INT64\x10\x02\x12\x0e\n\nDT_FLOAT32\x10\x03\x12\x0e\n\nDT_FLOAT64\x10\x04\"A\n\rDecisionParam\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.sight.x.proto.Value\"\xa5\x01\n\rDecisionPoint\x12\x14\n\x0c\x63hoice_label\x18\x01 \x01(\t\x12\x15\n\rchosen_option\x18\x02 \x01(\t\x12\x33\n\rchoice_params\x18\x03 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x32\n\x0cstate_params\x18\x04 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\"\x80\x01\n\x0f\x44\x65\x63isionOutcome\x12\x15\n\routcome_label\x18\x01 \x01(\t\x12\x0e\n\x06reward\x18\x02 \x01(\x02\x12\x10\n\x08\x64iscount\x18\x03 \x01(\x02\x12\x34\n\x0eoutcome_params\x18\x04 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParamb\x06proto3' +) _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sight.proto.sight_pb2', globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sight.proto.sight_pb2', + globals()) if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None @@ -30,144 +34,144 @@ _DECISIONCONFIGURATIONSTART_ACTIONATTRSENTRY._serialized_options = b'8\001' _DECISIONCONFIGURATIONSTART_OUTCOMEATTRSENTRY._options = None _DECISIONCONFIGURATIONSTART_OUTCOMEATTRSENTRY._serialized_options = b'8\001' - _ATTRIBUTE._serialized_start=117 - _ATTRIBUTE._serialized_end=156 - _OBJECT._serialized_start=159 - _OBJECT._serialized_end=1744 - _OBJECT_METRICS._serialized_start=1264 - _OBJECT_METRICS._serialized_end=1352 - _OBJECT_ORDER._serialized_start=1354 - _OBJECT_ORDER._serialized_end=1383 - _OBJECT_SUBTYPE._serialized_start=1386 - _OBJECT_SUBTYPE._serialized_end=1724 - _PROPOSEACTION._serialized_start=1747 - _PROPOSEACTION._serialized_end=1883 - _CONFIGURATIONSTART._serialized_start=1886 - _CONFIGURATIONSTART._serialized_end=2122 - _CONFIGURATIONSTART_SUBTYPE._serialized_start=2046 - _CONFIGURATIONSTART_SUBTYPE._serialized_end=2102 - _EXCEPTION._serialized_start=2124 - _EXCEPTION._serialized_end=2183 - _TENSOR._serialized_start=2186 - _TENSOR._serialized_end=2913 - _TENSOR_STRINGVALUES._serialized_start=2652 - _TENSOR_STRINGVALUES._serialized_end=2681 - _TENSOR_BYTESVALUES._serialized_start=2683 - _TENSOR_BYTESVALUES._serialized_end=2711 - _TENSOR_INT64VALUES._serialized_start=2713 - _TENSOR_INT64VALUES._serialized_end=2741 - _TENSOR_DOUBLEVALUES._serialized_start=2743 - _TENSOR_DOUBLEVALUES._serialized_end=2772 - _TENSOR_BOOLVALUES._serialized_start=2774 - _TENSOR_BOOLVALUES._serialized_end=2801 - _TENSOR_SUBTYPE._serialized_start=2803 - _TENSOR_SUBTYPE._serialized_end=2899 - _LINK._serialized_start=2916 - _LINK._serialized_end=3072 - _LINK_LINKTYPE._serialized_start=2998 - _LINK_LINKTYPE._serialized_end=3072 - _TENSORFLOWEXAMPLE._serialized_start=3075 - _TENSORFLOWEXAMPLE._serialized_end=3345 - _LOG._serialized_start=3347 - _LOG._serialized_end=3388 - _TEXT._serialized_start=3390 - _TEXT._serialized_end=3510 - _TEXT_SUBTYPE._serialized_start=3459 - 
_TEXT_SUBTYPE._serialized_end=3510 - _VALUE._serialized_start=3513 - _VALUE._serialized_end=3885 - _VALUE_SUBTYPE._serialized_start=3749 - _VALUE_SUBTYPE._serialized_end=3871 - _BLOCKSTART._serialized_start=3888 - _BLOCKSTART._serialized_end=5208 - _BLOCKSTART_SUBTYPE._serialized_start=4726 - _BLOCKSTART_SUBTYPE._serialized_end=5188 - _BLOCKEND._serialized_start=5211 - _BLOCKEND._serialized_end=6271 - _BLOCKEND_METRICS._serialized_start=5774 - _BLOCKEND_METRICS._serialized_end=5808 - _BLOCKEND_SUBTYPE._serialized_start=5811 - _BLOCKEND_SUBTYPE._serialized_end=6251 - _LISTSTART._serialized_start=6274 - _LISTSTART._serialized_end=6449 - _LISTSTART_SUBTYPE._serialized_start=6339 - _LISTSTART_SUBTYPE._serialized_end=6449 - _TENSORFLOWMODELTRAININGEPOCHSTART._serialized_start=6451 - _TENSORFLOWMODELTRAININGEPOCHSTART._serialized_end=6525 - _ATTRIBUTESTART._serialized_start=6527 - _ATTRIBUTESTART._serialized_end=6588 - _ATTRIBUTEEND._serialized_start=6590 - _ATTRIBUTEEND._serialized_end=6617 - _PARAMS._serialized_start=6620 - _PARAMS._serialized_end=7054 - _SIMULATIONSTART._serialized_start=7056 - _SIMULATIONSTART._serialized_end=7073 - _SIMULATIONPARAMETERSSTART._serialized_start=7075 - _SIMULATIONPARAMETERSSTART._serialized_end=7102 - _SIMULATIONTIMESTEPSTART._serialized_start=7105 - _SIMULATIONTIMESTEPSTART._serialized_end=7417 - _SIMULATIONTIMESTEPSTART_TIMESTEPUNITS._serialized_start=7280 - _SIMULATIONTIMESTEPSTART_TIMESTEPUNITS._serialized_end=7417 - _CONTINUOUSPROBDIST._serialized_start=7420 - _CONTINUOUSPROBDIST._serialized_end=7660 - _CONTINUOUSPROBDIST_GAUSSIAN._serialized_start=7568 - _CONTINUOUSPROBDIST_GAUSSIAN._serialized_end=7607 - _CONTINUOUSPROBDIST_UNIFORM._serialized_start=7609 - _CONTINUOUSPROBDIST_UNIFORM._serialized_end=7652 - _DISCRETEPROBDIST._serialized_start=7663 - _DISCRETEPROBDIST._serialized_end=7794 - _DISCRETEPROBDIST_UNIFORM._serialized_start=7743 - _DISCRETEPROBDIST_UNIFORM._serialized_end=7786 - _DECISIONCONFIGURATIONSTART._serialized_start=7797 - _DECISIONCONFIGURATIONSTART._serialized_end=11419 - _DECISIONCONFIGURATIONSTART_VIZIERCONFIG._serialized_start=8258 - _DECISIONCONFIGURATIONSTART_VIZIERCONFIG._serialized_end=8272 - _DECISIONCONFIGURATIONSTART_ACMECONFIG._serialized_start=8275 - _DECISIONCONFIGURATIONSTART_ACMECONFIG._serialized_end=8516 - _DECISIONCONFIGURATIONSTART_ACMECONFIG_ACMEAGENT._serialized_start=8374 - _DECISIONCONFIGURATIONSTART_ACMECONFIG_ACMEAGENT._serialized_end=8516 - _DECISIONCONFIGURATIONSTART_GENETICALGORITHMCONFIG._serialized_start=8518 - _DECISIONCONFIGURATIONSTART_GENETICALGORITHMCONFIG._serialized_end=8571 - _DECISIONCONFIGURATIONSTART_EXHAUSTIVESEARCHCONFIG._serialized_start=8573 - _DECISIONCONFIGURATIONSTART_EXHAUSTIVESEARCHCONFIG._serialized_end=8597 - _DECISIONCONFIGURATIONSTART_LLMCONFIG._serialized_start=8600 - _DECISIONCONFIGURATIONSTART_LLMCONFIG._serialized_end=8963 - _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMALGORITHM._serialized_start=8794 - _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMALGORITHM._serialized_end=8881 - _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMGOAL._serialized_start=8883 - _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMGOAL._serialized_end=8963 - _DECISIONCONFIGURATIONSTART_BAYESIANOPTCONFIG._serialized_start=8965 - _DECISIONCONFIGURATIONSTART_BAYESIANOPTCONFIG._serialized_end=8984 - _DECISIONCONFIGURATIONSTART_SENSITIVITYANALYSISCONFIG._serialized_start=8986 - _DECISIONCONFIGURATIONSTART_SENSITIVITYANALYSISCONFIG._serialized_end=9013 - _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG._serialized_start=9016 - 
_DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG._serialized_end=9414
- _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG_NEVERGRADALGORITHM._serialized_start=9133
- _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG_NEVERGRADALGORITHM._serialized_end=9414
- _DECISIONCONFIGURATIONSTART_SMCPYCONFIG._serialized_start=9416
- _DECISIONCONFIGURATIONSTART_SMCPYCONFIG._serialized_end=9429
- _DECISIONCONFIGURATIONSTART_WORKLISTSCHEDULERCONFIG._serialized_start=9431
- _DECISIONCONFIGURATIONSTART_WORKLISTSCHEDULERCONFIG._serialized_end=9456
- _DECISIONCONFIGURATIONSTART_CHOICECONFIG._serialized_start=9459
- _DECISIONCONFIGURATIONSTART_CHOICECONFIG._serialized_end=10399
- _DECISIONCONFIGURATIONSTART_CHOICECONFIGENTRY._serialized_start=10401
- _DECISIONCONFIGURATIONSTART_CHOICECONFIGENTRY._serialized_end=10508
- _DECISIONCONFIGURATIONSTART_ATTRPROPS._serialized_start=10511
- _DECISIONCONFIGURATIONSTART_ATTRPROPS._serialized_end=10780
- _DECISIONCONFIGURATIONSTART_STATEATTRSENTRY._serialized_start=10782
- _DECISIONCONFIGURATIONSTART_STATEATTRSENTRY._serialized_end=10884
- _DECISIONCONFIGURATIONSTART_ACTIONATTRSENTRY._serialized_start=10886
- _DECISIONCONFIGURATIONSTART_ACTIONATTRSENTRY._serialized_end=10989
- _DECISIONCONFIGURATIONSTART_OUTCOMEATTRSENTRY._serialized_start=10991
- _DECISIONCONFIGURATIONSTART_OUTCOMEATTRSENTRY._serialized_end=11095
- _DECISIONCONFIGURATIONSTART_OPTIMIZERTYPE._serialized_start=11098
- _DECISIONCONFIGURATIONSTART_OPTIMIZERTYPE._serialized_end=11332
- _DECISIONCONFIGURATIONSTART_DATATYPE._serialized_start=11334
- _DECISIONCONFIGURATIONSTART_DATATYPE._serialized_end=11419
- _DECISIONPARAM._serialized_start=11421
- _DECISIONPARAM._serialized_end=11486
- _DECISIONPOINT._serialized_start=11489
- _DECISIONPOINT._serialized_end=11654
- _DECISIONOUTCOME._serialized_start=11657
- _DECISIONOUTCOME._serialized_end=11785
+ _ATTRIBUTE._serialized_start = 117
+ _ATTRIBUTE._serialized_end = 156
+ _OBJECT._serialized_start = 159
+ _OBJECT._serialized_end = 1744
+ _OBJECT_METRICS._serialized_start = 1264
+ _OBJECT_METRICS._serialized_end = 1352
+ _OBJECT_ORDER._serialized_start = 1354
+ _OBJECT_ORDER._serialized_end = 1383
+ _OBJECT_SUBTYPE._serialized_start = 1386
+ _OBJECT_SUBTYPE._serialized_end = 1724
+ _PROPOSEACTION._serialized_start = 1747
+ _PROPOSEACTION._serialized_end = 1883
+ _CONFIGURATIONSTART._serialized_start = 1886
+ _CONFIGURATIONSTART._serialized_end = 2122
+ _CONFIGURATIONSTART_SUBTYPE._serialized_start = 2046
+ _CONFIGURATIONSTART_SUBTYPE._serialized_end = 2102
+ _EXCEPTION._serialized_start = 2124
+ _EXCEPTION._serialized_end = 2183
+ _TENSOR._serialized_start = 2186
+ _TENSOR._serialized_end = 2913
+ _TENSOR_STRINGVALUES._serialized_start = 2652
+ _TENSOR_STRINGVALUES._serialized_end = 2681
+ _TENSOR_BYTESVALUES._serialized_start = 2683
+ _TENSOR_BYTESVALUES._serialized_end = 2711
+ _TENSOR_INT64VALUES._serialized_start = 2713
+ _TENSOR_INT64VALUES._serialized_end = 2741
+ _TENSOR_DOUBLEVALUES._serialized_start = 2743
+ _TENSOR_DOUBLEVALUES._serialized_end = 2772
+ _TENSOR_BOOLVALUES._serialized_start = 2774
+ _TENSOR_BOOLVALUES._serialized_end = 2801
+ _TENSOR_SUBTYPE._serialized_start = 2803
+ _TENSOR_SUBTYPE._serialized_end = 2899
+ _LINK._serialized_start = 2916
+ _LINK._serialized_end = 3072
+ _LINK_LINKTYPE._serialized_start = 2998
+ _LINK_LINKTYPE._serialized_end = 3072
+ _TENSORFLOWEXAMPLE._serialized_start = 3075
+ _TENSORFLOWEXAMPLE._serialized_end = 3345
+ _LOG._serialized_start = 3347
+ _LOG._serialized_end = 3388
+ _TEXT._serialized_start = 3390
+ _TEXT._serialized_end = 3510
+ _TEXT_SUBTYPE._serialized_start = 3459
+ _TEXT_SUBTYPE._serialized_end = 3510
+ _VALUE._serialized_start = 3513
+ _VALUE._serialized_end = 3885
+ _VALUE_SUBTYPE._serialized_start = 3749
+ _VALUE_SUBTYPE._serialized_end = 3871
+ _BLOCKSTART._serialized_start = 3888
+ _BLOCKSTART._serialized_end = 5208
+ _BLOCKSTART_SUBTYPE._serialized_start = 4726
+ _BLOCKSTART_SUBTYPE._serialized_end = 5188
+ _BLOCKEND._serialized_start = 5211
+ _BLOCKEND._serialized_end = 6271
+ _BLOCKEND_METRICS._serialized_start = 5774
+ _BLOCKEND_METRICS._serialized_end = 5808
+ _BLOCKEND_SUBTYPE._serialized_start = 5811
+ _BLOCKEND_SUBTYPE._serialized_end = 6251
+ _LISTSTART._serialized_start = 6274
+ _LISTSTART._serialized_end = 6449
+ _LISTSTART_SUBTYPE._serialized_start = 6339
+ _LISTSTART_SUBTYPE._serialized_end = 6449
+ _TENSORFLOWMODELTRAININGEPOCHSTART._serialized_start = 6451
+ _TENSORFLOWMODELTRAININGEPOCHSTART._serialized_end = 6525
+ _ATTRIBUTESTART._serialized_start = 6527
+ _ATTRIBUTESTART._serialized_end = 6588
+ _ATTRIBUTEEND._serialized_start = 6590
+ _ATTRIBUTEEND._serialized_end = 6617
+ _PARAMS._serialized_start = 6620
+ _PARAMS._serialized_end = 7054
+ _SIMULATIONSTART._serialized_start = 7056
+ _SIMULATIONSTART._serialized_end = 7073
+ _SIMULATIONPARAMETERSSTART._serialized_start = 7075
+ _SIMULATIONPARAMETERSSTART._serialized_end = 7102
+ _SIMULATIONTIMESTEPSTART._serialized_start = 7105
+ _SIMULATIONTIMESTEPSTART._serialized_end = 7417
+ _SIMULATIONTIMESTEPSTART_TIMESTEPUNITS._serialized_start = 7280
+ _SIMULATIONTIMESTEPSTART_TIMESTEPUNITS._serialized_end = 7417
+ _CONTINUOUSPROBDIST._serialized_start = 7420
+ _CONTINUOUSPROBDIST._serialized_end = 7660
+ _CONTINUOUSPROBDIST_GAUSSIAN._serialized_start = 7568
+ _CONTINUOUSPROBDIST_GAUSSIAN._serialized_end = 7607
+ _CONTINUOUSPROBDIST_UNIFORM._serialized_start = 7609
+ _CONTINUOUSPROBDIST_UNIFORM._serialized_end = 7652
+ _DISCRETEPROBDIST._serialized_start = 7663
+ _DISCRETEPROBDIST._serialized_end = 7794
+ _DISCRETEPROBDIST_UNIFORM._serialized_start = 7743
+ _DISCRETEPROBDIST_UNIFORM._serialized_end = 7786
+ _DECISIONCONFIGURATIONSTART._serialized_start = 7797
+ _DECISIONCONFIGURATIONSTART._serialized_end = 11419
+ _DECISIONCONFIGURATIONSTART_VIZIERCONFIG._serialized_start = 8258
+ _DECISIONCONFIGURATIONSTART_VIZIERCONFIG._serialized_end = 8272
+ _DECISIONCONFIGURATIONSTART_ACMECONFIG._serialized_start = 8275
+ _DECISIONCONFIGURATIONSTART_ACMECONFIG._serialized_end = 8516
+ _DECISIONCONFIGURATIONSTART_ACMECONFIG_ACMEAGENT._serialized_start = 8374
+ _DECISIONCONFIGURATIONSTART_ACMECONFIG_ACMEAGENT._serialized_end = 8516
+ _DECISIONCONFIGURATIONSTART_GENETICALGORITHMCONFIG._serialized_start = 8518
+ _DECISIONCONFIGURATIONSTART_GENETICALGORITHMCONFIG._serialized_end = 8571
+ _DECISIONCONFIGURATIONSTART_EXHAUSTIVESEARCHCONFIG._serialized_start = 8573
+ _DECISIONCONFIGURATIONSTART_EXHAUSTIVESEARCHCONFIG._serialized_end = 8597
+ _DECISIONCONFIGURATIONSTART_LLMCONFIG._serialized_start = 8600
+ _DECISIONCONFIGURATIONSTART_LLMCONFIG._serialized_end = 8963
+ _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMALGORITHM._serialized_start = 8794
+ _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMALGORITHM._serialized_end = 8881
+ _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMGOAL._serialized_start = 8883
+ _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMGOAL._serialized_end = 8963
+ _DECISIONCONFIGURATIONSTART_BAYESIANOPTCONFIG._serialized_start = 8965
+ _DECISIONCONFIGURATIONSTART_BAYESIANOPTCONFIG._serialized_end = 8984
+ _DECISIONCONFIGURATIONSTART_SENSITIVITYANALYSISCONFIG._serialized_start = 8986
+ _DECISIONCONFIGURATIONSTART_SENSITIVITYANALYSISCONFIG._serialized_end = 9013
+ _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG._serialized_start = 9016
+ _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG._serialized_end = 9414
+ _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG_NEVERGRADALGORITHM._serialized_start = 9133
+ _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG_NEVERGRADALGORITHM._serialized_end = 9414
+ _DECISIONCONFIGURATIONSTART_SMCPYCONFIG._serialized_start = 9416
+ _DECISIONCONFIGURATIONSTART_SMCPYCONFIG._serialized_end = 9429
+ _DECISIONCONFIGURATIONSTART_WORKLISTSCHEDULERCONFIG._serialized_start = 9431
+ _DECISIONCONFIGURATIONSTART_WORKLISTSCHEDULERCONFIG._serialized_end = 9456
+ _DECISIONCONFIGURATIONSTART_CHOICECONFIG._serialized_start = 9459
+ _DECISIONCONFIGURATIONSTART_CHOICECONFIG._serialized_end = 10399
+ _DECISIONCONFIGURATIONSTART_CHOICECONFIGENTRY._serialized_start = 10401
+ _DECISIONCONFIGURATIONSTART_CHOICECONFIGENTRY._serialized_end = 10508
+ _DECISIONCONFIGURATIONSTART_ATTRPROPS._serialized_start = 10511
+ _DECISIONCONFIGURATIONSTART_ATTRPROPS._serialized_end = 10780
+ _DECISIONCONFIGURATIONSTART_STATEATTRSENTRY._serialized_start = 10782
+ _DECISIONCONFIGURATIONSTART_STATEATTRSENTRY._serialized_end = 10884
+ _DECISIONCONFIGURATIONSTART_ACTIONATTRSENTRY._serialized_start = 10886
+ _DECISIONCONFIGURATIONSTART_ACTIONATTRSENTRY._serialized_end = 10989
+ _DECISIONCONFIGURATIONSTART_OUTCOMEATTRSENTRY._serialized_start = 10991
+ _DECISIONCONFIGURATIONSTART_OUTCOMEATTRSENTRY._serialized_end = 11095
+ _DECISIONCONFIGURATIONSTART_OPTIMIZERTYPE._serialized_start = 11098
+ _DECISIONCONFIGURATIONSTART_OPTIMIZERTYPE._serialized_end = 11332
+ _DECISIONCONFIGURATIONSTART_DATATYPE._serialized_start = 11334
+ _DECISIONCONFIGURATIONSTART_DATATYPE._serialized_end = 11419
+ _DECISIONPARAM._serialized_start = 11421
+ _DECISIONPARAM._serialized_end = 11486
+ _DECISIONPOINT._serialized_start = 11489
+ _DECISIONPOINT._serialized_end = 11654
+ _DECISIONOUTCOME._serialized_start = 11657
+ _DECISIONOUTCOME._serialized_end = 11785
# @@protoc_insertion_point(module_scope)
diff --git a/py/sight/proto/widgets/pipeline/flume/flume_pb2.py b/py/sight/proto/widgets/pipeline/flume/flume_pb2.py
index 408c3cc..4b284c2 100644
--- a/py/sight/proto/widgets/pipeline/flume/flume_pb2.py
+++ b/py/sight/proto/widgets/pipeline/flume/flume_pb2.py
@@ -2,42 +2,43 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: sight/proto/widgets/pipeline/flume/flume.proto
"""Generated protocol buffer code."""
-from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()

-
-
-
-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n.sight/proto/widgets/pipeline/flume/flume.proto\x12\x1bsight.x.widgets.flume.proto\" \n\x0f\x46lumeDoFnCreate\x12\r\n\x05label\x18\x01 \x01(\t\"\"\n\x11\x46lumeDoFnComplete\x12\r\n\x05label\x18\x01 \x01(\t\"Y\n\x10\x46lumeDoFnStartDo\x12\x16\n\x0einput_stage_id\x18\x01 \x01(\x03\x12\x15\n\rinput_item_id\x18\x02 \x01(\x03\x12\x16\n\x0eis_passthrough\x18\x03 \x01(\x08\"(\n\x0e\x46lumeDoFnEndDo\x12\x16\n\x0einput_stage_id\x18\x01 \x01(\x03\"2\n\rFlumeDoFnEmit\x12\x10\n\x08stage_id\x18\x01 \x01(\x03\x12\x0f\n\x07item_id\x18\x02 \x01(\x03\"<\n\x0b\x46lumeDepend\x12\x16\n\x0einput_stage_id\x18\x01 \x01(\x03\x12\x15\n\rinput_item_id\x18\x02 \x01(\x03\"%\n\x14\x46lumeCompareFnCreate\x12\r\n\x05label\x18\x01 \x01(\t\"\'\n\x16\x46lumeCompareFnComplete\x12\r\n\x05label\x18\x01 \x01(\t\"~\n\x1a\x46lumeCompareFnStartCompare\x12\x17\n\x0finput1_stage_id\x18\x01 \x01(\x03\x12\x16\n\x0einput1_item_id\x18\x02 \x01(\x03\x12\x17\n\x0finput2_stage_id\x18\x03 \x01(\x03\x12\x16\n\x0einput2_item_id\x18\x04 \x01(\x03\"2\n\x18\x46lumeCompareFnEndCompare\x12\x16\n\x0einput_stage_id\x18\x01 \x01(\x03\x62\x06proto3')
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
+ b'\n.sight/proto/widgets/pipeline/flume/flume.proto\x12\x1bsight.x.widgets.flume.proto\" \n\x0f\x46lumeDoFnCreate\x12\r\n\x05label\x18\x01 \x01(\t\"\"\n\x11\x46lumeDoFnComplete\x12\r\n\x05label\x18\x01 \x01(\t\"Y\n\x10\x46lumeDoFnStartDo\x12\x16\n\x0einput_stage_id\x18\x01 \x01(\x03\x12\x15\n\rinput_item_id\x18\x02 \x01(\x03\x12\x16\n\x0eis_passthrough\x18\x03 \x01(\x08\"(\n\x0e\x46lumeDoFnEndDo\x12\x16\n\x0einput_stage_id\x18\x01 \x01(\x03\"2\n\rFlumeDoFnEmit\x12\x10\n\x08stage_id\x18\x01 \x01(\x03\x12\x0f\n\x07item_id\x18\x02 \x01(\x03\"<\n\x0b\x46lumeDepend\x12\x16\n\x0einput_stage_id\x18\x01 \x01(\x03\x12\x15\n\rinput_item_id\x18\x02 \x01(\x03\"%\n\x14\x46lumeCompareFnCreate\x12\r\n\x05label\x18\x01 \x01(\t\"\'\n\x16\x46lumeCompareFnComplete\x12\r\n\x05label\x18\x01 \x01(\t\"~\n\x1a\x46lumeCompareFnStartCompare\x12\x17\n\x0finput1_stage_id\x18\x01 \x01(\x03\x12\x16\n\x0einput1_item_id\x18\x02 \x01(\x03\x12\x17\n\x0finput2_stage_id\x18\x03 \x01(\x03\x12\x16\n\x0einput2_item_id\x18\x04 \x01(\x03\"2\n\x18\x46lumeCompareFnEndCompare\x12\x16\n\x0einput_stage_id\x18\x01 \x01(\x03\x62\x06proto3'
+)
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
-_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sight.proto.widgets.pipeline.flume.flume_pb2', globals())
+_builder.BuildTopDescriptorsAndMessages(
+ DESCRIPTOR, 'sight.proto.widgets.pipeline.flume.flume_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
- _FLUMEDOFNCREATE._serialized_start=79
- _FLUMEDOFNCREATE._serialized_end=111
- _FLUMEDOFNCOMPLETE._serialized_start=113
- _FLUMEDOFNCOMPLETE._serialized_end=147
- _FLUMEDOFNSTARTDO._serialized_start=149
- _FLUMEDOFNSTARTDO._serialized_end=238
- _FLUMEDOFNENDDO._serialized_start=240
- _FLUMEDOFNENDDO._serialized_end=280
- _FLUMEDOFNEMIT._serialized_start=282
- _FLUMEDOFNEMIT._serialized_end=332
- _FLUMEDEPEND._serialized_start=334
- _FLUMEDEPEND._serialized_end=394
- _FLUMECOMPAREFNCREATE._serialized_start=396
- _FLUMECOMPAREFNCREATE._serialized_end=433
- _FLUMECOMPAREFNCOMPLETE._serialized_start=435
- _FLUMECOMPAREFNCOMPLETE._serialized_end=474
- _FLUMECOMPAREFNSTARTCOMPARE._serialized_start=476
- _FLUMECOMPAREFNSTARTCOMPARE._serialized_end=602
- _FLUMECOMPAREFNENDCOMPARE._serialized_start=604
- _FLUMECOMPAREFNENDCOMPARE._serialized_end=654
+ _FLUMEDOFNCREATE._serialized_start = 79
+ _FLUMEDOFNCREATE._serialized_end = 111
+ _FLUMEDOFNCOMPLETE._serialized_start = 113
+ _FLUMEDOFNCOMPLETE._serialized_end = 147
+ _FLUMEDOFNSTARTDO._serialized_start = 149
+ _FLUMEDOFNSTARTDO._serialized_end = 238
+ _FLUMEDOFNENDDO._serialized_start = 240
+ _FLUMEDOFNENDDO._serialized_end = 280
+ _FLUMEDOFNEMIT._serialized_start = 282
+ _FLUMEDOFNEMIT._serialized_end = 332
+ _FLUMEDEPEND._serialized_start = 334
+ _FLUMEDEPEND._serialized_end = 394
+ _FLUMECOMPAREFNCREATE._serialized_start = 396
+ _FLUMECOMPAREFNCREATE._serialized_end = 433
+ _FLUMECOMPAREFNCOMPLETE._serialized_start = 435
+ _FLUMECOMPAREFNCOMPLETE._serialized_end = 474
+ _FLUMECOMPAREFNSTARTCOMPARE._serialized_start = 476
+ _FLUMECOMPAREFNSTARTCOMPARE._serialized_end = 602
+ _FLUMECOMPAREFNENDCOMPARE._serialized_start = 604
+ _FLUMECOMPAREFNENDCOMPARE._serialized_end = 654
# @@protoc_insertion_point(module_scope)
diff --git a/py/sight/publish_log.py b/py/sight/publish_log.py
index a6c30ab..4c07712 100644
--- a/py/sight/publish_log.py
+++ b/py/sight/publish_log.py
@@ -11,27 +11,29 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Binary that takes a local Sight log file and publishes it remotely."""

from absl import app
from absl import flags
-from google3.analysis.dremel.core.capacitor.public.python import pywrap_record_reader # pylint:disable=line-too-long
+from google3.analysis.dremel.core.capacitor.public.python import (
+ pywrap_record_reader # pylint:disable=line-too-long
+)
from google3.googlex.cortex.sight.proto import sight_pb2
from google3.googlex.cortex.sight.py.sight import Sight
from google3.pyglib.contrib.gpathlib import gpath_flag

-_LOG_FILE = gpath_flag.DEFINE_path(
- 'log_file', None, 'File that contains the Sight log.', required=True
-)
+_LOG_FILE = gpath_flag.DEFINE_path('log_file',
+ None,
+ 'File that contains the Sight log.',
+ required=True)

-_LOG_OWNER = flags.DEFINE_string(
- 'log_owner', None, 'The owner of the published log.', required=True
-)
+_LOG_OWNER = flags.DEFINE_string('log_owner',
+ None,
+ 'The owner of the published log.',
+ required=True)

-_LOG_LABEL = flags.DEFINE_string(
- 'log_label', 'Sight Log', 'The descriptive label of this log.'
-) +_LOG_LABEL = flags.DEFINE_string('log_label', 'Sight Log', + 'The descriptive label of this log.') FLAGS = flags.FLAGS @@ -45,11 +47,9 @@ def main(argv): label=_LOG_LABEL.value, log_owner=_LOG_OWNER.value, capacitor_output=True, - ) - ) as sight: + )) as sight: record_reader = pywrap_record_reader.RecordReader.CreateFromPath( - _LOG_FILE.value, '*', 60 - ) + _LOG_FILE.value, '*', 60) for obj in record_reader.IterRecords(): sight.log_object(obj) diff --git a/py/sight/service_utils.py b/py/sight/service_utils.py index adceffe..1b5a0b6 100644 --- a/py/sight/service_utils.py +++ b/py/sight/service_utils.py @@ -21,12 +21,13 @@ import time from typing import Any, Callable import uuid + from absl import flags -from helpers.logs.logs_handler import logger as logging from dotenv import load_dotenv import google.auth.transport.requests import google.oauth2.id_token import grpc +from helpers.logs.logs_handler import logger as logging import requests from sight_service.proto import service_pb2 from sight_service.proto import service_pb2_grpc @@ -65,10 +66,7 @@ _DEPLOYMENT_MODE = flags.DEFINE_enum( 'deployment_mode', None, - [ - 'vm', 'distributed', 'local', 'dsub_local', 'docker_local', - 'worker_mode' - ], + ['vm', 'distributed', 'local', 'dsub_local', 'docker_local', 'worker_mode'], ('The procedure to use when training a model to drive applications that ' 'use the Decision API.'), ) @@ -80,281 +78,281 @@ def get_service_id() -> str: - """find service id for sight server. + """find service id for sight server. Returns: service id """ - global _SERVICE_ID - global _SIGHT_SERVICE_KNOWN - - # print('os.environ : ', os.environ) - if 'SIGHT_SERVICE_ID' in os.environ: - # print('used env flow from get_service_id.....') - _SERVICE_ID = os.environ['SIGHT_SERVICE_ID'] - elif _SIGHT_SERVICE_KNOWN: - return _SERVICE_ID - elif _SERVICE_NAME.value: - _SERVICE_ID = _SERVICE_NAME.value - elif _SERVICE_DOCKER_FILE.value or _SERVICE_DOCKER_IMG.value: - _SERVICE_ID = str(uuid.uuid4()) - else: - _SERVICE_ID = 'default' - _SIGHT_SERVICE_KNOWN = True - - # logging.info("service id : %s%s", _SERVICE_PREFIX, _SERVICE_ID) + global _SERVICE_ID + global _SIGHT_SERVICE_KNOWN + + # print('os.environ : ', os.environ) + if 'SIGHT_SERVICE_ID' in os.environ: + # print('used env flow from get_service_id.....') + _SERVICE_ID = os.environ['SIGHT_SERVICE_ID'] + elif _SIGHT_SERVICE_KNOWN: return _SERVICE_ID + elif _SERVICE_NAME.value: + _SERVICE_ID = _SERVICE_NAME.value + elif _SERVICE_DOCKER_FILE.value or _SERVICE_DOCKER_IMG.value: + _SERVICE_ID = str(uuid.uuid4()) + else: + _SERVICE_ID = 'default' + _SIGHT_SERVICE_KNOWN = True + + # logging.info("service id : %s%s", _SERVICE_PREFIX, _SERVICE_ID) + return _SERVICE_ID def get_port_number() -> str: - if 'PORT' in os.environ: - return os.environ['PORT'] - # need to use secure channel for cloud run server - elif (FLAGS.deployment_mode in ['local', 'vm']): - return '8080' - else: - return FLAGS.port + if 'PORT' in os.environ: + return os.environ['PORT'] + # need to use secure channel for cloud run server + elif (FLAGS.deployment_mode in ['local', 'vm']): + return '8080' + else: + return FLAGS.port def _service_addr() -> str: - # return f'{_SERVICE_PREFIX}{get_service_id()}-dq7fdwqgbq-uc.a.run.app' - global _UNIQUE_STRING - # if('IP_ADDR' in os.environ): - # return os.environ['IP_ADDR'] - # elif (_UNIQUE_STRING): - if (_UNIQUE_STRING): - # print("unique string found : ", _UNIQUE_STRING) - # print("get_service_id() : ", get_service_id()) - return 
f'{_SERVICE_PREFIX}{get_service_id()}-{_UNIQUE_STRING}-uc.a.run.app' - else: - # print('fetching unique string.....') - try: - service_url = subprocess.getoutput( - 'gcloud run services describe' - f" {_SERVICE_PREFIX}{get_service_id()} --region us-central1 --format='value(status.url)'" - ) - print("service url : ", service_url) - _UNIQUE_STRING = re.search(r'https://.*-(\w+)-uc\.a\.run\.app', - service_url).group(1) - print("_UNIQUE_STRING : ", _UNIQUE_STRING) - except Exception as e: - logging.exception("service not found : %s", e) - # print("first _UNIQUE_STRING : ", _UNIQUE_STRING) - return f'{_SERVICE_PREFIX}{get_service_id()}-{_UNIQUE_STRING}-uc.a.run.app' + # return f'{_SERVICE_PREFIX}{get_service_id()}-dq7fdwqgbq-uc.a.run.app' + global _UNIQUE_STRING + # if('IP_ADDR' in os.environ): + # return os.environ['IP_ADDR'] + # elif (_UNIQUE_STRING): + if (_UNIQUE_STRING): + # print("unique string found : ", _UNIQUE_STRING) + # print("get_service_id() : ", get_service_id()) + return f'{_SERVICE_PREFIX}{get_service_id()}-{_UNIQUE_STRING}-uc.a.run.app' + else: + # print('fetching unique string.....') + try: + service_url = subprocess.getoutput( + 'gcloud run services describe' + f" {_SERVICE_PREFIX}{get_service_id()} --region us-central1 --format='value(status.url)'" + ) + print("service url : ", service_url) + _UNIQUE_STRING = re.search(r'https://.*-(\w+)-uc\.a\.run\.app', + service_url).group(1) + print("_UNIQUE_STRING : ", _UNIQUE_STRING) + except Exception as e: + logging.exception("service not found : %s", e) + # print("first _UNIQUE_STRING : ", _UNIQUE_STRING) + return f'{_SERVICE_PREFIX}{get_service_id()}-{_UNIQUE_STRING}-uc.a.run.app' def _find_or_deploy_server() -> str: - """deploy sight server with given docker image.""" + """deploy sight server with given docker image.""" - global _SIGHT_SERVICE_KNOWN - if (os.environ.get('SIGHT_SERVICE_ID')): - # print('service found from environment variable : ', get_service_id()) - # logging.info('service found from environment variable') - return get_service_id() + global _SIGHT_SERVICE_KNOWN + if (os.environ.get('SIGHT_SERVICE_ID')): + # print('service found from environment variable : ', get_service_id()) + # logging.info('service found from environment variable') + return get_service_id() - if _SIGHT_SERVICE_KNOWN or (not _SERVICE_DOCKER_FILE.value - and not _SERVICE_DOCKER_IMG.value): - try: - # get list of services deployed on cloud-run which - # includes given service-name - response = subprocess.getoutput( - 'gcloud run services list' - f" --filter='SERVICE:{_SERVICE_PREFIX}{get_service_id()}'") - - if response == 'Listed 0 items.': - # given service_name doesn't exist on cloud-run - if get_service_id() != 'default': - raise ValueError( - f"{_SERVICE_PREFIX}{get_service_id()} doesn't exist, try with" - ' different name...') - else: - logging.info('No such service exist : %s', - _SERVICE_PREFIX + get_service_id()) - logging.info('creating new service : %s%s', - _SERVICE_PREFIX, get_service_id()) - - else: - # given service_name exist on cloud-run - return get_service_id() - except ValueError as e: - logging.info('value Error : %s', e) - sys.exit(0) - except Exception: - # error while calling "gcloud run service list" command - try: - # sample Test call is possible if service exist - sight_service = obtain_secure_channel() - metadata = [] - id_token = generate_id_token() - metadata.append(('authorization', 'Bearer ' + id_token)) - # print("try in calling dummt test service call") - response = sight_service.Test(service_pb2.TestRequest(), - 
300, - metadata=metadata) - return get_service_id() - except Exception as error: - logging.info( - "Provided service - %s doesn't exist or Not enough permissions: %s", - get_service_id(), - error, - ) - sys.exit(0) - - # deploy new service - print('_SERVICE_ID=', get_service_id()) - docker_file_path = _SERVICE_DOCKER_FILE.value - docker_img = _SERVICE_DOCKER_IMG.value - - if get_service_id() == 'default' and not _SERVICE_DOCKER_FILE.value: - docker_file_path = 'service/Dockerfile' - # elif(not _SERVICE_DOCKER_FILE.value): - # raise ValueError( - # 'flag --service_docker_file required with any new service-name' - # ) - - if (docker_file_path): - logging.info('building img from scratch.....................') - # Step 1: Build docker image - build_out = subprocess.run( - [ - 'docker', - 'build', - '-t', - f'gcr.io/{os.environ["PROJECT_ID"]}/{_SERVICE_PREFIX}' + - get_service_id(), - '-f', - docker_file_path, - '.', - ], - check=True, - capture_output=True, + if _SIGHT_SERVICE_KNOWN or (not _SERVICE_DOCKER_FILE.value and + not _SERVICE_DOCKER_IMG.value): + try: + # get list of services deployed on cloud-run which + # includes given service-name + response = subprocess.getoutput( + 'gcloud run services list' + f" --filter='SERVICE:{_SERVICE_PREFIX}{get_service_id()}'") + + if response == 'Listed 0 items.': + # given service_name doesn't exist on cloud-run + if get_service_id() != 'default': + raise ValueError( + f"{_SERVICE_PREFIX}{get_service_id()} doesn't exist, try with" + ' different name...') + else: + logging.info('No such service exist : %s', + _SERVICE_PREFIX + get_service_id()) + logging.info('creating new service : %s%s', _SERVICE_PREFIX, + get_service_id()) + + else: + # given service_name exist on cloud-run + return get_service_id() + except ValueError as e: + logging.info('value Error : %s', e) + sys.exit(0) + except Exception: + # error while calling "gcloud run service list" command + try: + # sample Test call is possible if service exist + sight_service = obtain_secure_channel() + metadata = [] + id_token = generate_id_token() + metadata.append(('authorization', 'Bearer ' + id_token)) + # print("try in calling dummt test service call") + response = sight_service.Test(service_pb2.TestRequest(), + 300, + metadata=metadata) + return get_service_id() + except Exception as error: + logging.info( + "Provided service - %s doesn't exist or Not enough permissions: %s", + get_service_id(), + error, ) - # logging.info('build_out=%s', build_out) - - # Step 2: Retrieve an OAuth2 access token - access_token_cmd = ['gcloud', 'auth', 'print-access-token'] - access_token_result = subprocess.run(access_token_cmd, - capture_output=True, - text=True, - check=True) - access_token = access_token_result.stdout.strip() - - # Step 3: Authenticate with gcr.io using the access token - login_cmd = [ + sys.exit(0) + + # deploy new service + print('_SERVICE_ID=', get_service_id()) + docker_file_path = _SERVICE_DOCKER_FILE.value + docker_img = _SERVICE_DOCKER_IMG.value + + if get_service_id() == 'default' and not _SERVICE_DOCKER_FILE.value: + docker_file_path = 'service/Dockerfile' + # elif(not _SERVICE_DOCKER_FILE.value): + # raise ValueError( + # 'flag --service_docker_file required with any new service-name' + # ) + + if (docker_file_path): + logging.info('building img from scratch.....................') + # Step 1: Build docker image + build_out = subprocess.run( + [ 'docker', - 'login', - '-u', - 'oauth2accesstoken', - '-p', - access_token, - 'https://gcr.io', - ] - subprocess.run(login_cmd, check=True) 
- - # Step 4: push created image to gcr.io - push_out = subprocess.run( - [ - 'docker', 'push', - f'gcr.io/{os.environ["PROJECT_ID"]}/{_SERVICE_PREFIX}' + - get_service_id() - ], - check=True, - capture_output=True, - ) - # logging.info('push_out=%s', push_out) - - # Step 5: fetch image id - image_id = '' - for line in push_out.stdout.decode('utf-8').splitlines(): - m = re.search(r'sha256:([a-z0-9]+) size: [0-9]+$', line) - if m: - image_id = m.group(1) - if not image_id: - raise ValueError( - f'Failed to find image id in output of docker push:\n{push_out.stdout}' - ) - logging.info('image_id=%s', image_id) - - if (docker_file_path): - logging.info('using newly build img to deploy service') - img_name = f'gcr.io/{os.environ["PROJECT_ID"]}/{_SERVICE_PREFIX}{get_service_id()}@sha256:{image_id}' - elif (docker_img): - logging.info('using docker img to deploy service') - img_name = _SERVICE_DOCKER_IMG.value - else: - raise ValueError( - 'img_name have to specify before deplying cloud run service') + 'build', + '-t', + f'gcr.io/{os.environ["PROJECT_ID"]}/{_SERVICE_PREFIX}' + + get_service_id(), + '-f', + docker_file_path, + '.', + ], + check=True, + capture_output=True, + ) + # logging.info('build_out=%s', build_out) + + # Step 2: Retrieve an OAuth2 access token + access_token_cmd = ['gcloud', 'auth', 'print-access-token'] + access_token_result = subprocess.run(access_token_cmd, + capture_output=True, + text=True, + check=True) + access_token = access_token_result.stdout.strip() + + # Step 3: Authenticate with gcr.io using the access token + login_cmd = [ + 'docker', + 'login', + '-u', + 'oauth2accesstoken', + '-p', + access_token, + 'https://gcr.io', + ] + subprocess.run(login_cmd, check=True) - # Step 6: deploy cloud run service from deployed image - deploy_out = subprocess.run( + # Step 4: push created image to gcr.io + push_out = subprocess.run( [ - 'gcloud', - 'run', - 'deploy', - _SERVICE_PREFIX + get_service_id(), - f'--image={img_name}', - '--allow-unauthenticated', - f'--service-account={flags.FLAGS.service_account}@{os.environ["PROJECT_ID"]}.iam.gserviceaccount.com', - '--concurrency=default', - '--cpu=4', - '--memory=16Gi', - '--min-instances=1', - '--max-instances=1', - '--no-cpu-throttling', - '--region=us-central1', - f'--project={os.environ["PROJECT_ID"]}', + 'docker', 'push', + f'gcr.io/{os.environ["PROJECT_ID"]}/{_SERVICE_PREFIX}' + + get_service_id() ], check=True, capture_output=True, ) - print('deploy_out : ', deploy_out.stderr) - - logging.info('_SERVICE_ID=%s', _SERVICE_ID) - if (docker_file_path): - logging.info('deleting newly built img') - subprocess.run( - [ - 'gcloud', - 'container', - 'images', - 'delete', - f'gcr.io/{os.environ["PROJECT_ID"]}/{_SERVICE_PREFIX}{get_service_id()}@sha256:{image_id}', - '--quiet', - '--force-delete-tags', - ], - check=True, - ) - # logging.info('%s', ' '.join(['gcloud', 'run', 'services', 'delete', - # get_service_id(), - # '--region=us-central1', '--quiet'])) - # subprocess.run(['gcloud', 'run', 'services', 'delete', - # _SIGHT_SERVICE_ADDR, - # '--region=us-central1', '--quiet'], - # check=True) - - # _SIGHT_SERVICE_ADDR=f'{service_id}-dq7fdwqgbq-uc.a.run.app' - _SIGHT_SERVICE_KNOWN = True - - print( - 'Log:' - f' https://pantheon.corp.google.com/run/detail/us-central1/{_SERVICE_PREFIX}{get_service_id()}/logs?project={os.environ["PROJECT_ID"]}' + # logging.info('push_out=%s', push_out) + + # Step 5: fetch image id + image_id = '' + for line in push_out.stdout.decode('utf-8').splitlines(): + m = re.search(r'sha256:([a-z0-9]+) size: 
[0-9]+$', line) + if m: + image_id = m.group(1) + if not image_id: + raise ValueError( + f'Failed to find image id in output of docker push:\n{push_out.stdout}' + ) + logging.info('image_id=%s', image_id) + + if (docker_file_path): + logging.info('using newly build img to deploy service') + img_name = f'gcr.io/{os.environ["PROJECT_ID"]}/{_SERVICE_PREFIX}{get_service_id()}@sha256:{image_id}' + elif (docker_img): + logging.info('using docker img to deploy service') + img_name = _SERVICE_DOCKER_IMG.value + else: + raise ValueError( + 'img_name have to specify before deplying cloud run service') + + # Step 6: deploy cloud run service from deployed image + deploy_out = subprocess.run( + [ + 'gcloud', + 'run', + 'deploy', + _SERVICE_PREFIX + get_service_id(), + f'--image={img_name}', + '--allow-unauthenticated', + f'--service-account={flags.FLAGS.service_account}@{os.environ["PROJECT_ID"]}.iam.gserviceaccount.com', + '--concurrency=default', + '--cpu=4', + '--memory=16Gi', + '--min-instances=1', + '--max-instances=1', + '--no-cpu-throttling', + '--region=us-central1', + f'--project={os.environ["PROJECT_ID"]}', + ], + check=True, + capture_output=True, + ) + print('deploy_out : ', deploy_out.stderr) + + logging.info('_SERVICE_ID=%s', _SERVICE_ID) + if (docker_file_path): + logging.info('deleting newly built img') + subprocess.run( + [ + 'gcloud', + 'container', + 'images', + 'delete', + f'gcr.io/{os.environ["PROJECT_ID"]}/{_SERVICE_PREFIX}{get_service_id()}@sha256:{image_id}', + '--quiet', + '--force-delete-tags', + ], + check=True, ) + # logging.info('%s', ' '.join(['gcloud', 'run', 'services', 'delete', + # get_service_id(), + # '--region=us-central1', '--quiet'])) + # subprocess.run(['gcloud', 'run', 'services', 'delete', + # _SIGHT_SERVICE_ADDR, + # '--region=us-central1', '--quiet'], + # check=True) - return get_service_id() + # _SIGHT_SERVICE_ADDR=f'{service_id}-dq7fdwqgbq-uc.a.run.app' + _SIGHT_SERVICE_KNOWN = True + + print( + 'Log:' + f' https://pantheon.corp.google.com/run/detail/us-central1/{_SERVICE_PREFIX}{get_service_id()}/logs?project={os.environ["PROJECT_ID"]}' + ) + + return get_service_id() def finalize_server() -> None: - # if _SERVICE_DOCKER_FILE.value: - # subprocess.run(['gcloud', 'run', 'services', 'delete', - # get_service_id(), - # '--region=us-central1', '--quiet'], - # check=True) - pass + # if _SERVICE_DOCKER_FILE.value: + # subprocess.run(['gcloud', 'run', 'services', 'delete', + # get_service_id(), + # '--region=us-central1', '--quiet'], + # check=True) + pass def get_id_token_of_service_account(user_access_token, service_account, url) -> str: - """fetch id_token for given service_account using user credentials. + """fetch id_token for given service_account using user credentials. 
Args: user_access_token: token to verify identity of user generating credentials @@ -365,151 +363,151 @@ def get_id_token_of_service_account(user_access_token, service_account, Returns: id_token: id_token of service_account """ - headers = { - 'Authorization': 'Bearer ' + user_access_token, - 'Content-Type': 'application/json; charset=utf-8', - } - data = b'{"audience": "%s"}' % url.encode('utf-8') - - try: - response = requests.post( - 'https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/' - + service_account + ':generateIdToken', - headers=headers, - data=data, - ) - # print('response=%s' % response.json()) - return response.json()['token'] - except Exception as e: - logging.info('API CALL ERROR: %s', e) + headers = { + 'Authorization': 'Bearer ' + user_access_token, + 'Content-Type': 'application/json; charset=utf-8', + } + data = b'{"audience": "%s"}' % url.encode('utf-8') + + try: + response = requests.post( + 'https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/' + + service_account + ':generateIdToken', + headers=headers, + data=data, + ) + # print('response=%s' % response.json()) + return response.json()['token'] + except Exception as e: + logging.info('API CALL ERROR: %s', e) def generate_id_token(): - """fetch id_token for given user. + """fetch id_token for given user. Returns: id_token: id_token of user_account """ - # fetch id-token of service account from which we spawned D-SUB - # worker in cloud - - if 'worker_mode' in flags.FLAGS and flags.FLAGS.worker_mode == 'dsub_cloud_worker': - # print( - # 'using credentials of service account for : https://' + _service_addr() - # ) - auth_req = google.auth.transport.requests.Request() - service_account_id_token = google.oauth2.id_token.fetch_id_token( - auth_req, 'https://' + _service_addr()) - id_token = service_account_id_token - # fetch id-token locally + # fetch id-token of service account from which we spawned D-SUB + # worker in cloud + + if 'worker_mode' in flags.FLAGS and flags.FLAGS.worker_mode == 'dsub_cloud_worker': + # print( + # 'using credentials of service account for : https://' + _service_addr() + # ) + auth_req = google.auth.transport.requests.Request() + service_account_id_token = google.oauth2.id_token.fetch_id_token( + auth_req, 'https://' + _service_addr()) + id_token = service_account_id_token + # fetch id-token locally + else: + creds, project = google.auth.default() + auth_req = google.auth.transport.requests.Request() + creds.refresh(auth_req) + # impersonating service-account if passed in parameter + + if 'service_account' in flags.FLAGS and flags.FLAGS.service_account != None: + # print("using service account's credentils..... :") + user_access_token = creds.token + service_account = f'{flags.FLAGS.service_account}@{os.environ["PROJECT_ID"]}.iam.gserviceaccount.com' + # print('user_access_token : ', user_access_token) + # print('service_account : ',service_account) + url = f'https://{_service_addr()}' + service_account_id_token = get_id_token_of_service_account( + user_access_token, service_account, url) + id_token = service_account_id_token + # using user credentials else: - creds, project = google.auth.default() - auth_req = google.auth.transport.requests.Request() - creds.refresh(auth_req) - # impersonating service-account if passed in parameter - - if 'service_account' in flags.FLAGS and flags.FLAGS.service_account != None: - # print("using service account's credentils..... 
:") - user_access_token = creds.token - service_account = f'{flags.FLAGS.service_account}@{os.environ["PROJECT_ID"]}.iam.gserviceaccount.com' - # print('user_access_token : ', user_access_token) - # print('service_account : ',service_account) - url = f'https://{_service_addr()}' - service_account_id_token = get_id_token_of_service_account( - user_access_token, service_account, url) - id_token = service_account_id_token - # using user credentials - else: - # print("using user's credentils..... creds=%s" % creds) - user_id_token = creds.id_token - id_token = user_id_token + # print("using user's credentils..... creds=%s" % creds) + user_id_token = creds.id_token + id_token = user_id_token - # print('id_token : ', id_token) - return id_token + # print('id_token : ', id_token) + return id_token def obtain_secure_channel(options=None): - """create secure channel to communicate with server. + """create secure channel to communicate with server. Returns: service_handle: to communicate with server """ - # hosted server - if 'SIGHT_SERVICE_PATH' in os.environ: - cert_file = (f'{os.environ["SIGHT_SERVICE_PATH"]}/sight_service.cert') - else: - cert_file = _CERT_FILE_PATH - with open(cert_file, 'rb') as f: - creds = grpc.ssl_channel_credentials(f.read()) - - # if('IP_ADDR' in os.environ): - # url = os.environ['IP_ADDR'] - # else: - url = _service_addr() - target = '{}:{}'.format(url, get_port_number()) - # print("service_url here : ", target) - - channel = grpc.secure_channel( - target, - creds, - options, - ) - return channel + # hosted server + if 'SIGHT_SERVICE_PATH' in os.environ: + cert_file = (f'{os.environ["SIGHT_SERVICE_PATH"]}/sight_service.cert') + else: + cert_file = _CERT_FILE_PATH + with open(cert_file, 'rb') as f: + creds = grpc.ssl_channel_credentials(f.read()) + + # if('IP_ADDR' in os.environ): + # url = os.environ['IP_ADDR'] + # else: + url = _service_addr() + target = '{}:{}'.format(url, get_port_number()) + # print("service_url here : ", target) + + channel = grpc.secure_channel( + target, + creds, + options, + ) + return channel def obtain_insecure_channel(options): - """create insecure channel to communicate with server. + """create insecure channel to communicate with server. 
Returns:
service_handle: to communicate with server
"""
- if 'IP_ADDR' in os.environ:
- host = os.environ["IP_ADDR"]
- else:
- host = 'localhost'
- target = '{}:{}'.format(host, get_port_number())
- # print("service_url here : ", target)
+ if 'IP_ADDR' in os.environ:
+ host = os.environ["IP_ADDR"]
+ else:
+ host = 'localhost'
+ target = '{}:{}'.format(host, get_port_number())
+ # print("service_url here : ", target)

- channel = grpc.insecure_channel(
- target,
- options,
- )
- return channel
+ channel = grpc.insecure_channel(
+ target,
+ options,
+ )
+ return channel

def generate_metadata():
- """Generate metadata to call service with authentication."""
-
- channel_opts = [
- ('grpc.max_send_message_length', 512 * 1024 * 1024),
- ('grpc.max_receive_message_length', 512 * 1024 * 1024),
- ]
-
- if 'IP_ADDR' in os.environ or ('deployment_mode' in FLAGS and
- FLAGS.deployment_mode in ['local', 'vm']):
-
- channel = obtain_insecure_channel(channel_opts)
- sight_service = service_pb2_grpc.SightServiceStub(channel)
- metadata = []
- return sight_service, metadata
- # elif 'deployment_mode' == "worker_mode":
- # return sight_service, metadata
- else:
- #for worker spawned using vm mode, they must be connect via insecure channel
- # if():
-
- # for client code, need to find or deploy cloud run service, workers will directly get via env
- if 'deployment_mode' in FLAGS and FLAGS.deployment_mode == "distributed":
- _find_or_deploy_server()
-
- secure_channel = obtain_secure_channel()
- # print("secure_channel : ", secure_channel)
- sight_service = service_pb2_grpc.SightServiceStub(secure_channel)
- metadata = []
- id_token = generate_id_token()
- # print('id_token : ', id_token)
- metadata.append(('authorization', 'Bearer ' + id_token))
- return sight_service, metadata
+ """Generate metadata to call service with authentication."""
+
+ channel_opts = [
+ ('grpc.max_send_message_length', 512 * 1024 * 1024),
+ ('grpc.max_receive_message_length', 512 * 1024 * 1024),
+ ]
+
+ if 'IP_ADDR' in os.environ or ('deployment_mode' in FLAGS and
+ FLAGS.deployment_mode in ['local', 'vm']):
+
+ channel = obtain_insecure_channel(channel_opts)
+ sight_service = service_pb2_grpc.SightServiceStub(channel)
+ metadata = []
+ return sight_service, metadata
+ # elif 'deployment_mode' == "worker_mode":
+ # return sight_service, metadata
+ else:
+ # workers spawned using vm mode must connect via an insecure channel
+ # if():
+
+ # for client code, need to find or deploy cloud run service, workers will directly get via env
+ if 'deployment_mode' in FLAGS and FLAGS.deployment_mode == "distributed":
+ _find_or_deploy_server()
+
+ secure_channel = obtain_secure_channel()
+ # print("secure_channel : ", secure_channel)
+ sight_service = service_pb2_grpc.SightServiceStub(secure_channel)
+ metadata = []
+ id_token = generate_id_token()
+ # print('id_token : ', id_token)
+ metadata.append(('authorization', 'Bearer ' + id_token))
+ return sight_service, metadata

# def calculate_response_time(start_time):
@@ -522,7 +520,7 @@ def generate_metadata():
def call(invoke_func: Callable[[Any, Any], Any]) -> Any:
- """Calls invoke_func as many times as needed for it to complete.
+ """Calls invoke_func as many times as needed for it to complete.
After each failed call (RPCError raised), this function backs off for a
random exponentially increasing time period and retries.
@@ -533,23 +531,23 @@ def call(invoke_func: Callable[[Any, Any], Any]) -> Any: Returns: response: response received from server side after invoking the function """ - sight_service, metadata = generate_metadata() - num_retries = 0 - backoff_interval = 0.5 - # while True: - for i in range(1): - try: - response = invoke_func(sight_service, metadata) - return response - except grpc.RpcError as e: - logging.info('RPC ERROR: %s', e) - if e.code() == grpc.StatusCode.PERMISSION_DENIED: - print('NO ACCESS!!!!') - elif e.code() == grpc.StatusCode.UNIMPLEMENTED: - print('SIGHT SERVICE NOT FOUND!!!') - if num_retries == 12: - raise e - time.sleep(random.uniform(backoff_interval / 2, backoff_interval)) - logging.info('backed off for %s seconds...', backoff_interval) - backoff_interval *= 2 - num_retries += 1 + sight_service, metadata = generate_metadata() + num_retries = 0 + backoff_interval = 0.5 + # while True: + for i in range(1): + try: + response = invoke_func(sight_service, metadata) + return response + except grpc.RpcError as e: + logging.info('RPC ERROR: %s', e) + if e.code() == grpc.StatusCode.PERMISSION_DENIED: + print('NO ACCESS!!!!') + elif e.code() == grpc.StatusCode.UNIMPLEMENTED: + print('SIGHT SERVICE NOT FOUND!!!') + if num_retries == 12: + raise e + time.sleep(random.uniform(backoff_interval / 2, backoff_interval)) + logging.info('backed off for %s seconds...', backoff_interval) + backoff_interval *= 2 + num_retries += 1 diff --git a/py/sight/sight.py b/py/sight/sight.py index e9d48cd..e5192a7 100644 --- a/py/sight/sight.py +++ b/py/sight/sight.py @@ -15,23 +15,22 @@ from __future__ import annotations +import asyncio from collections import defaultdict +import contextvars +import dataclasses import inspect import io import os -import time import threading +import time from typing import Any, Optional, Sequence -from helpers.logs.logs_handler import logger as logging from absl import flags -import asyncio -import contextvars -import dataclasses from dotenv import load_dotenv import fastavro from fastavro.schema import load_schema -from sight_service.proto import service_pb2 +from helpers.logs.logs_handler import logger as logging from sight import service_utils as service from sight.exception import exception from sight.gcs_utils import create_external_bq_table @@ -39,10 +38,13 @@ from sight.location import Location from sight.proto import sight_pb2 from sight.service_utils import finalize_server -from sight.utility import MessageToDict, poll_network_batch_outcome +from sight.utility import MessageToDict +from sight.utility import poll_network_batch_outcome from sight.widgets.decision import decision -from sight.widgets.simulation.simulation_widget_state import SimulationWidgetState -from sight.widgets.simulation.simulation_widget_state import SimulationWidgetState +from sight.widgets.simulation.simulation_widget_state import ( + SimulationWidgetState +) +from sight_service.proto import service_pb2 load_dotenv() _PARENT_ID = flags.DEFINE_string('parent_id', None, @@ -54,42 +56,42 @@ def generate_default_sight_params(): - """Returns a sight object with default parameters. + """Returns a sight object with default parameters. If user has provided values for some of them while initializing, it will be used, otherwise this value are passed. 
""" - default_prams = sight_pb2.Params( - label='default_sight', - log_owner='bronovetsky@google.com', - local=True, - text_output=False, - capacitor_output=False, - avro_output=True, - log_dir_path='/tmp/', - bucket_name='sight-bucket', - gcp_path='sight-logs', - file_format='.avro', - dataset_name='sight_logs', - external_file_format='AVRO', - external_file_uri='gs://', - ) - return default_prams + default_prams = sight_pb2.Params( + label='default_sight', + log_owner='bronovetsky@google.com', + local=True, + text_output=False, + capacitor_output=False, + avro_output=True, + log_dir_path='/tmp/', + bucket_name='sight-bucket', + gcp_path='sight-logs', + file_format='.avro', + dataset_name='sight_logs', + external_file_format='AVRO', + external_file_uri='gs://', + ) + return default_prams @dataclasses.dataclass class SightLocationState: - location: Location - line_prefix: str - line_suffix: str - open_block_start_locations: list[Any] - num_direct_contents: Location - num_transitive_contents: Location - active_block_labels: list[Any] + location: Location + line_prefix: str + line_suffix: str + open_block_start_locations: list[Any] + num_direct_contents: Location + num_transitive_contents: Location + active_block_labels: list[Any] class Sight(object): - """Object that manages writing a Sight log in some structured format. + """Object that manages writing a Sight log in some structured format. Provides an interface for higher-level logging abstractions to be built on top of this base functionality. @@ -147,238 +149,229 @@ class Sight(object): file_name: """ - # The common prefix of source code files that should be removed from emitted - # log when documenting the logging code location. - CODE_FILES_PATH_PREFIX = 'runfiles/google3/' - - # The absolute path of the Sight protodb file. - # PROTODB_PATH = 'google3/googlex/cortex/sight/proto2/sight_proto2db.protodb' - - # The API Key for the BQ Sight service - # SIGHT_API_KEY = 'AKfycbz35qrsrKUmm2FITMsLW9vSbKoBxEYv4EggM_m1Q2H3' #cameltrain - # SIGHT_API_KEY = 'AKfycbw9eY9dk-JstxeAizfMfJZ8qwHm6BVmOZEgBUey-HPL' #catan-(now generalized) - SIGHT_API_KEY = 'AKfycbzU74yRL1Dc0Xu5--oJricaD-H50UgF3FKM_E8_CMP7uNesQEk-k3cm57R3vTsjbWCcxA' - - def __init__( - self, - params: sight_pb2.Params, - configuration: Optional[Sequence[sight_pb2.Object]] = None, - ): - # generating default params to run sight - default_params = generate_default_sight_params() - # replacing fields provided user - default_params.MergeFrom(params) - default_params.label = default_params.label.replace(' ', '_') - self.params = default_params - # print("self.params : ", self.params) - - # Initialize each widget's state to make sure its state field is created. - self.widget_decision_state = defaultdict(dict) - self.widget_simulation_state = SimulationWidgetState() - self.widget_simulation_state = SimulationWidgetState() - # self._configure(configuration) - - # Configure the tracking state of the Sight object, which records the current location - # in the log of the current task, including its hierarchical nesting. 
- self.pause_logging_depth = 0 - - self.location = contextvars.ContextVar('location') - self.location.set(Location()) - if 'PARENT_LOG_ID' in os.environ: - self.location.get().exit() - worker_location = (os.environ['worker_location']).split(':') - for loc in worker_location: - self.location.get().enter(loc) - self.location.get().enter(0) - self.index = 1 - - self.line_prefix = contextvars.ContextVar('line_prefix') - self.line_prefix.set('') - self.line_suffix = contextvars.ContextVar('line_suffix') - self.line_suffix.set('') - self.open_block_start_locations = contextvars.ContextVar('line_suffix') - self.open_block_start_locations.set([]) - self.num_direct_contents = contextvars.ContextVar( - 'num_direct_contents') - self.num_direct_contents.set(Location()) - self.num_transitive_contents = contextvars.ContextVar( - 'num_transitive_contents') - self.num_transitive_contents.set(Location()) - self.active_block_labels = contextvars.ContextVar( - 'active_block_labels') - self.active_block_labels.set([]) - - self.attributes = {} - self.open = True - + # The common prefix of source code files that should be removed from emitted + # log when documenting the logging code location. + CODE_FILES_PATH_PREFIX = 'runfiles/google3/' + + # The absolute path of the Sight protodb file. + # PROTODB_PATH = 'google3/googlex/cortex/sight/proto2/sight_proto2db.protodb' + + # The API Key for the BQ Sight service + # SIGHT_API_KEY = 'AKfycbz35qrsrKUmm2FITMsLW9vSbKoBxEYv4EggM_m1Q2H3' #cameltrain + # SIGHT_API_KEY = 'AKfycbw9eY9dk-JstxeAizfMfJZ8qwHm6BVmOZEgBUey-HPL' #catan-(now generalized) + SIGHT_API_KEY = 'AKfycbzU74yRL1Dc0Xu5--oJricaD-H50UgF3FKM_E8_CMP7uNesQEk-k3cm57R3vTsjbWCcxA' + + def __init__( + self, + params: sight_pb2.Params, + configuration: Optional[Sequence[sight_pb2.Object]] = None, + ): + # generating default params to run sight + default_params = generate_default_sight_params() + # replacing fields provided user + default_params.MergeFrom(params) + default_params.label = default_params.label.replace(' ', '_') + self.params = default_params + # print("self.params : ", self.params) + + # Initialize each widget's state to make sure its state field is created. + self.widget_decision_state = defaultdict(dict) + self.widget_simulation_state = SimulationWidgetState() + self.widget_simulation_state = SimulationWidgetState() + # self._configure(configuration) + + # Configure the tracking state of the Sight object, which records the current location + # in the log of the current task, including its hierarchical nesting. 
+ self.pause_logging_depth = 0 + + self.location = contextvars.ContextVar('location') + self.location.set(Location()) + if 'PARENT_LOG_ID' in os.environ: + self.location.get().exit() + worker_location = (os.environ['worker_location']).split(':') + for loc in worker_location: + self.location.get().enter(loc) + self.location.get().enter(0) + self.index = 1 + + self.line_prefix = contextvars.ContextVar('line_prefix') + self.line_prefix.set('') + self.line_suffix = contextvars.ContextVar('line_suffix') + self.line_suffix.set('') + self.open_block_start_locations = contextvars.ContextVar('line_suffix') + self.open_block_start_locations.set([]) + self.num_direct_contents = contextvars.ContextVar('num_direct_contents') + self.num_direct_contents.set(Location()) + self.num_transitive_contents = contextvars.ContextVar( + 'num_transitive_contents') + self.num_transitive_contents.set(Location()) + self.active_block_labels = contextvars.ContextVar('active_block_labels') + self.active_block_labels.set([]) + + self.attributes = {} + self.open = True + + self.id = 0 + self.set_attribute('log_uid', str(self.id)) + + if self.params.silent_logger: + return + + # The path prefix common to all the file(s) that hold the log. + self.path_prefix = '' + path_label = 'log' + if self.params.label: + path_label = self.params.label + + if self.params.in_memory: + self.path_prefix = '' + self.id = 0 + self.in_memory_log = [] + self.text_log = None + self.capacitor_log = None + self.avro_log = None + self.avro_schema = None + self.avro_record_counter = 0 + self.avro_file_counter = 0 + self.file_name = self.params.label + else: + if self.params.local: + self.path_prefix = '%s/%s' % (self.params.log_dir_path, path_label) self.id = 0 - self.set_attribute('log_uid', str(self.id)) - - if self.params.silent_logger: - return - - # The path prefix common to all the file(s) that hold the log. 
- self.path_prefix = '' - path_label = 'log' - if self.params.label: - path_label = self.params.label - - if self.params.in_memory: - self.path_prefix = '' - self.id = 0 - self.in_memory_log = [] - self.text_log = None - self.capacitor_log = None - self.avro_log = None - self.avro_schema = None - self.avro_record_counter = 0 - self.avro_file_counter = 0 - self.file_name = self.params.label - else: - if self.params.local: - self.path_prefix = '%s/%s' % (self.params.log_dir_path, - path_label) - self.id = 0 - - # Added : opening Avro file - - if self.params.avro_output: - # logging.info('#######SERVICE###############') - - try: - - if 'PARENT_LOG_ID' in os.environ: - logging.info('PARENT_LOG_ID found - worker process') - worker_location = os.environ[ - 'worker_location'].replace(':', '_') - self.path_prefix = (self.params.label + '_' + - os.environ['PARENT_LOG_ID'] + '_' + - 'worker' + '_' + worker_location + - '_' + 'log') - self.id = os.environ['PARENT_LOG_ID'] - print("log id is : ", self.id) - elif (FLAGS.sight_log_id): - logging.info('Using provided sight id') - self.id = FLAGS.sight_log_id - self.path_prefix = (self.params.label + '_' + self.id + - '_' + 'log' + '_run_mode') - else: - # logging.info('calling generate metadata') - req = service_pb2.CreateRequest( - # log_owner=self.params.log_owner, - # label=self.params.label, - # log_dir_path=self.params.log_dir_path, - # format='LF_AVRO', - ) - response = service.call( - lambda s, meta: s.Create(req, 300, metadata=meta)) - logging.info('##### response=%s #####', response) - self.id = response.id - # logging.info('PARENT_LOG_ID not found - parent process') - self.path_prefix = (self.params.label + '_' + - str(response.id) + '_' + 'log') - - except Exception as e: - logging.info('RPC ERROR: %s', e) - if not self.params.log_dir_path: - self.params.log_dir_path = '/tmp/' - self.path_prefix = '%s/%s' % (self.params.log_dir_path, - path_label) - logging.exception( - 'Logging only locally to %s due to: error %s ', - self.path_prefix, - e, - ) - self.params.local = True - - self.avro_log_file_path = ( - self.params.label + '_' + str(self.id) + '/' + - self.path_prefix - # 'client_' + str(self.id) + '/' + self.path_prefix - ) - self.file_name = self.avro_log_file_path.split('/')[-1] - # self.table_name = self.params.label + '_' + str(self.id) + '_' + 'log' - self.table_name = str(self.id) + '_' + 'log' - - if 'SIGHT_PATH' in os.environ: - self.avro_schema = load_schema( - f'{os.environ["SIGHT_PATH"]}/../avrofile-schema.avsc') - else: - # print('avro-schema path is : ', _SCHEMA_FILE_PATH) - self.avro_schema = load_schema(_SCHEMA_FILE_PATH) - self.avro_log = io.BytesIO() - self.avro_record_counter = 0 - self.avro_file_counter = 0 - - if self.params.text_output: - self.text_log_file_path = self.path_prefix + '.txt' - self.text_log = open(self.text_log_file_path, 'w') - else: - self.text_log = None - - # if build_data.Changelist(): - # self.change_list_number = int(build_data.Changelist()) - # self.set_attribute('change_list_number', str(self.change_list_number)) - # if build_data.CitcSnapshot(): - # self.citc_snapshot = int(build_data.CitcSnapshot()) - # self.set_attribute('citc_snapshot', str(self.citc_snapshot)) - - def get_location_state(self) -> SightLocationState: - return SightLocationState( - self.location.get().clone(), - self.line_prefix.get(), - self.line_suffix.get(), - self.open_block_start_locations.get().copy(), - self.num_direct_contents.get().clone(), - self.num_transitive_contents.get().clone(), - 
self.active_block_labels.get().copy(), - ) - def set_location_state(self, state: SightLocationState) -> None: - self.location.set(state.location) - self.line_prefix.set(state.line_prefix) - self.line_suffix.set(state.line_suffix) - self.open_block_start_locations.set(state.open_block_start_locations) - self.num_direct_contents.set(state.num_direct_contents) - self.num_transitive_contents.set(state.num_transitive_contents) - self.active_block_labels.set(state.active_block_labels) + # Added : opening Avro file + + if self.params.avro_output: + # logging.info('#######SERVICE###############') + + try: + + if 'PARENT_LOG_ID' in os.environ: + logging.info('PARENT_LOG_ID found - worker process') + worker_location = os.environ['worker_location'].replace(':', '_') + self.path_prefix = (self.params.label + '_' + + os.environ['PARENT_LOG_ID'] + '_' + 'worker' + + '_' + worker_location + '_' + 'log') + self.id = os.environ['PARENT_LOG_ID'] + print("log id is : ", self.id) + elif (FLAGS.sight_log_id): + logging.info('Using provided sight id') + self.id = FLAGS.sight_log_id + self.path_prefix = (self.params.label + '_' + self.id + '_' + + 'log' + '_run_mode') + else: + # logging.info('calling generate metadata') + req = service_pb2.CreateRequest( + # log_owner=self.params.log_owner, + # label=self.params.label, + # log_dir_path=self.params.log_dir_path, + # format='LF_AVRO', + ) + response = service.call( + lambda s, meta: s.Create(req, 300, metadata=meta)) + logging.info('##### response=%s #####', response) + self.id = response.id + # logging.info('PARENT_LOG_ID not found - parent process') + self.path_prefix = (self.params.label + '_' + str(response.id) + + '_' + 'log') + + except Exception as e: + logging.info('RPC ERROR: %s', e) + if not self.params.log_dir_path: + self.params.log_dir_path = '/tmp/' + self.path_prefix = '%s/%s' % (self.params.log_dir_path, path_label) + logging.exception( + 'Logging only locally to %s due to: error %s ', + self.path_prefix, + e, + ) + self.params.local = True + + self.avro_log_file_path = ( + self.params.label + '_' + str(self.id) + '/' + self.path_prefix + # 'client_' + str(self.id) + '/' + self.path_prefix + ) + self.file_name = self.avro_log_file_path.split('/')[-1] + # self.table_name = self.params.label + '_' + str(self.id) + '_' + 'log' + self.table_name = str(self.id) + '_' + 'log' - def create_task(self, func): - frame = inspect.currentframe().f_back + if 'SIGHT_PATH' in os.environ: + self.avro_schema = load_schema( + f'{os.environ["SIGHT_PATH"]}/../avrofile-schema.avsc') + else: + # print('avro-schema path is : ', _SCHEMA_FILE_PATH) + self.avro_schema = load_schema(_SCHEMA_FILE_PATH) + self.avro_log = io.BytesIO() + self.avro_record_counter = 0 + self.avro_file_counter = 0 + + if self.params.text_output: + self.text_log_file_path = self.path_prefix + '.txt' + self.text_log = open(self.text_log_file_path, 'w') + else: + self.text_log = None + + # if build_data.Changelist(): + # self.change_list_number = int(build_data.Changelist()) + # self.set_attribute('change_list_number', str(self.change_list_number)) + # if build_data.CitcSnapshot(): + # self.citc_snapshot = int(build_data.CitcSnapshot()) + # self.set_attribute('citc_snapshot', str(self.citc_snapshot)) + + def get_location_state(self) -> SightLocationState: + return SightLocationState( + self.location.get().clone(), + self.line_prefix.get(), + self.line_suffix.get(), + self.open_block_start_locations.get().copy(), + self.num_direct_contents.get().clone(), + self.num_transitive_contents.get().clone(), + 
self.active_block_labels.get().copy(), + ) - async def go(func, state): - # self.location.set(temp_location) - self.set_location_state(state) - # label = f'id={task_id}' - # print('%s/%s: outside self.location=%s/%s' % (task_id, asyncio.current_task().get_name(), self.location.get(), id(self.location.get()))) - # self.enter_block(label, sight_pb2.Object(), frame) - # print('%s/%s: inside self.location=%s/%s' % (task_id, asyncio.current_task().get_name(), self.location.get(), id(self.location.get()))) - return await func - # self.exit_block(label, sight_pb2.Object(), frame) - - self.enter_block( - f'asyncio.create_task: {asyncio.current_task().get_name()}', - sight_pb2.Object(), frame) - state = self.get_location_state() #self.location.get().clone() - # print('%s/%s: temp_location=%s=%s' % (task_id, asyncio.current_task().get_name(), state, id(state))) - - new_task = asyncio.create_task(go(func, - state)) #, name=f'task_{task_id}') - self.exit_block( - f'asyncio.create_task: {asyncio.current_task().get_name()}', - sight_pb2.Object(), frame) - return new_task - - @classmethod - def silent(cls) -> Sight: - return Sight(sight_pb2.Params(silent_logger=True)) - - def new( - self, - params: sight_pb2.Params, - configuration: Optional[Sequence[sight_pb2.Object]] = None, - ) -> Sight: - """Returns a new instance of Sight. + def set_location_state(self, state: SightLocationState) -> None: + self.location.set(state.location) + self.line_prefix.set(state.line_prefix) + self.line_suffix.set(state.line_suffix) + self.open_block_start_locations.set(state.open_block_start_locations) + self.num_direct_contents.set(state.num_direct_contents) + self.num_transitive_contents.set(state.num_transitive_contents) + self.active_block_labels.set(state.active_block_labels) + + def create_task(self, func): + frame = inspect.currentframe().f_back + + async def go(func, state): + # self.location.set(temp_location) + self.set_location_state(state) + # label = f'id={task_id}' + # print('%s/%s: outside self.location=%s/%s' % (task_id, asyncio.current_task().get_name(), self.location.get(), id(self.location.get()))) + # self.enter_block(label, sight_pb2.Object(), frame) + # print('%s/%s: inside self.location=%s/%s' % (task_id, asyncio.current_task().get_name(), self.location.get(), id(self.location.get()))) + return await func + # self.exit_block(label, sight_pb2.Object(), frame) + + self.enter_block( + f'asyncio.create_task: {asyncio.current_task().get_name()}', + sight_pb2.Object(), frame) + state = self.get_location_state() #self.location.get().clone() + # print('%s/%s: temp_location=%s=%s' % (task_id, asyncio.current_task().get_name(), state, id(state))) + + new_task = asyncio.create_task(go(func, state)) #, name=f'task_{task_id}') + self.exit_block(f'asyncio.create_task: {asyncio.current_task().get_name()}', + sight_pb2.Object(), frame) + return new_task + + @classmethod + def silent(cls) -> Sight: + return Sight(sight_pb2.Params(silent_logger=True)) + + def new( + self, + params: sight_pb2.Params, + configuration: Optional[Sequence[sight_pb2.Object]] = None, + ) -> Sight: + """Returns a new instance of Sight. This method is useful for creating new Sight logger objects in cases where it is not feasible to import Sight (due to circular import dependencies) @@ -389,128 +382,122 @@ def new( params: Primary configuration parameters of the logger. configuration: Sight log that contains additional configuration details. 
""" - return Sight(params, configuration) - - def __enter__(self): - return self - - def __exit__(self, exc_type, value, traceback): - if self.params.silent_logger: - self.close() - return - if exc_type is not None: - # pytype: disable=attribute-error - exception(exc_type, value, traceback, self, - inspect.currentframe().f_back) - # pytype: enable=attribute-error - - # last rpc call to server for this sight id - req = service_pb2.CloseRequest() - req.client_id = str(self.id) - response = service.call( - lambda s, meta: s.Close(req, 300, metadata=meta)) - # print("close rpc status :", response.response_str) - self.close() - - def __del__(self): - self.close() - - def close(self): - """Closes this logger. Finalizes all log files so are ready for use.""" - if self.params.silent_logger: - return - - if not self.open: - return - - if hasattr(self, 'citc_snapshot'): - self.unset_attribute('citc_snapshot') - if hasattr(self, 'change_list_number'): - self.unset_attribute('change_list_number') - - if self.text_log: - self.text_log.close() - - if self.avro_log: - if self.avro_log.getbuffer().nbytes > 0: - self.avro_file_counter += 1 - upload_blob_from_stream( - self.params.bucket_name, - self.params.gcp_path, - self.avro_log, - self.avro_log_file_path, - self.avro_file_counter, - ) - # if this is the only avro file, table has not been created yet - if self.avro_file_counter == 1: - create_external_bq_table(self.params, self.table_name, - self.id) - logging.info( - 'Log GUI : https://script.google.com/a/google.com/macros/s/%s/exec?' - 'log_id=%s.%s&log_owner=%s&project_id=%s', - self.SIGHT_API_KEY, self.params.dataset_name, - self.table_name, self.params.log_owner, - os.environ['PROJECT_ID']) - print( - f'table generated : {self.params.dataset_name}.{self.table_name}' - ) - self.avro_log.close() - - if not self.params.local and not self.params.in_memory: - logging.info( - ( - #'Log : https://script.google.com/a/google.com/macros/s/%s/exec?' - 'Log : https://script.google.com/a/google.com/macros/s/%s/dev?' - 'log_id=%s.%s&log_owner=%s&project_id=%s', ), - self.SIGHT_API_KEY, - self.params.dataset_name, - self.table_name, - self.params.log_owner, - os.environ['PROJECT_ID']) - - if (FLAGS.decision_mode == 'train'): - decision.finalize(self) - finalize_server() - self.open = False - - def pause_logging(self) -> None: - self.pause_logging_depth += 1 - - def resume_logging(self) -> None: - self.pause_logging_depth -= 1 - - def is_logging_enabled(self) -> bool: - return not self.params.silent_logger and self.pause_logging_depth <= 1 - - def get_in_memory_log(self) -> sight_pb2.Log: - """Returns a proto that contains the full Sight in-memory log.""" - log = sight_pb2.Log() - - if self.in_memory_log: - log.obj.extend(self.in_memory_log) - - return log - - def set_object_code_loc(self, obj: sight_pb2.Object, frame: Any) -> None: - """Updates obj with the calling context information in frame. 
+    return Sight(params, configuration)
+
+  def __enter__(self):
+    return self
+
+  def __exit__(self, exc_type, value, traceback):
+    if self.params.silent_logger:
+      self.close()
+      return
+    if exc_type is not None:
+      # pytype: disable=attribute-error
+      exception(exc_type, value, traceback, self, inspect.currentframe().f_back)
+      # pytype: enable=attribute-error
+
+    # last rpc call to server for this sight id
+    req = service_pb2.CloseRequest()
+    req.client_id = str(self.id)
+    response = service.call(lambda s, meta: s.Close(req, 300, metadata=meta))
+    # print("close rpc status :", response.response_str)
+    self.close()
+
+  def __del__(self):
+    self.close()
+
+  def close(self):
+    """Closes this logger. Finalizes all log files so they are ready for use."""
+    if self.params.silent_logger:
+      return
+
+    if not self.open:
+      return
+
+    if hasattr(self, 'citc_snapshot'):
+      self.unset_attribute('citc_snapshot')
+    if hasattr(self, 'change_list_number'):
+      self.unset_attribute('change_list_number')
+
+    if self.text_log:
+      self.text_log.close()
+
+    if self.avro_log:
+      if self.avro_log.getbuffer().nbytes > 0:
+        self.avro_file_counter += 1
+        upload_blob_from_stream(
+            self.params.bucket_name,
+            self.params.gcp_path,
+            self.avro_log,
+            self.avro_log_file_path,
+            self.avro_file_counter,
+        )
+        # if this is the only avro file, the table has not been created yet
+        if self.avro_file_counter == 1:
+          create_external_bq_table(self.params, self.table_name, self.id)
+          logging.info(
+              'Log GUI : https://script.google.com/a/google.com/macros/s/%s/exec?'
+              'log_id=%s.%s&log_owner=%s&project_id=%s', self.SIGHT_API_KEY,
+              self.params.dataset_name, self.table_name, self.params.log_owner,
+              os.environ['PROJECT_ID'])
+          print(f'table generated : {self.params.dataset_name}.{self.table_name}')
+      self.avro_log.close()
+
+    if not self.params.local and not self.params.in_memory:
+      logging.info(
+          #'Log : https://script.google.com/a/google.com/macros/s/%s/exec?'
+          'Log : https://script.google.com/a/google.com/macros/s/%s/dev?'
+          'log_id=%s.%s&log_owner=%s&project_id=%s',
+          self.SIGHT_API_KEY,
+          self.params.dataset_name,
+          self.table_name,
+          self.params.log_owner,
+          os.environ['PROJECT_ID'])
+
+    if (FLAGS.decision_mode == 'train'):
+      decision.finalize(self)
+    finalize_server()
+    self.open = False
+
+  def pause_logging(self) -> None:
+    self.pause_logging_depth += 1
+
+  def resume_logging(self) -> None:
+    self.pause_logging_depth -= 1
+
+  def is_logging_enabled(self) -> bool:
+    return not self.params.silent_logger and self.pause_logging_depth <= 1
+
+  def get_in_memory_log(self) -> sight_pb2.Log:
+    """Returns a proto that contains the full Sight in-memory log."""
+    log = sight_pb2.Log()
+
+    if self.in_memory_log:
+      log.obj.extend(self.in_memory_log)
+
+    return log
+
+  def set_object_code_loc(self, obj: sight_pb2.Object, frame: Any) -> None:
+    """Updates obj with the calling context information in frame.

    Args:
      obj: The object to be updated
      frame: The call stack frame that contains the calling context
        information.
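
    The recorded file path is made relative to CODE_FILES_PATH_PREFIX when
    that prefix appears in the path, and is kept verbatim otherwise.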
""" - frameinfo = inspect.getframeinfo(frame) - google3_loc = frameinfo.filename.find(self.CODE_FILES_PATH_PREFIX) - if google3_loc >= 0: - obj.file = frameinfo.filename[google3_loc + - len(self.CODE_FILES_PATH_PREFIX):] - else: - obj.file = frameinfo.filename - obj.line = frameinfo.lineno - obj.func = frameinfo.function + frameinfo = inspect.getframeinfo(frame) + google3_loc = frameinfo.filename.find(self.CODE_FILES_PATH_PREFIX) + if google3_loc >= 0: + obj.file = frameinfo.filename[google3_loc + + len(self.CODE_FILES_PATH_PREFIX):] + else: + obj.file = frameinfo.filename + obj.line = frameinfo.lineno + obj.func = frameinfo.function - def text(self, text_val: str, end='\n', frame=None) -> str: - """Logs a text value to the Sight log. + def text(self, text_val: str, end='\n', frame=None) -> str: + """Logs a text value to the Sight log. Args: text_val: The text value to be logged. @@ -521,43 +508,39 @@ def text(self, text_val: str, end='\n', frame=None) -> str: Returns: The logged text. """ - if self.params.silent_logger or self.pause_logging_depth > 0: - return '' - - obj = sight_pb2.Object() - if frame is None: - # pytype: disable=attribute-error - frame = inspect.currentframe().f_back - # pytype: enable=attribute-error - self.set_object_code_loc(obj, frame) - - if self.is_binary_logged(): - obj.sub_type = sight_pb2.Object.SubType.ST_TEXT - obj.text.text = text_val.replace('\n', '\\n') + end - obj.text.sub_type = sight_pb2.Text.SubType.ST_TEXT - self.log_object(obj, True) - - if end == '\n': - full_text_line = '(%s:%d) function : %s\n %s\n' % ( - obj.file, - obj.line, - obj.func, - # self.line_prefix, - text_val, - # self.line_suffix, - ) - else: - full_text_line = text_val + end - self.emit_text_to_file(full_text_line) + if self.params.silent_logger or self.pause_logging_depth > 0: + return '' - return full_text_line - - def text_block(self, - label: str, - text_val: str, - end='\n', - frame=None) -> str: - """Logs a block that contains a specified text string as its contents. + obj = sight_pb2.Object() + if frame is None: + # pytype: disable=attribute-error + frame = inspect.currentframe().f_back + # pytype: enable=attribute-error + self.set_object_code_loc(obj, frame) + + if self.is_binary_logged(): + obj.sub_type = sight_pb2.Object.SubType.ST_TEXT + obj.text.text = text_val.replace('\n', '\\n') + end + obj.text.sub_type = sight_pb2.Text.SubType.ST_TEXT + self.log_object(obj, True) + + if end == '\n': + full_text_line = '(%s:%d) function : %s\n %s\n' % ( + obj.file, + obj.line, + obj.func, + # self.line_prefix, + text_val, + # self.line_suffix, + ) + else: + full_text_line = text_val + end + self.emit_text_to_file(full_text_line) + + return full_text_line + + def text_block(self, label: str, text_val: str, end='\n', frame=None) -> str: + """Logs a block that contains a specified text string as its contents. Args: label: The label of the block. @@ -569,38 +552,37 @@ def text_block(self, Returns: The logged text. 
""" - if self.params.silent_logger or self.pause_logging_depth > 0: - return '' + if self.params.silent_logger or self.pause_logging_depth > 0: + return '' - if frame is None: - # pytype: disable=attribute-error - frame = inspect.currentframe().f_back - # pytype: enable=attribute-error - self.enter_block(label, sight_pb2.Object(), frame) - ret_val = self.text(text_val, end, frame) - self.exit_block(label, sight_pb2.Object(), frame) + if frame is None: + # pytype: disable=attribute-error + frame = inspect.currentframe().f_back + # pytype: enable=attribute-error + self.enter_block(label, sight_pb2.Object(), frame) + ret_val = self.text(text_val, end, frame) + self.exit_block(label, sight_pb2.Object(), frame) - return ret_val + return ret_val - def gap(self) -> Optional[Location]: - """Logs a dummy gap value value to the Sight log. + def gap(self) -> Optional[Location]: + """Logs a dummy gap value value to the Sight log. Returns: The location of the dummy object in the log. """ - if self.params.silent_logger or self.pause_logging_depth > 0: - return None + if self.params.silent_logger or self.pause_logging_depth > 0: + return None - if self.is_binary_logged(): - return self.log_object( - sight_pb2.Object(sub_type=sight_pb2.Object.SubType.ST_GAP), - True) + if self.is_binary_logged(): + return self.log_object( + sight_pb2.Object(sub_type=sight_pb2.Object.SubType.ST_GAP), True) - def enter_block(self, - label: str, - obj: sight_pb2.Object, - frame: Optional[Any] = None) -> Optional[Location]: - """Documents in the Sight log that a hierarchical block was entered. + def enter_block(self, + label: str, + obj: sight_pb2.Object, + frame: Optional[Any] = None) -> Optional[Location]: + """Documents in the Sight log that a hierarchical block was entered. Args: label: The label of the block. @@ -613,48 +595,45 @@ def enter_block(self, Returns: The log Location of the block's starting point. 
""" - if self.params.silent_logger: - return None + if self.params.silent_logger: + return None - if self.pause_logging_depth > 0: - return self.location.get() + if self.pause_logging_depth > 0: + return self.location.get() - self.active_block_labels.get().append(label) - # self.emit_text_to_file( - # self.line_prefix + label + '<<<' + self.line_suffix + '\n' - # ) - self.emit_text_to_file(self.line_prefix.get() + label + '\n' + '>>> ' + - '\n') - self.line_prefix.set(self.line_prefix.get() + label + ': ') + self.active_block_labels.get().append(label) + # self.emit_text_to_file( + # self.line_prefix + label + '<<<' + self.line_suffix + '\n' + # ) + self.emit_text_to_file(self.line_prefix.get() + label + '\n' + '>>> ' + + '\n') + self.line_prefix.set(self.line_prefix.get() + label + ': ') - obj_location = self.location.get() - if self.is_binary_logged(): - obj.sub_type = sight_pb2.Object.SubType.ST_BLOCK_START + obj_location = self.location.get() + if self.is_binary_logged(): + obj.sub_type = sight_pb2.Object.SubType.ST_BLOCK_START - if obj.block_start is None: - obj.block_start = sight_pb2.BlockStart() - obj.block_start.label = label + if obj.block_start is None: + obj.block_start = sight_pb2.BlockStart() + obj.block_start.label = label - if frame is None: - # pytype: disable=attribute-error - frame = inspect.currentframe().f_back - # pytype: enable=attribute-error - self.set_object_code_loc(obj, frame) + if frame is None: + # pytype: disable=attribute-error + frame = inspect.currentframe().f_back + # pytype: enable=attribute-error + self.set_object_code_loc(obj, frame) - self.log_object(obj, False) - self.open_block_start_locations.get().append(obj.location) + self.log_object(obj, False) + self.open_block_start_locations.get().append(obj.location) - self.num_direct_contents.get().enter(0) - self.num_transitive_contents.get().enter(0) - self.location.get().enter(0) + self.num_direct_contents.get().enter(0) + self.num_transitive_contents.get().enter(0) + self.location.get().enter(0) - return obj_location + return obj_location - def exit_block(self, - label: str, - obj: sight_pb2.Object, - frame=None) -> None: - """Documents in the Sight log that a hierarchical block was exited. + def exit_block(self, label: str, obj: sight_pb2.Object, frame=None) -> None: + """Documents in the Sight log that a hierarchical block was exited. Args: label: the label of the block. @@ -664,61 +643,59 @@ def exit_block(self, frame: the call stack frame that the calling context from which the logging event was created. 
""" - if self.params.silent_logger or self.pause_logging_depth > 0: - return - - if not self.active_block_labels.get() or self.location.get().size( - ) == 1: - logging.warning('Exiting inactive Sight block "%s"', label) - return - - self.active_block_labels.get().pop() - self.line_prefix.set('') - for block_label in self.active_block_labels.get(): - self.line_prefix.set(self.line_prefix.get() + block_label + ': ') - - self.location.get().exit() - self.location.get().next() - - if self.is_binary_logged(): - if not self.open_block_start_locations.get(): - logging.warning('Exiting inactive Sight block "%s"', label) - - obj.sub_type = sight_pb2.Object.SubType.ST_BLOCK_END - if obj.block_end is None: - obj.block_end = sight_pb2.BlockEnd() - obj.block_end.label = label - obj.block_end.num_direct_contents = self.num_direct_contents.get( - ).pos() - obj.block_end.num_transitive_contents = self.num_transitive_contents.get( - ).pos() - obj.block_end.location_of_block_start = self.open_block_start_locations.get( - )[-1] - self.open_block_start_locations.get().pop() - - if frame is None: - # pytype: disable=attribute-error - frame = inspect.currentframe().f_back - # pytype: enable=attribute-error - self.set_object_code_loc(obj, frame) - - self.log_object(obj, True) - - self.emit_text_to_file( - # self.line_prefix + label + '>>>' + self.line_suffix + '\n' - '<<< ' + '\n') - - self.num_direct_contents.get().exit() - self.num_transitive_contents.get().exit() - - def _update_line_suffix(self) -> None: - # Each value in self.attributes is non-empty since empty values are removed - # in unset_attribute. - self.line_suffix.set('| ' + ','.join( - [f'{key}={value[-1]}' for key, value in self.attributes.items()])) - - def set_attribute(self, key: str, value: str) -> None: - """Documents in the Sight log a new key-value attribute mapping. + if self.params.silent_logger or self.pause_logging_depth > 0: + return + + if not self.active_block_labels.get() or self.location.get().size() == 1: + logging.warning('Exiting inactive Sight block "%s"', label) + return + + self.active_block_labels.get().pop() + self.line_prefix.set('') + for block_label in self.active_block_labels.get(): + self.line_prefix.set(self.line_prefix.get() + block_label + ': ') + + self.location.get().exit() + self.location.get().next() + + if self.is_binary_logged(): + if not self.open_block_start_locations.get(): + logging.warning('Exiting inactive Sight block "%s"', label) + + obj.sub_type = sight_pb2.Object.SubType.ST_BLOCK_END + if obj.block_end is None: + obj.block_end = sight_pb2.BlockEnd() + obj.block_end.label = label + obj.block_end.num_direct_contents = self.num_direct_contents.get().pos() + obj.block_end.num_transitive_contents = self.num_transitive_contents.get( + ).pos() + obj.block_end.location_of_block_start = self.open_block_start_locations.get( + )[-1] + self.open_block_start_locations.get().pop() + + if frame is None: + # pytype: disable=attribute-error + frame = inspect.currentframe().f_back + # pytype: enable=attribute-error + self.set_object_code_loc(obj, frame) + + self.log_object(obj, True) + + self.emit_text_to_file( + # self.line_prefix + label + '>>>' + self.line_suffix + '\n' + '<<< ' + '\n') + + self.num_direct_contents.get().exit() + self.num_transitive_contents.get().exit() + + def _update_line_suffix(self) -> None: + # Each value in self.attributes is non-empty since empty values are removed + # in unset_attribute. 
+    self.line_suffix.set('| ' + ','.join(
+        [f'{key}={value[-1]}' for key, value in self.attributes.items()]))
+
+  def set_attribute(self, key: str, value: str) -> None:
+    """Documents in the Sight log a new key-value attribute mapping.

    Until the mapping is unset all logged objects will be annotated with this
    key/value pair.
@@ -727,11 +704,11 @@ def set_attribute(self, key: str, value: str) -> None:
      key: the name of the key being set.
      value: the value assigned to key.
    """
-        self.attributes.setdefault(key, []).append(value)
-        self._update_line_suffix()
+    self.attributes.setdefault(key, []).append(value)
+    self._update_line_suffix()

-    def unset_attribute(self, key: str) -> None:
-        """Removes from the Sight log a new key-value attribute mapping.
+  def unset_attribute(self, key: str) -> None:
+    """Removes a key-value attribute mapping from the Sight log.

    Subsequent logged objects will no longer be annotated with this
    key/value pair. If the key had a value mapped to it before the value
@@ -741,30 +718,29 @@ def unset_attribute(self, key: str) -> None:

    Args:
      key: the name of the key being unset.
    """
-        values = self.attributes.get(key)
-        if not values:
-            logging.error('Failed to unset attribute %s, which is not set.',
-                          key)
-            return
+    values = self.attributes.get(key)
+    if not values:
+      logging.error('Failed to unset attribute %s, which is not set.', key)
+      return

-        values.pop()
-        if not values:
-            del self.attributes[key]
+    values.pop()
+    if not values:
+      del self.attributes[key]

-        self._update_line_suffix()
+    self._update_line_suffix()

-    def fetch_attributes(self) -> dict[str, str]:
-        """Fetches all the values of attributes that is currently set to within Sight.
+  def fetch_attributes(self) -> dict[str, str]:
+    """Fetches the current values of all attributes that are set within Sight.

    Returns:
      The dictionary that contains the key-value pairs of the attributes that
      are currently set.
    """
-        attr_dict = {}
-        for k, v in self.attributes.items():
-            attr_dict[k] = v[-1]
-        return attr_dict
+    attr_dict = {}
+    for k, v in self.attributes.items():
+      attr_dict[k] = v[-1]
+    return attr_dict

-    def get_attribute(self, key: str) -> str:
-        """Fetches the value that a key is currently set to within Sight.
+  def get_attribute(self, key: str) -> str:
+    """Fetches the value that a key is currently set to within Sight.

    Args:
      key: the name of the key being fetched.
@@ -772,15 +748,15 @@ def get_attribute(self, key: str) -> str:

    Returns:
      The value that key is currently set to.
    """
-        values = self.attributes.get(key)
-        if not values:
-            return ''
-        return values[-1]
+    values = self.attributes.get(key)
+    if not values:
+      return ''
+    return values[-1]

-    def log_object(self,
-                   obj: sight_pb2.Object,
-                   advance_location: bool = True) -> Optional[Location]:
-        """Emits a single object to the Sight log.
+  def log_object(self,
+                 obj: sight_pb2.Object,
+                 advance_location: bool = True) -> Optional[Location]:
+    """Emits a single object to the Sight log.

    Args:
      obj: A Sight object where the log event is to be recorded. This object may
@@ -791,138 +767,136 @@ def log_object(self,

    Returns:
      The Location of the logged object.
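
    Example (an illustrative sketch; mirrors the call made by gap()):
      loc = sight.log_object(
          sight_pb2.Object(sub_type=sight_pb2.Object.SubType.ST_GAP), True)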
""" - if self.params.silent_logger: - return None - - if self.pause_logging_depth > 0: - return self.location.get() - - if not self.num_direct_contents.get().is_empty(): - self.num_direct_contents.get().next() - self.num_transitive_contents.get().next_all() - - obj_location = self.location.get() - if self.is_binary_logged(): - obj.location = str(self.location.get()) - obj.index = self.index - self.index += 1 - - for key, value in self.attributes.items(): - if not value: - logging.warning('No attributes recorded for key %s', key) - continue - - attr = obj.attribute.add() - attr.key = key - attr.value = str(value[-1]) - - for loc in self.open_block_start_locations.get(): - obj.ancestor_start_location.append(str(loc)) - obj.ancestor_start_location.append(str(self.location.get())) - - obj.order.timestamp_ns = time.time_ns() - - if self.params.in_memory: - self.in_memory_log.append(obj) - elif self.avro_log: - dict_obj = MessageToDict(obj, preserving_proto_field_name=True) - fastavro.writer(self.avro_log, self.avro_schema, [dict_obj]) - self.avro_record_counter += 1 - if self.avro_record_counter % 1000 == 0: - self.avro_file_counter += 1 - upload_blob_from_stream( - self.params.bucket_name, - self.params.gcp_path, - self.avro_log, - self.avro_log_file_path, - self.avro_file_counter, - ) - if self.avro_file_counter == 1: - create_external_bq_table(self.params, self.table_name, - self.id) - logging.info( - 'Log GUI : https://script.google.com/a/google.com/macros/s/%s/exec?' - 'log_id=%s.%s&log_owner=%s&project_id=%s', - self.SIGHT_API_KEY, self.params.dataset_name, - self.table_name, self.params.log_owner, - os.environ['PROJECT_ID']) - print( - f'table generated : {self.params.dataset_name}.{self.table_name}' - ) - self.avro_log.close() - self.avro_log = io.BytesIO() - - if advance_location: - self.location.get().next() - - return obj_location - - def emit_text_to_file(self, text_val: str) -> None: - """Emits text to the output text file, if one is being used. + if self.params.silent_logger: + return None + + if self.pause_logging_depth > 0: + return self.location.get() + + if not self.num_direct_contents.get().is_empty(): + self.num_direct_contents.get().next() + self.num_transitive_contents.get().next_all() + + obj_location = self.location.get() + if self.is_binary_logged(): + obj.location = str(self.location.get()) + obj.index = self.index + self.index += 1 + + for key, value in self.attributes.items(): + if not value: + logging.warning('No attributes recorded for key %s', key) + continue + + attr = obj.attribute.add() + attr.key = key + attr.value = str(value[-1]) + + for loc in self.open_block_start_locations.get(): + obj.ancestor_start_location.append(str(loc)) + obj.ancestor_start_location.append(str(self.location.get())) + + obj.order.timestamp_ns = time.time_ns() + + if self.params.in_memory: + self.in_memory_log.append(obj) + elif self.avro_log: + dict_obj = MessageToDict(obj, preserving_proto_field_name=True) + fastavro.writer(self.avro_log, self.avro_schema, [dict_obj]) + self.avro_record_counter += 1 + if self.avro_record_counter % 1000 == 0: + self.avro_file_counter += 1 + upload_blob_from_stream( + self.params.bucket_name, + self.params.gcp_path, + self.avro_log, + self.avro_log_file_path, + self.avro_file_counter, + ) + if self.avro_file_counter == 1: + create_external_bq_table(self.params, self.table_name, self.id) + logging.info( + 'Log GUI : https://script.google.com/a/google.com/macros/s/%s/exec?' 
+ 'log_id=%s.%s&log_owner=%s&project_id=%s', self.SIGHT_API_KEY, + self.params.dataset_name, self.table_name, + self.params.log_owner, os.environ['PROJECT_ID']) + print( + f'table generated : {self.params.dataset_name}.{self.table_name}' + ) + self.avro_log.close() + self.avro_log = io.BytesIO() + + if advance_location: + self.location.get().next() + + return obj_location + + def emit_text_to_file(self, text_val: str) -> None: + """Emits text to the output text file, if one is being used. Args: text_val: The text to be logged. """ - if self.params.silent_logger or self.pause_logging_depth > 0: - return + if self.params.silent_logger or self.pause_logging_depth > 0: + return - if self.text_log: - self.text_log.write(text_val) - # logging.info(text_val) + if self.text_log: + self.text_log.write(text_val) + # logging.info(text_val) - def is_binary_logged(self) -> bool: - """Returns whether a binary proto representation is being logged.""" - # return self.params.capacitor_output - return self.params.avro_output + def is_binary_logged(self) -> bool: + """Returns whether a binary proto representation is being logged.""" + # return self.params.capacitor_output + return self.params.avro_output - def _configure(self, configuration: Sequence[sight_pb2.Object]) -> None: - """Initializes the configuration of this logger and widgets. + def _configure(self, configuration: Sequence[sight_pb2.Object]) -> None: + """Initializes the configuration of this logger and widgets. Args: configuration: Sight log that stores configuration log objects. """ - if not configuration: - decision.configure(None, self.widget_decision_state) - return - - self.add_config(configuration) - - # def add_config(self, configuration: Sequence[sight_pb2.Object]) -> None: - # """Augments the configuration of this logger from an in-memory log. - - # Args: - # configuration: Sight log that stores configuration log objects. - # """ - # if not configuration: - # return - # for cur in configuration: - # if ( - # cur.sub_type != sight_pb2.Object.ST_BLOCK_START - # or cur.block_start.sub_type != sight_pb2.BlockStart.ST_CONFIGURATION - # ): - # continue - - # if ( - # cur.block_start.configuration.sub_type - # == sight_pb2.ConfigurationStart.ST_DECISION_CONFIGURATION - # ): - # decision.configure( - # cur.block_start.configuration.decision_configuration, - # self.widget_decision_state, - # ) - - # def add_config_file(self, config_file_path: str) -> None: - # """Augments the configuration of this logger from a file. - - # Args: - # config_file_path: File glob that contains a Sight log that stores - # configuration log objects. - # """ - # self.add_config(_read_capacitor_file(config_file_path)) # pytype: disable=wrong-arg-types # dynamic-method-lookup + if not configuration: + decision.configure(None, self.widget_decision_state) + return + + self.add_config(configuration) + + # def add_config(self, configuration: Sequence[sight_pb2.Object]) -> None: + # """Augments the configuration of this logger from an in-memory log. + + # Args: + # configuration: Sight log that stores configuration log objects. 
+  #   """
+  #   if not configuration:
+  #     return
+  #   for cur in configuration:
+  #     if (
+  #         cur.sub_type != sight_pb2.Object.ST_BLOCK_START
+  #         or cur.block_start.sub_type != sight_pb2.BlockStart.ST_CONFIGURATION
+  #     ):
+  #       continue
+
+  #     if (
+  #         cur.block_start.configuration.sub_type
+  #         == sight_pb2.ConfigurationStart.ST_DECISION_CONFIGURATION
+  #     ):
+  #       decision.configure(
+  #           cur.block_start.configuration.decision_configuration,
+  #           self.widget_decision_state,
+  #       )

+  # def add_config_file(self, config_file_path: str) -> None:
+  #   """Augments the configuration of this logger from a file.

+  #   Args:
+  #     config_file_path: File glob that contains a Sight log that stores
+  #       configuration log objects.
+  #   """
+  #   self.add_config(_read_capacitor_file(config_file_path))  # pytype: disable=wrong-arg-types  # dynamic-method-lookup


def text(text_val: str, sight, end='\n', frame=None) -> str:
-    """Logs a text value to the Sight log if Sight is being used.
+  """Logs a text value to the Sight log if Sight is being used.

  If no Sight logger object is provided, nothing is logged.
@@ -936,21 +910,21 @@ def text(text_val: str, sight, end='\n', frame=None) -> str:

  Returns:
    The logged text.
  """
-    if sight.params.silent_logger or sight.pause_logging_depth > 0:
-        return ''
+  # Check for a missing logger before dereferencing it.
+  if sight is None:
+    return ''

-    if sight is None:
-        return ''
+  if sight.params.silent_logger or sight.pause_logging_depth > 0:
+    return ''

-    if frame is None:
-        # pytype: disable=attribute-error
-        frame = inspect.currentframe().f_back
-        # pytype: enable=attribute-error
-    return sight.text(text_val, end=end, frame=frame)
+  if frame is None:
+    # pytype: disable=attribute-error
+    frame = inspect.currentframe().f_back
+    # pytype: enable=attribute-error
+  return sight.text(text_val, end=end, frame=frame)


def text_block(label: str, text_val: str, sight, frame=None) -> str:
-    """Logs to Sight a block that contains a text string if Sight is being used.
+  """Logs to Sight a block that contains a text string if Sight is being used.

  If no Sight logger object is provided, nothing is logged.
@@ -964,14 +938,14 @@ def text_block(label: str, text_val: str, sight, frame=None) -> str:

  Returns:
    The logged text.
  """
-    if sight.params.silent_logger or sight.pause_logging_depth > 0:
-        return ''
+  # Check for a missing logger before dereferencing it.
+  if sight is None:
+    return ''

-    if sight is None:
-        return ''
+  if sight.params.silent_logger or sight.pause_logging_depth > 0:
+    return ''

-    if frame is None:
-        # pytype: disable=attribute-error
-        frame = inspect.currentframe().f_back
-        # pytype: enable=attribute-error
-    return sight.text_block(label, text_val, frame)
+  if frame is None:
+    # pytype: disable=attribute-error
+    frame = inspect.currentframe().f_back
+    # pytype: enable=attribute-error
+  # Pass frame by keyword so it is not mistaken for the end parameter.
+  return sight.text_block(label, text_val, frame=frame)
diff --git a/py/sight/sight_test.py b/py/sight/sight_test.py
index 5032188..9f6286d 100644
--- a/py/sight/sight_test.py
+++ b/py/sight/sight_test.py
@@ -11,18 +11,18 @@
 #  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
- """Tests for py.sight.""" import inspect from typing import Sequence -# from google3.analysis.dremel.core.capacitor.public.python import pywrap_record_reader -from sight.proto import sight_pb2 + +from absl.testing import absltest from sight.attribute import Attribute from sight.block import Block +# from google3.analysis.dremel.core.capacitor.public.python import pywrap_record_reader +from sight.proto import sight_pb2 from sight.sight import Sight from sight.sight import text from tensorflow.python.util.protobuf import compare -from absl.testing import absltest class SightTest(absltest.TestCase): @@ -48,29 +48,23 @@ def _read_text_file(file_path: str): # protos.extend(record_reader.IterRecords()) # return sorted(protos, key=lambda x: x.index) @staticmethod - def _create_attributes( - base_attributes: Sequence[sight_pb2.Attribute], sight: Sight - ) -> Sequence[sight_pb2.Attribute]: + def _create_attributes(base_attributes: Sequence[sight_pb2.Attribute], + sight: Sight) -> Sequence[sight_pb2.Attribute]: attribute = [] if hasattr(sight, 'change_list_number'): attribute.append( - sight_pb2.Attribute( - key='change_list_number', value=str(sight.change_list_number) - ) - ) + sight_pb2.Attribute(key='change_list_number', + value=str(sight.change_list_number))) if hasattr(sight, 'citc_snapshot'): attribute.append( - sight_pb2.Attribute( - key='citc_snapshot', value=str(sight.citc_snapshot) - ) - ) + sight_pb2.Attribute(key='citc_snapshot', + value=str(sight.citc_snapshot))) attribute.extend(base_attributes) return attribute @staticmethod - def _create_attributes_text( - base_attributes: Sequence[sight_pb2.Attribute], sight: Sight - ) -> str: + def _create_attributes_text(base_attributes: Sequence[sight_pb2.Attribute], + sight: Sight) -> str: attribute = [] if hasattr(sight, 'change_list_number'): attribute.append(f'change_list_number={sight.change_list_number}') @@ -107,9 +101,8 @@ def testLogTextToTextFile(self): frameinfo.function, block_attrs, ) - actual_log = self._read_text_file( - params.log_dir_path + '/testLogTextToTextFile.txt' - ) + actual_log = self._read_text_file(params.log_dir_path + + '/testLogTextToTextFile.txt') self.assertEqual( expected_log, actual_log, @@ -165,9 +158,8 @@ def testLogTextToCapacitorFile(self): func=frameinfo.function, ancestor_start_location=['0000000002'], sub_type=sight_pb2.Object.ST_TEXT, - text=sight_pb2.Text( - sub_type=sight_pb2.Text.ST_TEXT, text='textLine1\n' - ), + text=sight_pb2.Text(sub_type=sight_pb2.Text.ST_TEXT, + text='textLine1\n'), ), sight_pb2.Object( location='0000000003', @@ -178,14 +170,12 @@ def testLogTextToCapacitorFile(self): func=frameinfo.function, ancestor_start_location=['0000000003'], sub_type=sight_pb2.Object.ST_TEXT, - text=sight_pb2.Text( - sub_type=sight_pb2.Text.ST_TEXT, text='textLine2\n' - ), + text=sight_pb2.Text(sub_type=sight_pb2.Text.ST_TEXT, + text='textLine2\n'), ), ] actual_log = self._read_capacitor_file( - params.log_dir_path + '/testLogTextToCapacitorFile.capacitor' - ) + params.log_dir_path + '/testLogTextToCapacitorFile.capacitor') self.assertEqual(len(expected_log), len(actual_log)) for i in range(0, len(expected_log)): compare.assertProtoEqual( @@ -193,8 +183,8 @@ def testLogTextToCapacitorFile(self): expected_log[i], actual_log[i], 'Target code and generated logs are different. 
Expected' - ' log[%d]:\n%s\nActual log[%d]:\n%s\n' - % (i, expected_log[i], i, actual_log[i]), + ' log[%d]:\n%s\nActual log[%d]:\n%s\n' % + (i, expected_log[i], i, actual_log[i]), ) def testLogBlockTextToTextFile(self): @@ -223,9 +213,8 @@ def testLogBlockTextToTextFile(self): block_attrs, block_attrs, ) - actual_log = self._read_text_file( - params.log_dir_path + '/testLogBlockTextToTextFile.txt' - ) + actual_log = self._read_text_file(params.log_dir_path + + '/testLogBlockTextToTextFile.txt') self.assertEqual( expected_log, actual_log, @@ -288,8 +277,7 @@ def testLogBlockTextToCapacitorFile(self): ), ] actual_log = self._read_capacitor_file( - params.log_dir_path + '/testLogBlockTextToCapacitorFile.capacitor' - ) + params.log_dir_path + '/testLogBlockTextToCapacitorFile.capacitor') self.assertEqual(len(expected_log), len(actual_log)) for i in range(0, len(expected_log)): compare.assertProtoEqual( @@ -297,8 +285,8 @@ def testLogBlockTextToCapacitorFile(self): expected_log[i], actual_log[i], 'Target code and generated logs are different. Expected' - ' log[%d]:\n%s\nActual log[%d]:\n%s\n' - % (i, expected_log[i], i, actual_log[i]), + ' log[%d]:\n%s\nActual log[%d]:\n%s\n' % + (i, expected_log[i], i, actual_log[i]), ) def testLogNestedBlockTextToTextFile(self): @@ -344,9 +332,8 @@ def testLogNestedBlockTextToTextFile(self): block_attrs, block_attrs, ) - actual_log = self._read_text_file( - params.log_dir_path + '/testLogNestedBlockTextToTextFile.txt' - ) + actual_log = self._read_text_file(params.log_dir_path + + '/testLogNestedBlockTextToTextFile.txt') self.assertEqual( expected_log, actual_log, @@ -392,9 +379,8 @@ def testLogNestedBlockTextToCapacitorFile(self): func=frameinfo.function, ancestor_start_location=['0000000000', '0000000000:0000000000'], sub_type=sight_pb2.Object.ST_TEXT, - text=sight_pb2.Text( - sub_type=sight_pb2.Text.ST_TEXT, text='preText\n' - ), + text=sight_pb2.Text(sub_type=sight_pb2.Text.ST_TEXT, + text='preText\n'), ), sight_pb2.Object( location='0000000000:0000000001', @@ -420,9 +406,8 @@ def testLogNestedBlockTextToCapacitorFile(self): '0000000000:0000000001:0000000000', ], sub_type=sight_pb2.Object.ST_TEXT, - text=sight_pb2.Text( - sub_type=sight_pb2.Text.ST_TEXT, text='inText\n' - ), + text=sight_pb2.Text(sub_type=sight_pb2.Text.ST_TEXT, + text='inText\n'), ), sight_pb2.Object( location='0000000000:0000000002', @@ -449,9 +434,8 @@ def testLogNestedBlockTextToCapacitorFile(self): func=frameinfo.function, ancestor_start_location=['0000000000', '0000000000:0000000003'], sub_type=sight_pb2.Object.ST_TEXT, - text=sight_pb2.Text( - sub_type=sight_pb2.Text.ST_TEXT, text='postText\n' - ), + text=sight_pb2.Text(sub_type=sight_pb2.Text.ST_TEXT, + text='postText\n'), ), sight_pb2.Object( location='0000000001', @@ -471,8 +455,8 @@ def testLogNestedBlockTextToCapacitorFile(self): ), ] actual_log = self._read_capacitor_file( - params.log_dir_path + '/testLogNestedBlockTextToCapacitorFile.capacitor' - ) + params.log_dir_path + + '/testLogNestedBlockTextToCapacitorFile.capacitor') self.assertEqual(len(expected_log), len(actual_log)) for i in range(0, len(expected_log)): compare.assertProtoEqual( @@ -480,8 +464,8 @@ def testLogNestedBlockTextToCapacitorFile(self): expected_log[i], actual_log[i], 'Target code and generated logs are different. 
Expected' - ' log[%d]:\n%s\nActual log[%d]:\n%s\n' - % (i, expected_log[i], i, actual_log[i]), + ' log[%d]:\n%s\nActual log[%d]:\n%s\n' % + (i, expected_log[i], i, actual_log[i]), ) def testLogAttributesToTextFile(self): @@ -503,12 +487,10 @@ def testLogAttributesToTextFile(self): frameinfo.lineno + 3, frameinfo.function, self._create_attributes_text( - [sight_pb2.Attribute(key='key', value='val')], sight - ), - ) - actual_log = self._read_text_file( - params.log_dir_path + '/testLogAttributesToTextFile.txt' + [sight_pb2.Attribute(key='key', value='val')], sight), ) + actual_log = self._read_text_file(params.log_dir_path + + '/testLogAttributesToTextFile.txt') self.assertEqual( expected_log, actual_log, @@ -535,8 +517,7 @@ def testLogAttributesToCapacitorFile(self): location='0000000000', index=0, attribute=self._create_attributes( - [sight_pb2.Attribute(key='key', value='val')], sight - ), + [sight_pb2.Attribute(key='key', value='val')], sight), file=self.test_path, line=frameinfo.lineno + 3, func=frameinfo.function, @@ -546,8 +527,7 @@ def testLogAttributesToCapacitorFile(self): ) ] actual_log = self._read_capacitor_file( - params.log_dir_path + '/testLogAttributesToCapacitorFile.capacitor' - ) + params.log_dir_path + '/testLogAttributesToCapacitorFile.capacitor') self.assertEqual(len(expected_log), len(actual_log)) for i in range(0, len(expected_log)): compare.assertProtoEqual( @@ -555,8 +535,8 @@ def testLogAttributesToCapacitorFile(self): expected_log[i], actual_log[i], 'Target code and generated logs are different. Expected' - ' log[%d]:\n%s\nActual log[%d]:\n%s\n' - % (i, expected_log[i], i, actual_log[i]), + ' log[%d]:\n%s\nActual log[%d]:\n%s\n' % + (i, expected_log[i], i, actual_log[i]), ) diff --git a/py/sight/trace.py b/py/sight/trace.py index b790d84..39850be 100644 --- a/py/sight/trace.py +++ b/py/sight/trace.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Access to traces of Sight-logged executions.""" from typing import Any, List, Optional, Sequence @@ -48,11 +47,9 @@ def __init__( """ if trace_file_path: self._trace_file = pywrap_record_reader.RecordReader.CreateFromPath( - trace_file_path, ['*'], 60.0 - ) - log = sorted( - list(self._trace_file.IterRecords()), key=lambda x: x.location - ) + trace_file_path, ['*'], 60.0) + log = sorted(list(self._trace_file.IterRecords()), + key=lambda x: x.location) self._trace_iter = log.__iter__() else: self._trace_iter = iter(trace) @@ -63,8 +60,7 @@ def get_cur(self) -> Optional[sight_pb2.Object]: return self._cur_obj def advance_to_within_block( - self, obj_type: Sequence[Any] - ) -> Optional[sight_pb2.Object]: + self, obj_type: Sequence[Any]) -> Optional[sight_pb2.Object]: """Advances to the next object of a given type in the current block. This method focuses on singleton or start-of-block objects. @@ -81,10 +77,8 @@ def advance_to_within_block( Returns: The next log object of this type, if any. 
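
    Example (an illustrative sketch; uses one of the type sequences handled
    below):
      obj = trace.advance_to_within_block(
          [sight_pb2.Object.ST_BLOCK_START, sight_pb2.BlockStart.ST_LIST])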
""" - if ( - self._cur_obj is None - or self._cur_obj.sub_type == sight_pb2.Object.ST_BLOCK_END - ): + if (self._cur_obj is None or + self._cur_obj.sub_type == sight_pb2.Object.ST_BLOCK_END): return None start_obj = self._cur_obj @@ -97,10 +91,9 @@ def advance_to_within_block( return None if self._cur_obj.sub_type == sight_pb2.Object.ST_BLOCK_END and ( - not container_location - or self._cur_obj.block_end.location_of_block_start - == container_location - ): + not container_location or + self._cur_obj.block_end.location_of_block_start + == container_location): target_obj = None # Advance the current object to the next log location. self._cur_obj = self._trace_iter.__next__() @@ -112,44 +105,32 @@ def advance_to_within_block( target_obj = self._cur_obj break if len(obj_type) >= 2: - if ( - obj_type[0] == sight_pb2.Object.ST_BLOCK_START - and self._cur_obj.block_start.sub_type == obj_type[1] - ): + if (obj_type[0] == sight_pb2.Object.ST_BLOCK_START and + self._cur_obj.block_start.sub_type == obj_type[1]): if len(obj_type) == 2: target_obj = self._cur_obj break - if ( - len(obj_type) >= 3 - and obj_type[1] == sight_pb2.BlockStart.ST_LIST - and self._cur_obj.block_start.list.sub_type == obj_type[2] - ): + if (len(obj_type) >= 3 and + obj_type[1] == sight_pb2.BlockStart.ST_LIST and + self._cur_obj.block_start.list.sub_type == obj_type[2]): target_obj = self._cur_obj break - if ( - len(obj_type) >= 3 - and obj_type[1] == sight_pb2.BlockStart.ST_CONFIGURATION - and self._cur_obj.block_start.configuration.sub_type - == obj_type[2] - ): + if (len(obj_type) >= 3 and + obj_type[1] == sight_pb2.BlockStart.ST_CONFIGURATION and + self._cur_obj.block_start.configuration.sub_type + == obj_type[2]): target_obj = self._cur_obj break - elif ( - obj_type[0] == sight_pb2.Object.ST_TEXT - and self._cur_obj.text.sub_type == obj_type[1] - ): + elif (obj_type[0] == sight_pb2.Object.ST_TEXT and + self._cur_obj.text.sub_type == obj_type[1]): target_obj = self._cur_obj break - elif ( - obj_type[0] == sight_pb2.Object.ST_VALUE - and self._cur_obj.value.sub_type == obj_type[1] - ): + elif (obj_type[0] == sight_pb2.Object.ST_VALUE and + self._cur_obj.value.sub_type == obj_type[1]): target_obj = self._cur_obj break - elif ( - obj_type[0] == sight_pb2.Object.ST_TENSOR - and self._cur_obj.tensor.sub_type == obj_type[1] - ): + elif (obj_type[0] == sight_pb2.Object.ST_TENSOR and + self._cur_obj.tensor.sub_type == obj_type[1]): target_obj = self._cur_obj break @@ -177,11 +158,10 @@ def collect_current_block(self) -> List[sight_pb2.Object]: block_objects = [] # Iterate until we reach an block-end object that matches block_start_obj. 
- while self._cur_obj and not ( - self._cur_obj.sub_type == sight_pb2.Object.ST_BLOCK_END - and self._cur_obj.block_end.location_of_block_start - == block_start_obj.location - ): + while self._cur_obj and not (self._cur_obj.sub_type + == sight_pb2.Object.ST_BLOCK_END and + self._cur_obj.block_end.location_of_block_start + == block_start_obj.location): block_objects.append(self._cur_obj) self._cur_obj = self._trace_iter.__next__() block_objects.append(self._cur_obj) diff --git a/py/sight/utility.py b/py/sight/utility.py index 92712cf..2999d24 100644 --- a/py/sight/utility.py +++ b/py/sight/utility.py @@ -25,17 +25,16 @@ from google.protobuf.json_format import _NEG_INFINITY from google.protobuf.json_format import _Printer as BasePrinter from google.protobuf.json_format import SerializeToJsonError +from sight import service_utils as service from sight.widgets.decision.resource_lock import RWLockDictWrapper from sight_service.optimizer_instance import param_proto_to_dict from sight_service.proto import service_pb2 -from sight import service_utils as service - - -POLL_LIMIT = 10 # POLL_TIME_INTERVAL th part of second -POLL_TIME_INTERVAL = 6 # seconds +POLL_LIMIT = 10 # POLL_TIME_INTERVAL th part of second +POLL_TIME_INTERVAL = 6 # seconds global_outcome_mapping = RWLockDictWrapper() + def get_all_outcomes(sight_id, action_ids): # print(f'get all outcome for actions ids {action_ids}') @@ -58,7 +57,8 @@ def get_all_outcomes(sight_id, action_ids): # service_pb2.GetOutcomeResponse.Status.COMPLETED # print(f'Response => {[outcome for outcome in response.outcome]}') for outcome in response.outcome: - if (outcome.status == service_pb2.GetOutcomeResponse.Outcome.Status.COMPLETED): + if (outcome.status == + service_pb2.GetOutcomeResponse.Outcome.Status.COMPLETED): outcome_dict = {} outcome_dict['action_id'] = outcome.action_id outcome_dict['reward'] = outcome.reward @@ -80,8 +80,7 @@ def poll_network_batch_outcome(sight_id): try: resource_dict = global_outcome_mapping.get() pending_action_ids = [ - id for id in resource_dict - if resource_dict[id] is None + id for id in resource_dict if resource_dict[id] is None ] # print("pending action ids : ", pending_action_ids) @@ -98,17 +97,18 @@ def poll_network_batch_outcome(sight_id): global_outcome_mapping.update(new_dict) else: - print(f'Not sending request as no pending ids ...=> {pending_action_ids} with counter => {counter}') + print( + f'Not sending request as no pending ids ...=> {pending_action_ids} with counter => {counter}' + ) if counter <= 0: return - counter -=1 + counter -= 1 time.sleep(POLL_TIME_INTERVAL) except Exception as e: print(f"Error updating outcome mapping: {e}") raise e - def MessageToJson( message, including_default_value_fields=False, @@ -190,7 +190,7 @@ def MessageToDict( preserving_proto_field_name, use_integers_for_enums, descriptor_pool, - # float_precision=float_precision, + # float_precision=float_precision, ) # pylint: disable=protected-access return printer._MessageToJsonObject(message) @@ -213,10 +213,8 @@ def _FieldToJsonObject(self, field, value): else: if field.file.syntax == 'proto3': return value - raise SerializeToJsonError( - 'Enum field contains an integer value ' - 'which can not mapped to an enum value.' 
- ) + raise SerializeToJsonError('Enum field contains an integer value ' + 'which can not mapped to an enum value.') elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING: if field.type == descriptor.FieldDescriptor.TYPE_BYTES: # Use base64 Data encoding for bytes diff --git a/py/sight/widgets/decision/acme/acme_optimizer_client.py b/py/sight/widgets/decision/acme/acme_optimizer_client.py index d3153c6..1eb6a62 100644 --- a/py/sight/widgets/decision/acme/acme_optimizer_client.py +++ b/py/sight/widgets/decision/acme/acme_optimizer_client.py @@ -13,21 +13,22 @@ # limitations under the License. """Client for dm-acme optimizer to communicate with server.""" import math -from helpers.logs.logs_handler import logger as logging from typing import Optional, Sequence, Tuple + from absl import flags from acme import specs from acme.jax.experiments import config import dm_env +from helpers.logs.logs_handler import logger as logging import jax import numpy as np +from overrides import override import reverb -from sight_service.proto import service_pb2 from sight.proto import sight_pb2 from sight.widgets.decision.acme import sight_adder from sight.widgets.decision.acme import sight_variable_source -from sight.widgets.decision.acme.build_dqn_actor import build_dqn_config from sight.widgets.decision.acme.build_d4pg_actor import build_d4pg_config +from sight.widgets.decision.acme.build_dqn_actor import build_dqn_config # from sight.widgets.decision.acme.build_impala_actor import build_impala_config from sight.widgets.decision.acme.build_mdqn_actor import build_mdqn_config from sight.widgets.decision.acme.build_qrdqn_actor import build_qrdqn_config @@ -36,7 +37,7 @@ # from sight.widgets.decision.acme.build_sac_actor import build_sac_config from sight.widgets.decision.acme.build_td3_actor import build_td3_config from sight.widgets.decision.optimizer_client import OptimizerClient -from overrides import override +from sight_service.proto import service_pb2 _ACME_AGENT = flags.DEFINE_enum( 'acme_agent', @@ -92,245 +93,242 @@ class AcmeOptimizerClient(OptimizerClient): - """Acme client for the Sight service.""" - - def __init__(self, sight): - super().__init__( - sight_pb2.DecisionConfigurationStart.OptimizerType.OT_ACME) - self._sight = sight - self._actor = None - self._adder = None - self._variable_source = None - self._dp_first_call = True - self._last_acme_action = None - - # added to run the base example - self._replay_server = None - self._replay_client = None - self._dataset = None - self._learner = None - - @override - def create_config( - self) -> sight_pb2.DecisionConfigurationStart.ChoiceConfig: - # print("self._sight.widget_decision_state['decision_episode_fn'] : ", self._sight) - print("in create config") - choice_config = sight_pb2.DecisionConfigurationStart.ChoiceConfig() - - if (FLAGS.acme_agent == 'dqn'): - choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_DQN - elif (FLAGS.acme_agent == 'd4pg'): - choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_D4PG - # elif(FLAGS.acme_agent == 'impala'): - # choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_IMPALA - elif (FLAGS.acme_agent == 'mdqn'): - choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_MDQN - elif (FLAGS.acme_agent == 'qrdqn'): - choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_QRDQN - # elif(FLAGS.acme_agent == 'ppo'): - # 
choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_PPO - # elif(FLAGS.acme_agent == 'mpo'): - # choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_MPO - # elif(FLAGS.acme_agent == 'sac'): - # choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_SAC - elif (FLAGS.acme_agent == 'td3'): - choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_TD3 - - # possible_actions = fetch_possible_actions(self._sight.widget_decision_state['decision_episode_fn']) - # choice_config.acme_config.possible_actions = possible_actions - - #? using state and action related data as common to all choice_config - # ( - # state_min, - # state_max, - # state_param_length, - # action_min, - # action_max, - # action_param_length, - # possible_actions, - # ) = generate_spec_details( - # self._sight.widget_decision_state['decision_episode_fn'] - # ) - # choice_config.acme_config.state_min.extend(state_min) - # choice_config.acme_config.state_max.extend(state_max) - # choice_config.acme_config.state_param_length = state_param_length - # choice_config.acme_config.action_min.extend(action_min) - # choice_config.acme_config.action_max.extend(action_max) - # choice_config.acme_config.action_param_length = action_param_length - # choice_config.acme_config.possible_actions = possible_actions - - # if FLAGS.env_name: - # choice_config.acme_config.env_name = FLAGS.env_name - - return choice_config - - def generate_env_spec( - self, - # state_min, - # state_max, - # state_param_length, - # action_min, - # action_max, - # action_param_length, - attr_dict - ) -> specs.EnvironmentSpec: - """Generates the environment spec for the environment.""" - - method_name = "generate_env_spec" - logging.debug(">>>> In %s of %s", method_name, _file_name) - - # dtype_mapping = { - # sight_pb2.DecisionConfigurationStart.DataType.DT_INT32: np.int32, - # sight_pb2.DecisionConfigurationStart.DataType.DT_INT64: np.int64, - # sight_pb2.DecisionConfigurationStart.DataType.DT_FLOAT32: np.float32, - # sight_pb2.DecisionConfigurationStart.DataType.DT_FLOAT64: np.float64, - # } - - default_dtype = np.float32 - state_min = np.array(list(attr_dict.state_min.values())) - state_max = np.array(list(attr_dict.state_max.values())) - state_param_length = len(attr_dict.state_attrs) - # state_dtype = dtype_mapping[attr_dict.state_dtype] - observations = specs.BoundedArray( - shape=(state_param_length, ), - # dtype=state_dtype, - dtype=default_dtype, - name="observation", - minimum=state_min, - maximum=state_max, - ), - - action_param_length = len(attr_dict.action_attrs) - # if(attr_dict.action_min): - action_min = np.array(list(attr_dict.action_min.values())) - # if(attr_dict.action_max): - action_max = np.array(list(attr_dict.action_max.values())) - # if(attr_dict.action_dtype): - # action_dtype = dtype_mapping[attr_dict.action_dtype] - - # create discrete spec - if (attr_dict.valid_action_values): - possible_values_list = list( - attr_dict.valid_action_values.values())[0] - actions = specs.DiscreteArray( - num_values=len(possible_values_list), - dtype=np.int64, - name="action", - ) - # create bounded spec - else: - if (attr_dict.step_size): - default_dtype = np.int64 - actions = specs.BoundedArray( - shape=(action_param_length, ), - # dtype=action_dtype, - dtype=default_dtype, - name="action", - minimum=action_min, - maximum=action_max, - ) - - # print(state_dtype, action_dtype) - - new_env_spec = specs.EnvironmentSpec( 
- # works for gym - observations=observations, - actions=actions, - rewards=specs.Array(shape=(), dtype=float, name="reward"), - discounts=specs.BoundedArray(shape=(), - dtype=float, - minimum=0.0, - maximum=1.0, - name="discount"), - ) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - # print("new_env_spec : ", new_env_spec) - return new_env_spec - - def create_new_actor(self): - """Creates a new actor.""" - method_name = "create_new_actor" - logging.debug(">>>> In %s of %s", method_name, _file_name) - - # if FLAGS.env_name: - # if FLAGS.env_name == "Pendulum-v1": - # experiment = build_d4pg_config(env_name=FLAGS.env_name) - # else: - # experiment = build_dqn_config(env_name=FLAGS.env_name) - # # print("experiment : ", experiment) - - # environment = experiment.environment_factory() - # environment_spec = specs.make_environment_spec(environment) - # # print('environment_spec : ', environment_spec) - - # else: - attr_dict = self._sight.widget_decision_state['decision_episode_fn'] - environment_spec = self.generate_env_spec(attr_dict) - - if (FLAGS.acme_agent == 'dqn'): - experiment = build_dqn_config() - elif (FLAGS.acme_agent == 'd4pg'): - experiment = build_d4pg_config() - # elif(FLAGS.acme_agent == 'impala'): - # experiment = build_impala_config() - elif (FLAGS.acme_agent == 'mdqn'): - experiment = build_mdqn_config() - elif (FLAGS.acme_agent == 'qrdqn'): - experiment = build_qrdqn_config() - # elif(FLAGS.acme_agent == 'ppo'): - # experiment = build_ppo_config() - # elif(FLAGS.acme_agent == 'mpo'): - # experiment = build_mpo_config() - # elif(FLAGS.acme_agent == 'sac'): - # experiment = build_sac_config(environment_spec) - elif (FLAGS.acme_agent == 'td3'): - experiment = build_td3_config() - - # ( - # state_min, - # state_max, - # state_param_length, - # state_dtype, - # action_min, - # action_max, - # action_param_length, - # action_dtype - # # possible_actions, - # ) = generate_spec_details( - # self._sight.widget_decision_state['decision_episode_fn'] - # ) - - # print('environment_spec : ', environment_spec) - - networks = experiment.network_factory(environment_spec) - policy = config.make_policy( - experiment=experiment, - networks=networks, - environment_spec=environment_spec, - evaluation=False, - ) - # print("network : ", networks) - # print("policy : ", policy) - - self._adder = sight_adder.SightAdder() - self._variable_source = sight_variable_source.SightVariableSource( - adder=self._adder, client_id=self._sight.id, sight=self._sight) - - key = jax.random.PRNGKey(0) - actor_key, key = jax.random.split(key) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return experiment.builder.make_actor( - actor_key, - policy, - environment_spec, - variable_source=self._variable_source, - adder=self._adder, - ) - - @override - def decision_point(self, sight, request: service_pb2.DecisionPointRequest): - # def decision_point(self, sight): - """communicates with decision_point method on server. 
+ """Acme client for the Sight service.""" + + def __init__(self, sight): + super().__init__(sight_pb2.DecisionConfigurationStart.OptimizerType.OT_ACME) + self._sight = sight + self._actor = None + self._adder = None + self._variable_source = None + self._dp_first_call = True + self._last_acme_action = None + + # added to run the base example + self._replay_server = None + self._replay_client = None + self._dataset = None + self._learner = None + + @override + def create_config(self) -> sight_pb2.DecisionConfigurationStart.ChoiceConfig: + # print("self._sight.widget_decision_state['decision_episode_fn'] : ", self._sight) + print("in create config") + choice_config = sight_pb2.DecisionConfigurationStart.ChoiceConfig() + + if (FLAGS.acme_agent == 'dqn'): + choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_DQN + elif (FLAGS.acme_agent == 'd4pg'): + choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_D4PG + # elif(FLAGS.acme_agent == 'impala'): + # choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_IMPALA + elif (FLAGS.acme_agent == 'mdqn'): + choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_MDQN + elif (FLAGS.acme_agent == 'qrdqn'): + choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_QRDQN + # elif(FLAGS.acme_agent == 'ppo'): + # choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_PPO + # elif(FLAGS.acme_agent == 'mpo'): + # choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_MPO + # elif(FLAGS.acme_agent == 'sac'): + # choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_SAC + elif (FLAGS.acme_agent == 'td3'): + choice_config.acme_config.acme_agent = sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_TD3 + + # possible_actions = fetch_possible_actions(self._sight.widget_decision_state['decision_episode_fn']) + # choice_config.acme_config.possible_actions = possible_actions + + #? 
using state and action related data as common to all choice_config
+    # (
+    #     state_min,
+    #     state_max,
+    #     state_param_length,
+    #     action_min,
+    #     action_max,
+    #     action_param_length,
+    #     possible_actions,
+    # ) = generate_spec_details(
+    #     self._sight.widget_decision_state['decision_episode_fn']
+    # )
+    # choice_config.acme_config.state_min.extend(state_min)
+    # choice_config.acme_config.state_max.extend(state_max)
+    # choice_config.acme_config.state_param_length = state_param_length
+    # choice_config.acme_config.action_min.extend(action_min)
+    # choice_config.acme_config.action_max.extend(action_max)
+    # choice_config.acme_config.action_param_length = action_param_length
+    # choice_config.acme_config.possible_actions = possible_actions
+
+    # if FLAGS.env_name:
+    #   choice_config.acme_config.env_name = FLAGS.env_name
+
+    return choice_config
+
+  def generate_env_spec(
+      self,
+      # state_min,
+      # state_max,
+      # state_param_length,
+      # action_min,
+      # action_max,
+      # action_param_length,
+      attr_dict
+  ) -> specs.EnvironmentSpec:
+    """Generates the environment spec for the environment."""
+
+    method_name = "generate_env_spec"
+    logging.debug(">>>> In %s of %s", method_name, _file_name)
+
+    # dtype_mapping = {
+    #     sight_pb2.DecisionConfigurationStart.DataType.DT_INT32: np.int32,
+    #     sight_pb2.DecisionConfigurationStart.DataType.DT_INT64: np.int64,
+    #     sight_pb2.DecisionConfigurationStart.DataType.DT_FLOAT32: np.float32,
+    #     sight_pb2.DecisionConfigurationStart.DataType.DT_FLOAT64: np.float64,
+    # }
+
+    default_dtype = np.float32
+    state_min = np.array(list(attr_dict.state_min.values()))
+    state_max = np.array(list(attr_dict.state_max.values()))
+    state_param_length = len(attr_dict.state_attrs)
+    # state_dtype = dtype_mapping[attr_dict.state_dtype]
+    # note: no trailing comma here, or `observations` becomes a 1-tuple
+    observations = specs.BoundedArray(
+        shape=(state_param_length,),
+        # dtype=state_dtype,
+        dtype=default_dtype,
+        name="observation",
+        minimum=state_min,
+        maximum=state_max,
+    )
+
+    action_param_length = len(attr_dict.action_attrs)
+    # if(attr_dict.action_min):
+    action_min = np.array(list(attr_dict.action_min.values()))
+    # if(attr_dict.action_max):
+    action_max = np.array(list(attr_dict.action_max.values()))
+    # if(attr_dict.action_dtype):
+    #   action_dtype = dtype_mapping[attr_dict.action_dtype]
+
+    # create discrete spec
+    if (attr_dict.valid_action_values):
+      possible_values_list = list(attr_dict.valid_action_values.values())[0]
+      actions = specs.DiscreteArray(
+          num_values=len(possible_values_list),
+          dtype=np.int64,
+          name="action",
+      )
+    # create bounded spec
+    else:
+      if (attr_dict.step_size):
+        default_dtype = np.int64
+      actions = specs.BoundedArray(
+          shape=(action_param_length,),
+          # dtype=action_dtype,
+          dtype=default_dtype,
+          name="action",
+          minimum=action_min,
+          maximum=action_max,
+      )
+
+    # print(state_dtype, action_dtype)
+
+    new_env_spec = specs.EnvironmentSpec(
+        # works for gym
+        observations=observations,
+        actions=actions,
+        rewards=specs.Array(shape=(), dtype=float, name="reward"),
+        discounts=specs.BoundedArray(shape=(),
+                                     dtype=float,
+                                     minimum=0.0,
+                                     maximum=1.0,
+                                     name="discount"),
+    )
+    logging.debug("<<<< Out %s of %s", method_name, _file_name)
+    # print("new_env_spec : ", new_env_spec)
+    return new_env_spec
+
+  def create_new_actor(self):
+    """Creates a new actor."""
+    method_name = "create_new_actor"
+    logging.debug(">>>> In %s of %s", method_name, _file_name)
+
+    # if FLAGS.env_name:
+    #   if FLAGS.env_name == "Pendulum-v1":
+    #     experiment = build_d4pg_config(env_name=FLAGS.env_name)
+    #   else:
+    #     experiment =
build_dqn_config(env_name=FLAGS.env_name) + # # print("experiment : ", experiment) + + # environment = experiment.environment_factory() + # environment_spec = specs.make_environment_spec(environment) + # # print('environment_spec : ', environment_spec) + + # else: + attr_dict = self._sight.widget_decision_state['decision_episode_fn'] + environment_spec = self.generate_env_spec(attr_dict) + + if (FLAGS.acme_agent == 'dqn'): + experiment = build_dqn_config() + elif (FLAGS.acme_agent == 'd4pg'): + experiment = build_d4pg_config() + # elif(FLAGS.acme_agent == 'impala'): + # experiment = build_impala_config() + elif (FLAGS.acme_agent == 'mdqn'): + experiment = build_mdqn_config() + elif (FLAGS.acme_agent == 'qrdqn'): + experiment = build_qrdqn_config() + # elif(FLAGS.acme_agent == 'ppo'): + # experiment = build_ppo_config() + # elif(FLAGS.acme_agent == 'mpo'): + # experiment = build_mpo_config() + # elif(FLAGS.acme_agent == 'sac'): + # experiment = build_sac_config(environment_spec) + elif (FLAGS.acme_agent == 'td3'): + experiment = build_td3_config() + + # ( + # state_min, + # state_max, + # state_param_length, + # state_dtype, + # action_min, + # action_max, + # action_param_length, + # action_dtype + # # possible_actions, + # ) = generate_spec_details( + # self._sight.widget_decision_state['decision_episode_fn'] + # ) + + # print('environment_spec : ', environment_spec) + + networks = experiment.network_factory(environment_spec) + policy = config.make_policy( + experiment=experiment, + networks=networks, + environment_spec=environment_spec, + evaluation=False, + ) + # print("network : ", networks) + # print("policy : ", policy) + + self._adder = sight_adder.SightAdder() + self._variable_source = sight_variable_source.SightVariableSource( + adder=self._adder, client_id=self._sight.id, sight=self._sight) + + key = jax.random.PRNGKey(0) + actor_key, key = jax.random.split(key) + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return experiment.builder.make_actor( + actor_key, + policy, + environment_spec, + variable_source=self._variable_source, + adder=self._adder, + ) + + @override + def decision_point(self, sight, request: service_pb2.DecisionPointRequest): + # def decision_point(self, sight): + """communicates with decision_point method on server. Stores the trajectories locally, after storing 50 trajectories, calls Update on actor so send those to server and fetch latest weights from @@ -341,105 +339,104 @@ def decision_point(self, sight, request: service_pb2.DecisionPointRequest): Returns: action to be performed. 
""" - method_name = "decision_point" - # logging.info(">>>> In %s of %s", method_name, _file_name) - - observation = np.array( - list(sight.widget_decision_state["state"].values()), - dtype=np.float32, - # todo : meetashah - this should be extracted from env - ) - # print('observation : ', observation) - if self._dp_first_call: - # create actor, if not there - if self._actor is None: - print("no actor found, creating new one.....") - self._actor = self.create_new_actor() - # update will fetch the latest weights from learner into actor policy - self._actor.update(wait=True) - - timestep = dm_env.TimeStep( - step_type=dm_env.StepType.FIRST, - reward=None, - discount=None, - observation=observation, - ) - self._actor.observe_first(timestep) - self._dp_first_call = False - else: - # do this for subsequent call - # logging.info("subsequent call of decision_point...") - timestep = dm_env.TimeStep( - step_type=dm_env.StepType.MID, - reward=np.array(sight.widget_decision_state["outcome_value"], - dtype=np.float64), - discount=np.array(sight.widget_decision_state["discount"], - dtype=np.float64), - observation=observation, - ) - - # action = np.array(self._last_acme_action, dtype=np.int64) - # todo : meetashah - changed dtyep from int64 to float32 for d4pg agent - # action = np.array(self._last_acme_action, dtype=np.float32, ndmin=1) - - # self._actor.observe(action, next_timestep=timestep) - self._actor.observe(self._last_acme_action, next_timestep=timestep) - - if len(self._actor._adder._observation_list) % 50 == 0: - self._actor.update(wait=True) - - # store current action for next call as last_action - self._last_acme_action = self._actor.select_action(observation) - # print("last_Acme_Action : ", self._last_acme_action, self._last_acme_action.dtype, type(self._last_acme_action), self._last_acme_action.shape) - # raise SystemError - - # todo:meetashah- for dqn-cartpole, we get dtype int32 but require int64 - if (self._last_acme_action.dtype == 'int32'): - self._last_acme_action = np.array(self._last_acme_action, - dtype=np.int64) - # self._last_acme_action = self._last_acme_action.reshape((1,)) - - # print("last_Acme_Action : ", self._last_acme_action, self._last_acme_action.dtype, self._last_acme_action.shape) - # raise SystemError - # logging.info("<<<< Out %s of %s", method_name, _file_name) - return self._last_acme_action - - @override - def finalize_episode(self, sight, - request: service_pb2.FinalizeEpisodeRequest): - """completes episode and stores remaining local trajectories to server. + method_name = "decision_point" + # logging.info(">>>> In %s of %s", method_name, _file_name) + + observation = np.array( + list(sight.widget_decision_state["state"].values()), + dtype=np.float32, + # todo : meetashah - this should be extracted from env + ) + # print('observation : ', observation) + if self._dp_first_call: + # create actor, if not there + if self._actor is None: + print("no actor found, creating new one.....") + self._actor = self.create_new_actor() + # update will fetch the latest weights from learner into actor policy + self._actor.update(wait=True) - Args: - sight: sight object. 
- """ - method_name = "finalize_episode" - logging.debug(">>>> In %s of %s", method_name, _file_name) - - observation = np.array( - list(sight.widget_decision_state["state"].values()), - dtype=np.float32, - ) - timestep = dm_env.TimeStep( - step_type=dm_env.StepType.LAST, - reward=np.array(sight.widget_decision_state["outcome_value"], + timestep = dm_env.TimeStep( + step_type=dm_env.StepType.FIRST, + reward=None, + discount=None, + observation=observation, + ) + self._actor.observe_first(timestep) + self._dp_first_call = False + else: + # do this for subsequent call + # logging.info("subsequent call of decision_point...") + timestep = dm_env.TimeStep( + step_type=dm_env.StepType.MID, + reward=np.array(sight.widget_decision_state["outcome_value"], + dtype=np.float64), + discount=np.array(sight.widget_decision_state["discount"], dtype=np.float64), - discount=np.array(sight.widget_decision_state["discount"], - dtype=np.float64), - observation=np.array(observation, dtype=np.float32), - ) - # action = np.array(self._last_acme_action, dtype=np.int64) - # todo : meetashah - changed dtyep from int64 to float64 for d4pg agent - # action = np.array(self._last_acme_action, dtype=np.float32) - # self._actor.observe(action, next_timestep=timestep) - self._actor.observe(self._last_acme_action, next_timestep=timestep) - - # send remaining records to server and fetch latest weights in response - # if len(self._actor._adder._observation_list) % 50 == 0: + observation=observation, + ) + + # action = np.array(self._last_acme_action, dtype=np.int64) + # todo : meetashah - changed dtyep from int64 to float32 for d4pg agent + # action = np.array(self._last_acme_action, dtype=np.float32, ndmin=1) + + # self._actor.observe(action, next_timestep=timestep) + self._actor.observe(self._last_acme_action, next_timestep=timestep) + + if len(self._actor._adder._observation_list) % 50 == 0: self._actor.update(wait=True) - # self._actor._adder.reset() # _actor._adder._observation_list = [] - # resetting this global varibale so, next iteration will - # start with observer_first - self._dp_first_call = True + # store current action for next call as last_action + self._last_acme_action = self._actor.select_action(observation) + # print("last_Acme_Action : ", self._last_acme_action, self._last_acme_action.dtype, type(self._last_acme_action), self._last_acme_action.shape) + # raise SystemError + + # todo:meetashah- for dqn-cartpole, we get dtype int32 but require int64 + if (self._last_acme_action.dtype == 'int32'): + self._last_acme_action = np.array(self._last_acme_action, dtype=np.int64) + # self._last_acme_action = self._last_acme_action.reshape((1,)) - logging.debug("<<<< Out %s of %s", method_name, _file_name) + # print("last_Acme_Action : ", self._last_acme_action, self._last_acme_action.dtype, self._last_acme_action.shape) + # raise SystemError + # logging.info("<<<< Out %s of %s", method_name, _file_name) + return self._last_acme_action + + @override + def finalize_episode(self, sight, + request: service_pb2.FinalizeEpisodeRequest): + """completes episode and stores remaining local trajectories to server. + + Args: + sight: sight object. 
+ """ + method_name = "finalize_episode" + logging.debug(">>>> In %s of %s", method_name, _file_name) + + observation = np.array( + list(sight.widget_decision_state["state"].values()), + dtype=np.float32, + ) + timestep = dm_env.TimeStep( + step_type=dm_env.StepType.LAST, + reward=np.array(sight.widget_decision_state["outcome_value"], + dtype=np.float64), + discount=np.array(sight.widget_decision_state["discount"], + dtype=np.float64), + observation=np.array(observation, dtype=np.float32), + ) + # action = np.array(self._last_acme_action, dtype=np.int64) + # todo : meetashah - changed dtyep from int64 to float64 for d4pg agent + # action = np.array(self._last_acme_action, dtype=np.float32) + # self._actor.observe(action, next_timestep=timestep) + self._actor.observe(self._last_acme_action, next_timestep=timestep) + + # send remaining records to server and fetch latest weights in response + # if len(self._actor._adder._observation_list) % 50 == 0: + self._actor.update(wait=True) + # self._actor._adder.reset() # _actor._adder._observation_list = [] + + # resetting this global varibale so, next iteration will + # start with observer_first + self._dp_first_call = True + + logging.debug("<<<< Out %s of %s", method_name, _file_name) diff --git a/py/sight/widgets/decision/acme/build_d4pg_actor.py b/py/sight/widgets/decision/acme/build_d4pg_actor.py index ac6de78..2b590c5 100644 --- a/py/sight/widgets/decision/acme/build_d4pg_actor.py +++ b/py/sight/widgets/decision/acme/build_d4pg_actor.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Setting up configuration for DQN Experiment.""" from absl import flags @@ -25,7 +24,6 @@ import gym import haiku as hk - # SEED = flags.DEFINE_integer('seed', 0, 'Random seed.') # NUM_STEPS = flags.DEFINE_integer( # 'num_steps', 10, 'Number of env steps to run.' # 1_000_000 @@ -39,7 +37,7 @@ def env_factory(): # if env_name: # return wrappers.GymWrapper(gym.make(env_name)) # else: - return None + return None vmax_values = { 'gym': 1000., diff --git a/py/sight/widgets/decision/acme/build_dqn_actor.py b/py/sight/widgets/decision/acme/build_dqn_actor.py index dd5f674..54fc218 100644 --- a/py/sight/widgets/decision/acme/build_dqn_actor.py +++ b/py/sight/widgets/decision/acme/build_dqn_actor.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Setting up configuration for DQN Experiment.""" from absl import flags @@ -25,10 +24,11 @@ import gym import haiku as hk - SEED = flags.DEFINE_integer('seed', 0, 'Random seed.') NUM_STEPS = flags.DEFINE_integer( - 'num_steps', 10, 'Number of env steps to run.' # 1_000_000 + 'num_steps', + 10, + 'Number of env steps to run.' # 1_000_000 ) @@ -53,17 +53,12 @@ def network(inputs): network_hk = hk.without_apply_rng(hk.transform(network)) obs = utils.add_batch_dim(utils.zeros_like(environment_spec.observations)) network = networks_lib.FeedForwardNetwork( - init=lambda rng: network_hk.init(rng, obs), apply=network_hk.apply - ) + init=lambda rng: network_hk.init(rng, obs), apply=network_hk.apply) typed_network = networks_lib.non_stochastic_network_to_typed(network) return dqn.DQNNetworks(policy_network=typed_network) # Construct the agent. 
- config = dqn.DQNConfig( - discount=0.99, - n_step=1, - epsilon=0.1 - ) + config = dqn.DQNConfig(discount=0.99, n_step=1, epsilon=0.1) loss_fn = losses.QLearning(discount=config.discount, max_abs_reward=1.0) dqn_builder = dqn.DQNBuilder(config, loss_fn=loss_fn) diff --git a/py/sight/widgets/decision/acme/build_mdqn_actor.py b/py/sight/widgets/decision/acme/build_mdqn_actor.py index 4431e51..96fad65 100644 --- a/py/sight/widgets/decision/acme/build_mdqn_actor.py +++ b/py/sight/widgets/decision/acme/build_mdqn_actor.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Setting up configuration for DQN Experiment.""" from absl import flags @@ -47,21 +46,18 @@ def network(inputs): network_hk = hk.without_apply_rng(hk.transform(network)) obs = utils.add_batch_dim(utils.zeros_like(environment_spec.observations)) network = networks_lib.FeedForwardNetwork( - init=lambda rng: network_hk.init(rng, obs), apply=network_hk.apply - ) + init=lambda rng: network_hk.init(rng, obs), apply=network_hk.apply) typed_network = networks_lib.non_stochastic_network_to_typed(network) return dqn.DQNNetworks(policy_network=typed_network) - # Construct the agent. - config = dqn.DQNConfig( - discount=0.99, - n_step=1, - epsilon=0.1 - ) + # Construct the agent. + config = dqn.DQNConfig(discount=0.99, n_step=1, epsilon=0.1) - loss_fn = losses.MunchausenQLearning( - discount=config.discount, max_abs_reward=1., huber_loss_parameter=1., - entropy_temperature=0.03, munchausen_coefficient=0.9) + loss_fn = losses.MunchausenQLearning(discount=config.discount, + max_abs_reward=1., + huber_loss_parameter=1., + entropy_temperature=0.03, + munchausen_coefficient=0.9) dqn_builder = dqn.DQNBuilder(config, loss_fn=loss_fn) diff --git a/py/sight/widgets/decision/acme/build_qrdqn_actor.py b/py/sight/widgets/decision/acme/build_qrdqn_actor.py index 52203d8..5d7478b 100644 --- a/py/sight/widgets/decision/acme/build_qrdqn_actor.py +++ b/py/sight/widgets/decision/acme/build_qrdqn_actor.py @@ -21,51 +21,51 @@ from acme.jax import experiments from acme.jax import networks as networks_lib from acme.jax import utils -import jax.numpy as jnp import haiku as hk +import jax.numpy as jnp NUM_QUANTILES = flags.DEFINE_integer('num_quantiles', 20, 'Number of bins to use.') def build_qrdqn_config(): - """Builds QR-DQN experiment config which can be executed in different ways.""" + """Builds QR-DQN experiment config which can be executed in different ways.""" - def env_factory(seed): - # del seed - # return helpers.make_atari_environment( - # level=env_name, sticky_actions=True, zero_discount_on_life_loss=False) - return None + def env_factory(seed): + # del seed + # return helpers.make_atari_environment( + # level=env_name, sticky_actions=True, zero_discount_on_life_loss=False) + return None - def net_factory( - environment_spec: specs.EnvironmentSpec) -> dqn.DQNNetworks: - """Creates networks for training DQN on Gym Env.""" - num_quantiles = 20 + def net_factory(environment_spec: specs.EnvironmentSpec) -> dqn.DQNNetworks: + """Creates networks for training DQN on Gym Env.""" + num_quantiles = 20 - def network(inputs): - model = hk.Sequential([ - hk.nets.MLP([512, 128, environment_spec.actions.num_values * num_quantiles]), - ]) - q_dist = model(inputs).reshape(-1, environment_spec.actions.num_values, - num_quantiles) - q_values = jnp.mean(q_dist, axis=-1) - return q_values, q_dist + def network(inputs): + 
model = hk.Sequential([ + hk.nets.MLP( + [512, 128, environment_spec.actions.num_values * num_quantiles]), + ]) + q_dist = model(inputs).reshape(-1, environment_spec.actions.num_values, + num_quantiles) + q_values = jnp.mean(q_dist, axis=-1) + return q_values, q_dist - network_hk = hk.without_apply_rng(hk.transform(network)) - obs = utils.add_batch_dim(utils.zeros_like(environment_spec.observations)) - network = networks_lib.FeedForwardNetwork( - init=lambda rng: network_hk.init(rng, obs), apply=network_hk.apply) - typed_network = networks_lib.non_stochastic_network_to_typed(network) - return dqn.DQNNetworks(policy_network=typed_network) + network_hk = hk.without_apply_rng(hk.transform(network)) + obs = utils.add_batch_dim(utils.zeros_like(environment_spec.observations)) + network = networks_lib.FeedForwardNetwork( + init=lambda rng: network_hk.init(rng, obs), apply=network_hk.apply) + typed_network = networks_lib.non_stochastic_network_to_typed(network) + return dqn.DQNNetworks(policy_network=typed_network) - # Construct the agent. - config = dqn.DQNConfig(discount=0.99, n_step=1, epsilon=0.1) + # Construct the agent. + config = dqn.DQNConfig(discount=0.99, n_step=1, epsilon=0.1) - loss_fn = losses.QrDqn(num_atoms=2, huber_param=1.) - dqn_builder = dqn.DistributionalDQNBuilder(config, loss_fn=loss_fn) + loss_fn = losses.QrDqn(num_atoms=2, huber_param=1.) + dqn_builder = dqn.DistributionalDQNBuilder(config, loss_fn=loss_fn) - return experiments.ExperimentConfig(builder=dqn_builder, - environment_factory=env_factory, - network_factory=net_factory, - seed=0, - max_num_actor_steps=100) + return experiments.ExperimentConfig(builder=dqn_builder, + environment_factory=env_factory, + network_factory=net_factory, + seed=0, + max_num_actor_steps=100) diff --git a/py/sight/widgets/decision/acme/build_td3_actor.py b/py/sight/widgets/decision/acme/build_td3_actor.py index 6a5cc35..87b9e8c 100644 --- a/py/sight/widgets/decision/acme/build_td3_actor.py +++ b/py/sight/widgets/decision/acme/build_td3_actor.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Setting up configuration for DQN Experiment.""" from acme.agents.jax import td3 @@ -25,7 +24,7 @@ def env_factory(): # if env_name: # return wrappers.GymWrapper(gym.make(env_name)) # else: - return None + return None network_factory = ( lambda spec: td3.make_networks(spec, hidden_layer_sizes=(256, 256, 256))) @@ -37,10 +36,8 @@ def env_factory(): ) td3_builder = td3.TD3Builder(config) - return experiments.ExperimentConfig( - builder=td3_builder, - environment_factory=env_factory, - network_factory=network_factory, - seed=0, - max_num_actor_steps=10) - + return experiments.ExperimentConfig(builder=td3_builder, + environment_factory=env_factory, + network_factory=network_factory, + seed=0, + max_num_actor_steps=10) diff --git a/py/sight/widgets/decision/acme/shower_env.py b/py/sight/widgets/decision/acme/shower_env.py index 4bcb5aa..1a32935 100644 --- a/py/sight/widgets/decision/acme/shower_env.py +++ b/py/sight/widgets/decision/acme/shower_env.py @@ -11,10 +11,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
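Each of these `build_*_config` helpers follows the same shape: it returns an `acme.jax.experiments.ExperimentConfig` whose `network_factory` and `builder` are later consumed by `create_new_actor`. The sketch below shows that consumption path under stated assumptions: it presumes a recent dm-acme with the JAX extras installed, the environment spec is a made-up two-action example, and `build_dqn_config` refers to the helper defined above (it is not imported here).

```python
# Sketch only: wiring a hand-built EnvironmentSpec into one of the
# build_*_config helpers above. All spec values are invented for illustration.
from acme import specs
from acme.jax.experiments import config as experiments_config
import numpy as np

env_spec = specs.EnvironmentSpec(
    observations=specs.BoundedArray((4,), np.float32, minimum=-1.0,
                                    maximum=1.0, name="observation"),
    actions=specs.DiscreteArray(num_values=2, dtype=np.int64, name="action"),
    rewards=specs.Array((), float, name="reward"),
    discounts=specs.BoundedArray((), float, minimum=0.0, maximum=1.0,
                                 name="discount"),
)

experiment = build_dqn_config()                  # helper from this patch
networks = experiment.network_factory(env_spec)  # DQNNetworks for this spec
policy = experiments_config.make_policy(
    experiment=experiment,
    networks=networks,
    environment_spec=env_spec,
    evaluation=False,
)
```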
- """Custom implementation of manage shower temperature environment.""" import random + import dm_env import numpy as np @@ -27,9 +27,11 @@ def __init__(self): self.shower_length = 60 def action_spec(self): - return dm_env.specs.BoundedArray( - shape=(), dtype=int, name='action', minimum=0, maximum=2 - ) + return dm_env.specs.BoundedArray(shape=(), + dtype=int, + name='action', + minimum=0, + maximum=2) def observation_spec(self): return dm_env.specs.BoundedArray( @@ -63,9 +65,8 @@ def step(self, action): # Return step information if done: - return dm_env.termination( - reward, np.array([self.state], dtype=np.float32) - ) + return dm_env.termination(reward, np.array([self.state], + dtype=np.float32)) else: return dm_env.transition(reward, np.array([self.state], dtype=np.float32)) diff --git a/py/sight/widgets/decision/acme/sight_adder.py b/py/sight/widgets/decision/acme/sight_adder.py index 8f14a9f..747daa4 100644 --- a/py/sight/widgets/decision/acme/sight_adder.py +++ b/py/sight/widgets/decision/acme/sight_adder.py @@ -13,11 +13,12 @@ # limitations under the License. """Custom implementation of base Adder.""" -from helpers.logs.logs_handler import logger as logging from typing import Any, Optional + from acme import types from acme.adders import base import dm_env +from helpers.logs.logs_handler import logger as logging from sight_service.proto import service_pb2 from sight_service.proto.numproto.numproto import ndarray_to_proto @@ -25,90 +26,89 @@ class SightAdder(base.Adder): - """A custom adder based on the base.Adder with some logic changes. + """A custom adder based on the base.Adder with some logic changes. This adder maintains observations provided via actor in a list. """ - def __init__(self): - """Initialize a CustomAdder instance.""" - self._observation_list = [] - self._existing_batch_last_record = None - - def reset(self, timeout_ms: Optional[int] = None): - """Resets the adder's buffer.""" - # reset called at initial stage or afrer whole episode completed - if (not self._existing_batch_last_record - or self._existing_batch_last_record["next_timestep"].last()): - self._observation_list = [] - # whole episode not completed so, converting last record of this batch - # as FIRST type record for next batch - else: - timestep = dm_env.TimeStep( - step_type=dm_env.StepType.FIRST, - reward=None, - discount=None, - observation=self._existing_batch_last_record["next_timestep"]. 
-            observation,
-        )
-        observation_dict = {"action": None, "next_timestep": timestep}
-        self._observation_list = [observation_dict]
-
-    def observation_to_proto(self, observation: dict[str, Any]):
-        method_name = "observation_to_proto"
-        logging.debug(">>>> In %s of %s", method_name, _file_name)
-        obs = service_pb2.Acme_Request().Observation()
-
-        if observation["action"]:
-            obs.action.CopyFrom(ndarray_to_proto(observation["action"]))
-        obs.steptype = observation["next_timestep"].step_type
-        if observation["next_timestep"].reward:
-            obs.reward.CopyFrom(
-                ndarray_to_proto(observation["next_timestep"].reward))
-        if observation["next_timestep"].discount:
-            obs.discount.CopyFrom(
-                ndarray_to_proto(observation["next_timestep"].discount))
-        obs.observation.CopyFrom(
-            ndarray_to_proto(observation["next_timestep"].observation))
-        logging.debug("<<<< Out %s of %s", method_name, _file_name)
-        return obs
-
-    def fetch_and_reset_observation_list(self, sight_client_id,
-                                         sight_worker_id, learner_keys):
-        method_name = "fetch_and_reset_observation_list"
-        logging.debug(">>>> In %s of %s", method_name, _file_name)
-        final_observation = False
-        request = service_pb2.DecisionPointRequest()
-        request.client_id = str(sight_client_id)
-        request.worker_id = str(sight_worker_id)
-
-        acme_config = service_pb2.Acme_Request()
-        if len(self._observation_list) > 0:
-            for episode_obs in self._observation_list:
-                obs = self.observation_to_proto(episode_obs)
-                acme_config.episode_observations.append(obs)
-        # print("learner_keys : ", learner_keys)
-
-        if (learner_keys != ['']):
-            for key in learner_keys:
-                acme_config.learner_keys.append(key)
-
-        request.acme_config.CopyFrom(acme_config)
-        self.reset()
-        logging.debug("<<<< Out %s of %s", method_name, _file_name)
-        return request, final_observation
-
-    def add_first(self, timestep: dm_env.TimeStep):
-        """Record the first observation of a trajectory."""
-        self.add(action=None, next_timestep=timestep)
-
-    def add(
-        self,
-        action: types.NestedArray,
-        next_timestep: dm_env.TimeStep,
-        extras: types.NestedArray = (),
-    ):
-        """Record an action and the following timestep."""
-        observation_dict = {"action": action, "next_timestep": next_timestep}
-        self._existing_batch_last_record = observation_dict
-        self._observation_list.append(observation_dict)
+  def __init__(self):
+    """Initialize a CustomAdder instance."""
+    self._observation_list = []
+    self._existing_batch_last_record = None
+
+  def reset(self, timeout_ms: Optional[int] = None):
+    """Resets the adder's buffer."""
+    # reset called at initial stage or after the whole episode completes
+    if (not self._existing_batch_last_record or
+        self._existing_batch_last_record["next_timestep"].last()):
+      self._observation_list = []
+    # whole episode not completed, so convert the last record of this batch
+    # into a FIRST-type record for the next batch
+    else:
+      timestep = dm_env.TimeStep(
+          step_type=dm_env.StepType.FIRST,
+          reward=None,
+          discount=None,
+          observation=self._existing_batch_last_record["next_timestep"].
+ observation, + ) + observation_dict = {"action": None, "next_timestep": timestep} + self._observation_list = [observation_dict] + + def observation_to_proto(self, observation: dict[str, Any]): + method_name = "observation_to_proto" + logging.debug(">>>> In %s of %s", method_name, _file_name) + obs = service_pb2.Acme_Request().Observation() + + if observation["action"]: + obs.action.CopyFrom(ndarray_to_proto(observation["action"])) + obs.steptype = observation["next_timestep"].step_type + if observation["next_timestep"].reward: + obs.reward.CopyFrom(ndarray_to_proto(observation["next_timestep"].reward)) + if observation["next_timestep"].discount: + obs.discount.CopyFrom( + ndarray_to_proto(observation["next_timestep"].discount)) + obs.observation.CopyFrom( + ndarray_to_proto(observation["next_timestep"].observation)) + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return obs + + def fetch_and_reset_observation_list(self, sight_client_id, sight_worker_id, + learner_keys): + method_name = "fetch_and_reset_observation_list" + logging.debug(">>>> In %s of %s", method_name, _file_name) + final_observation = False + request = service_pb2.DecisionPointRequest() + request.client_id = str(sight_client_id) + request.worker_id = str(sight_worker_id) + + acme_config = service_pb2.Acme_Request() + if len(self._observation_list) > 0: + for episode_obs in self._observation_list: + obs = self.observation_to_proto(episode_obs) + acme_config.episode_observations.append(obs) + # print("learner_keys : ", learner_keys) + + if (learner_keys != ['']): + for key in learner_keys: + acme_config.learner_keys.append(key) + + request.acme_config.CopyFrom(acme_config) + self.reset() + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return request, final_observation + + def add_first(self, timestep: dm_env.TimeStep): + """Record the first observation of a trajectory.""" + self.add(action=None, next_timestep=timestep) + + def add( + self, + action: types.NestedArray, + next_timestep: dm_env.TimeStep, + extras: types.NestedArray = (), + ): + """Record an action and the following timestep.""" + observation_dict = {"action": action, "next_timestep": next_timestep} + self._existing_batch_last_record = observation_dict + self._observation_list.append(observation_dict) diff --git a/py/sight/widgets/decision/acme/sight_variable_source.py b/py/sight/widgets/decision/acme/sight_variable_source.py index 8dffeb8..3728034 100644 --- a/py/sight/widgets/decision/acme/sight_variable_source.py +++ b/py/sight/widgets/decision/acme/sight_variable_source.py @@ -11,24 +11,25 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
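The reformatted `SightAdder` is easiest to understand through its buffering contract: `add_first` and `add` only append records locally, and `fetch_and_reset_observation_list` drains the buffer into a `DecisionPointRequest`, re-seeding it with a FIRST record when an episode is still open. Below is a small usage sketch, assuming `SightAdder` and the Sight service protos are importable; the client/worker IDs and array values are placeholders, and the final assert peeks at the internal `_observation_list` purely for illustration.

```python
# Usage sketch for SightAdder (assumes this module and the Sight protos are
# importable; IDs and values are placeholders).
import dm_env
import numpy as np

adder = SightAdder()
obs = np.zeros(4, dtype=np.float32)

# Episode start: buffered as a record with no action.
adder.add_first(dm_env.TimeStep(dm_env.StepType.FIRST, None, None, obs))

# Each step buffers (action, next_timestep) locally; nothing is sent yet.
adder.add(np.array([1], dtype=np.int64),
          dm_env.TimeStep(dm_env.StepType.MID, np.array(1.0), np.array(1.0),
                          obs))

# Draining builds the DecisionPointRequest proto and resets the buffer.
# Because the last record was MID (the episode is still open), reset()
# re-seeds the buffer with a FIRST record so the next batch continues
# the same episode.
request, _ = adder.fetch_and_reset_observation_list("client-1", "worker-0",
                                                    [''])
assert len(adder._observation_list) == 1  # the carried-over FIRST record
```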
- """Custom implementation of core variable_source.""" -import os -import time -import numpy as np import json +import os import pickle +import time from typing import Any, List, Sequence -from absl import flags, logging + +from absl import flags +from absl import logging from acme import core from acme import types import jax.numpy as jnp -from sight_service.proto import service_pb2 -from sight_service.proto.numproto.numproto import ndarray_to_proto, proto_to_ndarray +import numpy as np from sight import data_structures from sight import service_utils as service - from sight.widgets.decision.acme import sight_adder +from sight_service.proto import service_pb2 +from sight_service.proto.numproto.numproto import ndarray_to_proto +from sight_service.proto.numproto.numproto import proto_to_ndarray _file_name = "custom_variable_source.py" @@ -36,7 +37,7 @@ # Convert lists back to NumPy arrays during deserialization def convert_list_to_np(obj): if 'data' in obj and 'shape' in obj: - return np.array(obj['data']).reshape(obj['shape']) + return np.array(obj['data']).reshape(obj['shape']) return obj @@ -86,7 +87,8 @@ def proto_to_weights(self, networks_weights): if layer.weights.b: layer_dict['weights']['b'] = jnp.array(layer.weights.b) if layer.weights.w and len(layer.weights.w.ndarray) > 0: - layer_dict['weights']['w'] = jnp.array(proto_to_ndarray(layer.weights.w)) + layer_dict['weights']['w'] = jnp.array( + proto_to_ndarray(layer.weights.w)) # layer_dict = { # "name": layer.name, # "weights": { @@ -119,20 +121,17 @@ def get_variables(self, names: Sequence[str]) -> List[types.NestedArray]: # if len(self._adder._observation_list) > 0: request, final_observation = self._adder.fetch_and_reset_observation_list( - self._client_id, self._worker_id, names - ) + self._client_id, self._worker_id, names) # print("request here is : ", request) # raise SystemExit start_time = time.time() if final_observation: response = service.call( - lambda s, meta: s.FinalizeEpisode(request, 300, metadata=meta) - ) + lambda s, meta: s.FinalizeEpisode(request, 300, metadata=meta)) else: response = service.call( - lambda s, meta: s.DecisionPoint(request, 300, metadata=meta) - ) + lambda s, meta: s.DecisionPoint(request, 300, metadata=meta)) # weights = json.loads(response.weights.decode('utf-8'), object_hook=convert_list_to_np) weights = pickle.loads(response.weights) diff --git a/py/sight/widgets/decision/analyze_decision_outcomes.py b/py/sight/widgets/decision/analyze_decision_outcomes.py index d5807d4..4159f3e 100644 --- a/py/sight/widgets/decision/analyze_decision_outcomes.py +++ b/py/sight/widgets/decision/analyze_decision_outcomes.py @@ -20,9 +20,12 @@ from absl import app from absl import flags -from helpers.logs.logs_handler import logger as logging -from helpers.logs.logs_handler import logger as logging from apache_beam.coders import ProtoCoder +from google3.pipeline.flume.py import runner +from google3.pipeline.flume.py.io import capacitorio +from google3.pyglib import gfile +from google3.pyglib.contrib.gpathlib import gpath_flag +from helpers.logs.logs_handler import logger as logging import joblib import numpy as np import pandas as pd @@ -42,11 +45,6 @@ from sklearn.preprocessing import PolynomialFeatures from sklearn.svm import SVR -from google3.pipeline.flume.py import runner -from google3.pipeline.flume.py.io import capacitorio -from google3.pyglib import gfile -from google3.pyglib.contrib.gpathlib import gpath_flag - _IN_LOG_FILE = flags.DEFINE_list( 'in_log_file', None, @@ -66,33 +64,33 @@ class 
AnalyzeSequence(beam.DoFn): - """Converts sets of named value objects to time-ordered sequences.""" - - def __init__( - self, - named_value_and_object_label: str, - decision_point_label: str, - decision_outcome_label: str, - configuration_label: str, - ): - self.named_value_and_object_label = named_value_and_object_label - self.decision_point_label = decision_point_label - self.decision_outcome_label = decision_outcome_label - self.configuration_label = configuration_label - - def process( - self, - task: Tuple[Any, Dict[str, Union[List[Any], - List[Dict[str, sight_pb2.Object]]]]], - ) -> Iterator[Tuple[ - str, - Tuple[ - List[Tuple[Dict[str, Any], float]], - Dict[str, sight_pb2.DecisionConfigurationStart.StateProps], - Dict[str, sight_pb2.DecisionConfigurationStart.StateProps], - ], - ]]: - """Time-orders the sequence of objects for a given simulation attribute. + """Converts sets of named value objects to time-ordered sequences.""" + + def __init__( + self, + named_value_and_object_label: str, + decision_point_label: str, + decision_outcome_label: str, + configuration_label: str, + ): + self.named_value_and_object_label = named_value_and_object_label + self.decision_point_label = decision_point_label + self.decision_outcome_label = decision_outcome_label + self.configuration_label = configuration_label + + def process( + self, + task: Tuple[Any, Dict[str, Union[List[Any], + List[Dict[str, sight_pb2.Object]]]]], + ) -> Iterator[Tuple[ + str, + Tuple[ + List[Tuple[Dict[str, Any], float]], + Dict[str, sight_pb2.DecisionConfigurationStart.StateProps], + Dict[str, sight_pb2.DecisionConfigurationStart.StateProps], + ], + ]]: + """Time-orders the sequence of objects for a given simulation attribute. Args: task: A sequence of objects that describe the state of some simulation @@ -101,320 +99,304 @@ def process( Yields: A time-ordered version of the input sequence. 
""" - named_value_and_object = [ - (x['named_value'].location, x) - for x in task[1][self.named_value_and_object_label] - ] - decision_point = [(x['decision_point'].location, x) - for x in task[1][self.decision_point_label]] - decision_outcome = [(x['decision_outcome'].location, x) - for x in task[1][self.decision_outcome_label]] - - # Get the attributes used by the application within this simulation - state_attrs = None - action_attrs = None - for cfg in task[1][self.configuration_label]: - if (cfg['configuration'].block_start.configuration.sub_type == - sight_pb2.ConfigurationStart.ST_DECISION_CONFIGURATION): - if state_attrs: - raise ValueError( - 'Multiple decision configurations present in run %s' % - task[0]) - decision_configuration = cfg[ - 'configuration'].block_start.configuration.decision_configuration - state_attrs = decision_configuration.state_attrs - action_attrs = decision_configuration.action_attrs - - if state_attrs is None: - raise ValueError('No decision configuration present in run %s' % - task[0]) - - log = [ - x[1] for x in sorted( - named_value_and_object + decision_point + decision_outcome, - key=lambda x: x[0], - ) - ] - - state = {} - last_decision_point: sight_pb2.DecisionPoint = None - accumulated_outcome = 0 - logging.info('state_attrs=%s', state_attrs) - dataset: Dict[str, List[Tuple[Dict[str, Any], float]]] = {} - for obj in log: - logging.info('obj=%s', obj) - if 'object' in obj: - if obj['object'][0] in state_attrs: - state[obj['object'][0]] = obj['object'][1] - logging.info('updated state=%s', state) - elif 'decision_point' in obj: - if last_decision_point: - observation = last_decision_point_state.copy() - for ( - param_name, - param_value, - ) in last_decision_point.choice_params.items(): - observation['chosen_param_' + - param_name] = float(param_value) - if last_decision_point.choice_label not in dataset: - dataset[last_decision_point.choice_label] = [] - dataset[last_decision_point.choice_label].append( - (observation, accumulated_outcome)) - logging.info( - 'observation=%s, accumulated_outcome=%s, last_decision_point=%s', - observation, - accumulated_outcome, - last_decision_point, - ) - - last_decision_point = obj['decision_point'].decision_point - last_decision_point_state = state.copy() - logging.info('last_decision_point_state=%s', - last_decision_point_state) - accumulated_outcome = 0 - elif 'decision_outcome' in obj: - accumulated_outcome += float( - obj['decision_outcome'].decision_outcome.outcome_value) - logging.info( - 'outcome=%s', - obj['decision_outcome'].decision_outcome.outcome_value) - + named_value_and_object = [ + (x['named_value'].location, x) + for x in task[1][self.named_value_and_object_label] + ] + decision_point = [(x['decision_point'].location, x) + for x in task[1][self.decision_point_label]] + decision_outcome = [(x['decision_outcome'].location, x) + for x in task[1][self.decision_outcome_label]] + + # Get the attributes used by the application within this simulation + state_attrs = None + action_attrs = None + for cfg in task[1][self.configuration_label]: + if (cfg['configuration'].block_start.configuration.sub_type == + sight_pb2.ConfigurationStart.ST_DECISION_CONFIGURATION): + if state_attrs: + raise ValueError( + 'Multiple decision configurations present in run %s' % task[0]) + decision_configuration = cfg[ + 'configuration'].block_start.configuration.decision_configuration + state_attrs = decision_configuration.state_attrs + action_attrs = decision_configuration.action_attrs + + if state_attrs is None: + raise 
ValueError('No decision configuration present in run %s' % task[0]) + + log = [ + x[1] for x in sorted( + named_value_and_object + decision_point + decision_outcome, + key=lambda x: x[0], + ) + ] + + state = {} + last_decision_point: sight_pb2.DecisionPoint = None + accumulated_outcome = 0 + logging.info('state_attrs=%s', state_attrs) + dataset: Dict[str, List[Tuple[Dict[str, Any], float]]] = {} + for obj in log: + logging.info('obj=%s', obj) + if 'object' in obj: + if obj['object'][0] in state_attrs: + state[obj['object'][0]] = obj['object'][1] + logging.info('updated state=%s', state) + elif 'decision_point' in obj: if last_decision_point: - observation = last_decision_point_state.copy() - for param_name, param_value in last_decision_point.choice_params.items( - ): - observation['chosen_param_' + param_name] = float(param_value) - if last_decision_point.choice_label not in dataset: - dataset[last_decision_point.choice_label] = [] - dataset[last_decision_point.choice_label].append( - (observation, accumulated_outcome)) - state = {} - - for choice_label, obs_data in dataset.items(): - yield ( - choice_label, - ( - obs_data, - state_attrs, - action_attrs, - ), - ) + observation = last_decision_point_state.copy() + for ( + param_name, + param_value, + ) in last_decision_point.choice_params.items(): + observation['chosen_param_' + param_name] = float(param_value) + if last_decision_point.choice_label not in dataset: + dataset[last_decision_point.choice_label] = [] + dataset[last_decision_point.choice_label].append( + (observation, accumulated_outcome)) + logging.info( + 'observation=%s, accumulated_outcome=%s, last_decision_point=%s', + observation, + accumulated_outcome, + last_decision_point, + ) + + last_decision_point = obj['decision_point'].decision_point + last_decision_point_state = state.copy() + logging.info('last_decision_point_state=%s', last_decision_point_state) + accumulated_outcome = 0 + elif 'decision_outcome' in obj: + accumulated_outcome += float( + obj['decision_outcome'].decision_outcome.outcome_value) + logging.info('outcome=%s', + obj['decision_outcome'].decision_outcome.outcome_value) + + if last_decision_point: + observation = last_decision_point_state.copy() + for param_name, param_value in last_decision_point.choice_params.items(): + observation['chosen_param_' + param_name] = float(param_value) + if last_decision_point.choice_label not in dataset: + dataset[last_decision_point.choice_label] = [] + dataset[last_decision_point.choice_label].append( + (observation, accumulated_outcome)) + state = {} + + for choice_label, obs_data in dataset.items(): + yield ( + choice_label, + ( + obs_data, + state_attrs, + action_attrs, + ), + ) class TrainOutcomePrediction(beam.DoFn): - """Trains a model that predicts decision outcome values from decisions.""" - - def process( - self, - task: Tuple[ - str, - Iterable[Tuple[ - List[Tuple[Dict[str, Any], float]], - Dict[str, sight_pb2.DecisionConfigurationStart.StateProps], - Dict[str, sight_pb2.DecisionConfigurationStart.StateProps], - ]], - ], - ) -> None: - choice_label = task[0] - columns = None - - state_attrs = None - action_attrs = None - for dataset in task[1]: - if state_attrs is None: - _, state_attrs, action_attrs = dataset - else: - if state_attrs != dataset[1] or action_attrs != dataset[2]: - raise ValueError( - 'Inconsistent state/action attributes across runs.') - - input_data = [] - output_data = [] - for dataset in task[1]: - for obs in dataset[0]: - if not columns: - columns = obs[0].keys() - row = [] - for c in 
columns: - row.append(obs[0][c]) - input_data.append(row) - output_data.append(obs[1]) - - num_total_rows = len(input_data) - num_train_rows = int(num_total_rows * 0.8) - input_array = PolynomialFeatures(2).fit_transform(np.array(input_data)) - output_array = np.array(output_data) - - indices = np.random.permutation(num_total_rows) - train_idx, eval_idx = indices[:num_train_rows], indices[ - num_train_rows:] - train_input_data = input_array[train_idx, :] - train_output_data = output_array[train_idx] - eval_input_data = input_array[eval_idx, :] - eval_output_data = output_array[eval_idx] - - np.set_printoptions(threshold=sys.maxsize) - - with gfile.Open('/tmp/decision_outcomes.' + choice_label + '.csv', - 'w') as f: - pd.DataFrame( - np.concatenate( - ( - input_array, - np.reshape(output_array, (output_array.shape[0], 1)), - ), - axis=1, - )).to_csv(f) - - lowest_error = 1e100 - best_model = None - for learner in [ - AdaBoostRegressor(), - GradientBoostingRegressor(), - RandomForestRegressor(), - LinearRegression(), - ]: - model = learner.fit(train_input_data, train_output_data) - - predicted_array = model.predict(eval_input_data) - - logging.info('eval_input_data%s=\n%s', eval_input_data.shape, - eval_input_data) - logging.info('eval_output_data%s=\n%s', eval_output_data.shape, - eval_output_data) - logging.info('predicted_array%s=%s', predicted_array.shape, - predicted_array) - mae = metrics.mean_absolute_error(eval_output_data, - predicted_array) - logging.info( - '%s: mae=%s, rmse=%s', - task[0], - mae / abs(np.mean(eval_output_data)), - math.sqrt( - metrics.mean_squared_error(eval_output_data, - predicted_array)) / - abs(np.mean(eval_output_data)), - ) - if lowest_error > mae: - lowest_error = mae - best_model = model - - with io.BytesIO() as model_bytes: - joblib.dump(best_model, model_bytes) - - with Sight( - sight_pb2.Params( - label='Decision Outcomes', - log_owner='bronevet@google.com', - capacitor_output=True, - )) as sight: - scikit_learn_algorithm = (sight_pb2.DecisionConfigurationStart. - ScikitLearnAlgorithm()) - scikit_learn_algorithm.model_encoding = model_bytes.getvalue() - scikit_learn_algorithm.input_fields.extend(list(columns)) - - choice_algorithm = ( - sight_pb2.DecisionConfigurationStart.ChoiceAlgorithm()) - choice_algorithm.scikit_learn.CopyFrom(scikit_learn_algorithm) - - decision_configuration = sight_pb2.DecisionConfigurationStart() - for attr_name, props in state_attrs.items(): - decision_configuration.state_attrs[attr_name].CopyFrom( - props) - for attr_name, props in action_attrs.items(): - decision_configuration.action_attrs[attr_name].CopyFrom( - props) - decision_configuration.choice_algorithm[choice_label].CopyFrom( - choice_algorithm) - - sight.enter_block( - 'Decision Configuration', - sight_pb2.Object(block_start=sight_pb2.BlockStart( - sub_type=sight_pb2.BlockStart.ST_CONFIGURATION, - configuration=sight_pb2.ConfigurationStart( - sub_type=sight_pb2.ConfigurationStart. 
- ST_DECISION_CONFIGURATION, - decision_configuration=decision_configuration, - ), - )), - ) - sight.exit_block('Decision Configuration', sight_pb2.Object()) + """Trains a model that predicts decision outcome values from decisions.""" + + def process( + self, + task: Tuple[ + str, + Iterable[Tuple[ + List[Tuple[Dict[str, Any], float]], + Dict[str, sight_pb2.DecisionConfigurationStart.StateProps], + Dict[str, sight_pb2.DecisionConfigurationStart.StateProps], + ]], + ], + ) -> None: + choice_label = task[0] + columns = None + + state_attrs = None + action_attrs = None + for dataset in task[1]: + if state_attrs is None: + _, state_attrs, action_attrs = dataset + else: + if state_attrs != dataset[1] or action_attrs != dataset[2]: + raise ValueError('Inconsistent state/action attributes across runs.') + + input_data = [] + output_data = [] + for dataset in task[1]: + for obs in dataset[0]: + if not columns: + columns = obs[0].keys() + row = [] + for c in columns: + row.append(obs[0][c]) + input_data.append(row) + output_data.append(obs[1]) + + num_total_rows = len(input_data) + num_train_rows = int(num_total_rows * 0.8) + input_array = PolynomialFeatures(2).fit_transform(np.array(input_data)) + output_array = np.array(output_data) + + indices = np.random.permutation(num_total_rows) + train_idx, eval_idx = indices[:num_train_rows], indices[num_train_rows:] + train_input_data = input_array[train_idx, :] + train_output_data = output_array[train_idx] + eval_input_data = input_array[eval_idx, :] + eval_output_data = output_array[eval_idx] + + np.set_printoptions(threshold=sys.maxsize) + + with gfile.Open('/tmp/decision_outcomes.' + choice_label + '.csv', + 'w') as f: + pd.DataFrame( + np.concatenate( + ( + input_array, + np.reshape(output_array, (output_array.shape[0], 1)), + ), + axis=1, + )).to_csv(f) + + lowest_error = 1e100 + best_model = None + for learner in [ + AdaBoostRegressor(), + GradientBoostingRegressor(), + RandomForestRegressor(), + LinearRegression(), + ]: + model = learner.fit(train_input_data, train_output_data) + + predicted_array = model.predict(eval_input_data) + + logging.info('eval_input_data%s=\n%s', eval_input_data.shape, + eval_input_data) + logging.info('eval_output_data%s=\n%s', eval_output_data.shape, + eval_output_data) + logging.info('predicted_array%s=%s', predicted_array.shape, + predicted_array) + mae = metrics.mean_absolute_error(eval_output_data, predicted_array) + logging.info( + '%s: mae=%s, rmse=%s', + task[0], + mae / abs(np.mean(eval_output_data)), + math.sqrt( + metrics.mean_squared_error(eval_output_data, predicted_array)) / + abs(np.mean(eval_output_data)), + ) + if lowest_error > mae: + lowest_error = mae + best_model = model + + with io.BytesIO() as model_bytes: + joblib.dump(best_model, model_bytes) + + with Sight( + sight_pb2.Params( + label='Decision Outcomes', + log_owner='bronevet@google.com', + capacitor_output=True, + )) as sight: + scikit_learn_algorithm = ( + sight_pb2.DecisionConfigurationStart.ScikitLearnAlgorithm()) + scikit_learn_algorithm.model_encoding = model_bytes.getvalue() + scikit_learn_algorithm.input_fields.extend(list(columns)) + + choice_algorithm = ( + sight_pb2.DecisionConfigurationStart.ChoiceAlgorithm()) + choice_algorithm.scikit_learn.CopyFrom(scikit_learn_algorithm) + + decision_configuration = sight_pb2.DecisionConfigurationStart() + for attr_name, props in state_attrs.items(): + decision_configuration.state_attrs[attr_name].CopyFrom(props) + for attr_name, props in action_attrs.items(): + 
decision_configuration.action_attrs[attr_name].CopyFrom(props) + decision_configuration.choice_algorithm[choice_label].CopyFrom( + choice_algorithm) + + sight.enter_block( + 'Decision Configuration', + sight_pb2.Object(block_start=sight_pb2.BlockStart( + sub_type=sight_pb2.BlockStart.ST_CONFIGURATION, + configuration=sight_pb2.ConfigurationStart( + sub_type=sight_pb2.ConfigurationStart. + ST_DECISION_CONFIGURATION, + decision_configuration=decision_configuration, + ), + )), + ) + sight.exit_block('Decision Configuration', sight_pb2.Object()) def main(argv): - if len(argv) > 1: - raise app.UsageError('Too many command-line arguments.') - - root = beam.Pipeline( - runner=runner.FlumeRunner()) # beam.runners.DirectRunner()) - reads = [] - for file_path in _IN_LOG_FILE.value: - reads.append(root - | f'Read {file_path}' >> capacitorio.ReadFromCapacitor( - file_path, ['*'], ProtoCoder(sight_pb2.Object))) - - log: beam.pvalue.PCollection[sight_pb2.Object] = reads | beam.Flatten() - - objects_with_ancestors = log | beam.ParDo( - analysis_utils.ExtractAncestorBlockStartLocations()) - - named_value = analysis_utils.block_start_objects_key_self( - log, sight_pb2.BlockStart.ST_NAMED_VALUE, 'named_value') - decision_point = analysis_utils.single_objects_key_log_uid( - log, sight_pb2.Object.ST_DECISION_POINT, 'decision_point') - decision_outcome = analysis_utils.single_objects_key_log_uid( - log, sight_pb2.Object.ST_DECISION_OUTCOME, 'decision_outcome') - configuration = analysis_utils.block_start_objects_key_log_uid( - log, sight_pb2.BlockStart.ST_CONFIGURATION, 'configuration') - - _ = decision_point | 'decision_point' >> beam.io.WriteToText( - str(_OUT_FILE.value) + '.decision_point') - _ = decision_outcome | 'decision_outcome' >> beam.io.WriteToText( - str(_OUT_FILE.value) + '.decision_outcome') - - named_value_and_object = analysis_utils.create_log_uid_key( - 'named_values_to_objects log_uid_key', - 'named_value', - analysis_utils.named_values_to_objects( - 'named_value', - named_value, - 'objects', - objects_with_ancestors, - ), - ) - _ = named_value_and_object | 'named_value_and_object' >> beam.io.WriteToText( - str(_OUT_FILE.value) + '.named_value_and_object') - - analyzed = ( - { - 'named_value_and_object': named_value_and_object, - 'decision_point': decision_point, - 'decision_outcome': decision_outcome, - 'configuration': configuration, - } - | - 'named_value_and_object decision_point decision_outcome configuration CoGroupByKey' - >> beam.CoGroupByKey() - | - 'named_value_and_object decision_point decision_outcome configuration AnalyzeSequence' - >> beam.ParDo( - AnalyzeSequence( - 'named_value_and_object', - 'decision_point', - 'decision_outcome', - 'configuration', - ))) - - _ = analyzed | 'analyzed' >> beam.io.WriteToText( - str(_OUT_FILE.value) + '.analyzed') - - _ = (analyzed - | 'TrainOutcomePrediction GroupByKey' >> beam.GroupByKey() - | 'TrainOutcomePrediction' >> beam.ParDo(TrainOutcomePrediction())) - - results = root.run() - results.wait_until_finish() + if len(argv) > 1: + raise app.UsageError('Too many command-line arguments.') + + root = beam.Pipeline( + runner=runner.FlumeRunner()) # beam.runners.DirectRunner()) + reads = [] + for file_path in _IN_LOG_FILE.value: + reads.append(root | f'Read {file_path}' >> capacitorio.ReadFromCapacitor( + file_path, ['*'], ProtoCoder(sight_pb2.Object))) + + log: beam.pvalue.PCollection[sight_pb2.Object] = reads | beam.Flatten() + + objects_with_ancestors = log | beam.ParDo( + analysis_utils.ExtractAncestorBlockStartLocations()) + + named_value 
= analysis_utils.block_start_objects_key_self( + log, sight_pb2.BlockStart.ST_NAMED_VALUE, 'named_value') + decision_point = analysis_utils.single_objects_key_log_uid( + log, sight_pb2.Object.ST_DECISION_POINT, 'decision_point') + decision_outcome = analysis_utils.single_objects_key_log_uid( + log, sight_pb2.Object.ST_DECISION_OUTCOME, 'decision_outcome') + configuration = analysis_utils.block_start_objects_key_log_uid( + log, sight_pb2.BlockStart.ST_CONFIGURATION, 'configuration') + + _ = decision_point | 'decision_point' >> beam.io.WriteToText( + str(_OUT_FILE.value) + '.decision_point') + _ = decision_outcome | 'decision_outcome' >> beam.io.WriteToText( + str(_OUT_FILE.value) + '.decision_outcome') + + named_value_and_object = analysis_utils.create_log_uid_key( + 'named_values_to_objects log_uid_key', + 'named_value', + analysis_utils.named_values_to_objects( + 'named_value', + named_value, + 'objects', + objects_with_ancestors, + ), + ) + _ = named_value_and_object | 'named_value_and_object' >> beam.io.WriteToText( + str(_OUT_FILE.value) + '.named_value_and_object') + + analyzed = ( + { + 'named_value_and_object': named_value_and_object, + 'decision_point': decision_point, + 'decision_outcome': decision_outcome, + 'configuration': configuration, + } | + 'named_value_and_object decision_point decision_outcome configuration CoGroupByKey' + >> beam.CoGroupByKey() | + 'named_value_and_object decision_point decision_outcome configuration AnalyzeSequence' + >> beam.ParDo( + AnalyzeSequence( + 'named_value_and_object', + 'decision_point', + 'decision_outcome', + 'configuration', + ))) + + _ = analyzed | 'analyzed' >> beam.io.WriteToText( + str(_OUT_FILE.value) + '.analyzed') + + _ = (analyzed | 'TrainOutcomePrediction GroupByKey' >> beam.GroupByKey() | + 'TrainOutcomePrediction' >> beam.ParDo(TrainOutcomePrediction())) + + results = root.run() + results.wait_until_finish() if __name__ == '__main__': - app.run(main) + app.run(main) diff --git a/py/sight/widgets/decision/converse.py b/py/sight/widgets/decision/converse.py index eed0a2c..acfbdb1 100644 --- a/py/sight/widgets/decision/converse.py +++ b/py/sight/widgets/decision/converse.py @@ -21,14 +21,13 @@ from absl import app from absl import flags -from helpers.logs.logs_handler import logger as logging import grpc -from sight_service.proto import service_pb2 -from sight_service.proto import service_pb2_grpc +from helpers.logs.logs_handler import logger as logging from sight import service_utils as service - from sight.proto import sight_pb2 from sight.service_utils import generate_metadata +from sight_service.proto import service_pb2 +from sight_service.proto import service_pb2_grpc _LOG_ID = flags.DEFINE_string( 'log_id', None, 'ID of the Sight log that tracks this execution.') @@ -42,29 +41,27 @@ def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError("Too many command-line arguments.") + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") - while True: - message = input('# ') - # print ('message=', message) - req = service_pb2.TellRequest() - req.client_id = _LOG_ID.value - req.message_str = message - response = service.call( - lambda s, meta: s.Tell(req, 300, metadata=meta)) - print('$ ' + response.response_str) + while True: + message = input('# ') + # print ('message=', message) + req = service_pb2.TellRequest() + req.client_id = _LOG_ID.value + req.message_str = message + response = service.call(lambda s, meta: s.Tell(req, 300, metadata=meta)) + print('$ ' + response.response_str) - 
while True: - req = service_pb2.ListenRequest() - req.client_id = _LOG_ID.value - response = service.call( - lambda s, meta: s.Listen(req, 300, metadata=meta)) - if response.response_ready: - print(response.response_str) - break - time.sleep(5) + while True: + req = service_pb2.ListenRequest() + req.client_id = _LOG_ID.value + response = service.call(lambda s, meta: s.Listen(req, 300, metadata=meta)) + if response.response_ready: + print(response.response_str) + break + time.sleep(5) if __name__ == "__main__": - app.run(main) + app.run(main) diff --git a/py/sight/widgets/decision/current_status.py b/py/sight/widgets/decision/current_status.py index 8139ce7..e53e01e 100644 --- a/py/sight/widgets/decision/current_status.py +++ b/py/sight/widgets/decision/current_status.py @@ -21,14 +21,13 @@ from absl import app from absl import flags -from helpers.logs.logs_handler import logger as logging import grpc -from sight_service.proto import service_pb2 -from sight_service.proto import service_pb2_grpc +from helpers.logs.logs_handler import logger as logging from sight import service_utils as service - from sight.proto import sight_pb2 from sight.service_utils import generate_metadata +from sight_service.proto import service_pb2 +from sight_service.proto import service_pb2_grpc _LOG_ID = flags.DEFINE_string( "log_id", None, "ID of the Sight log that tracks this execution.") @@ -42,27 +41,27 @@ def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError("Too many command-line arguments.") + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") - req = service_pb2.CurrentStatusRequest() - req.client_id = _LOG_ID.value - response = service.call( - lambda s, meta: s.CurrentStatus(req, 300, metadata=meta)) + req = service_pb2.CurrentStatusRequest() + req.client_id = _LOG_ID.value + response = service.call( + lambda s, meta: s.CurrentStatus(req, 300, metadata=meta)) - # print('response :', response.response_str) + # print('response :', response.response_str) - if response.status == service_pb2.CurrentStatusResponse.Status.DEFAULT: - print('Experiment is in Default state') - elif response.status == service_pb2.CurrentStatusResponse.Status.IN_PROGRESS: - print('Experiment is in-progress state') - elif response.status == service_pb2.CurrentStatusResponse.Status.SUCCESS: - print('Experiment is in Success state') - elif response.status == service_pb2.CurrentStatusResponse.Status.FAILURE: - print('Experiment is in Failure state') - else: - print('response.status = ', response.status) + if response.status == service_pb2.CurrentStatusResponse.Status.DEFAULT: + print('Experiment is in Default state') + elif response.status == service_pb2.CurrentStatusResponse.Status.IN_PROGRESS: + print('Experiment is in-progress state') + elif response.status == service_pb2.CurrentStatusResponse.Status.SUCCESS: + print('Experiment is in Success state') + elif response.status == service_pb2.CurrentStatusResponse.Status.FAILURE: + print('Experiment is in Failure state') + else: + print('response.status = ', response.status) if __name__ == "__main__": - app.run(main) + app.run(main) diff --git a/py/sight/widgets/decision/decision.py b/py/sight/widgets/decision/decision.py index f1dc2ec..075fdab 100644 --- a/py/sight/widgets/decision/decision.py +++ b/py/sight/widgets/decision/decision.py @@ -14,33 +14,35 @@ """Decisions and their outcomes within the Sight log.""" import inspect +import json import os import sys -import dm_env -import json -import numpy as np -import pandas as pd -import 
pandas as pd
-from typing import Any, Callable, Dict, List, Optional, Text
-import time
 import threading
+import time
+from typing import Any, Callable, Dict, List, Optional, Text
 
 from absl import flags
+import dm_env
 # from absl import logging
 from helpers.logs.logs_handler import logger as logging
-
-from sight_service.proto import service_pb2
+import numpy as np
+import pandas as pd
 from sight import service_utils as service
 from sight.proto import sight_pb2
-from sight.widgets.decision.llm_optimizer_client import LLMOptimizerClient
-from sight.widgets.decision.single_action_optimizer_client import SingleActionOptimizerClient
-from sight.widgets.decision.acme.acme_optimizer_client import AcmeOptimizerClient
-from sight.widgets.decision.env_driver import driver_fn
+from sight.utility import poll_network_batch_outcome
 # from sight.widgets.decision.cartpole_driver import driver_fn
 from sight.widgets.decision import decision_episode_fn
 from sight.widgets.decision import trials
 from sight.widgets.decision import utils
-from sight.utility import poll_network_batch_outcome
+from sight.widgets.decision.acme.acme_optimizer_client import (
+    AcmeOptimizerClient
+)
+from sight.widgets.decision.env_driver import driver_fn
+from sight.widgets.decision.llm_optimizer_client import LLMOptimizerClient
+from sight.widgets.decision.single_action_optimizer_client import (
+    SingleActionOptimizerClient
+)
+from sight_service.proto import service_pb2
 
 # logging.basicConfig(level=logging.DEBUG)
 
@@ -145,7 +147,7 @@ def configure(
     decision_configuration: Optional[sight_pb2.DecisionConfigurationStart],
     widget_decision_state: Dict[str, Any],
 ):
-    """Augments the Decision-API specific state within a Sight logger.
+  """Augments the Decision-API specific state within a Sight logger.
 
   The configuration object contains the state of this widget and tracks any
   updates to the widget's state within the context of a single Sight
@@ -161,56 +163,56 @@
       The dictionary that maps each choice label to the algorithm to be used
       to make the choice.
""" - method_name = 'configure' - logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name) + method_name = 'configure' + logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name) - if decision_configuration: - widget_decision_state['choice_config'] = ( - decision_configuration.choice_config) + if decision_configuration: + widget_decision_state['choice_config'] = ( + decision_configuration.choice_config) - if 'state' not in widget_decision_state: - widget_decision_state['state'] = {} + if 'state' not in widget_decision_state: + widget_decision_state['state'] = {} - if 'decision_episode_fn' not in widget_decision_state: - widget_decision_state['decision_episode_fn'] = None + if 'decision_episode_fn' not in widget_decision_state: + widget_decision_state['decision_episode_fn'] = None - if 'rl_decision_driver' not in widget_decision_state: - widget_decision_state['rl_decision_driver'] = None + if 'rl_decision_driver' not in widget_decision_state: + widget_decision_state['rl_decision_driver'] = None - logging.debug("<<<< Out %s of %s", method_name, _file_name) + logging.debug("<<<< Out %s of %s", method_name, _file_name) def init_sight_polling_thread(sight_id): - # print - status_update_thread = threading.Thread(target=poll_network_batch_outcome, - args=(sight_id, )) - print('*************** starting thread ************') - status_update_thread.start() + # print + status_update_thread = threading.Thread(target=poll_network_batch_outcome, + args=(sight_id,)) + print('*************** starting thread ************') + status_update_thread.start() def attr_dict_to_proto( attrs: Dict[str, sight_pb2.DecisionConfigurationStart.AttrProps], attrs_proto: Any, ): - """Converts a dict of attribute constraints to its proto representation.""" - for attr_name, attr_details in attrs.items(): - attrs_proto[attr_name].CopyFrom(attr_details) + """Converts a dict of attribute constraints to its proto representation.""" + for attr_name, attr_details in attrs.items(): + attrs_proto[attr_name].CopyFrom(attr_details) class Optimizer: - def __init__(self): - self.obj = None + def __init__(self): + self.obj = None - def get_instance(self): - return self.obj + def get_instance(self): + return self.obj optimizer = Optimizer() def attr_to_dict(attr, array): - """Converts a spec type array to a dict of attribute constraints. + """Converts a spec type array to a dict of attribute constraints. Args: array: The spec array to be converted. @@ -219,77 +221,76 @@ def attr_to_dict(attr, array): Returns: A dict of attribute constraints. 
""" - result = {} - method_name = 'attr_to_dict' - logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name) - # print('Array : ', array) - # if(array.dtype == np.float32): - # dtype = sight_pb2.DecisionConfigurationStart.DataType.DT_FLOAT32 - # elif(array.dtype == np.int64): - # dtype = sight_pb2.DecisionConfigurationStart.DataType.DT_INT64 - - # default - # dtype = sight_pb2.DecisionConfigurationStart.DataType.DT_FLOAT32 - - if isinstance(array, dm_env.specs.DiscreteArray): - valid_values = [] - for i in range(array.num_values): - valid_values.append(i) - if array.shape == (): - key = f'{attr}_{1}' - result[key] = sight_pb2.DecisionConfigurationStart.AttrProps( - valid_int_values=valid_values) - - elif isinstance(array, dm_env.specs.BoundedArray): - if array.shape == () or array.shape == (1, ): - # minimum = float(array.minimum if array.minimum.size == 1 else array.minimum[0]) - # maximum = float(array.maximum if array.maximum.size == 1 else array.maximum[0]) - minimum = float(array.minimum[0]) - maximum = float(array.maximum[0]) - key = f'{attr}_{1}' - result[key] = sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=minimum, - max_value=maximum, - # datatype=dtype - ) - else: - minimum = np.repeat( - array.minimum, - array.shape[0]) if array.minimum.size == 1 else array.minimum - maximum = np.repeat( - array.maximum, - array.shape[0]) if array.maximum.size == 1 else array.maximum - - for i in range(array.shape[0]): - key = f'{attr}_{i + 1}' - result[key] = sight_pb2.DecisionConfigurationStart.AttrProps( - min_value=float(minimum[i]), - max_value=float(maximum[i]), - # datatype=dtype - ) - # todo : need to handle this case when specs are in different form + result = {} + method_name = 'attr_to_dict' + logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name) + # print('Array : ', array) + # if(array.dtype == np.float32): + # dtype = sight_pb2.DecisionConfigurationStart.DataType.DT_FLOAT32 + # elif(array.dtype == np.int64): + # dtype = sight_pb2.DecisionConfigurationStart.DataType.DT_INT64 + + # default + # dtype = sight_pb2.DecisionConfigurationStart.DataType.DT_FLOAT32 + + if isinstance(array, dm_env.specs.DiscreteArray): + valid_values = [] + for i in range(array.num_values): + valid_values.append(i) + if array.shape == (): + key = f'{attr}_{1}' + result[key] = sight_pb2.DecisionConfigurationStart.AttrProps( + valid_int_values=valid_values) + + elif isinstance(array, dm_env.specs.BoundedArray): + if array.shape == () or array.shape == (1,): + # minimum = float(array.minimum if array.minimum.size == 1 else array.minimum[0]) + # maximum = float(array.maximum if array.maximum.size == 1 else array.maximum[0]) + minimum = float(array.minimum[0]) + maximum = float(array.maximum[0]) + key = f'{attr}_{1}' + result[key] = sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=minimum, + max_value=maximum, + # datatype=dtype + ) else: - for i in range(array.shape[0]): - key = f'{attr}_{i + 1}' - result[key] = sight_pb2.DecisionConfigurationStart.AttrProps() + minimum = np.repeat( + array.minimum, + array.shape[0]) if array.minimum.size == 1 else array.minimum + maximum = np.repeat( + array.maximum, + array.shape[0]) if array.maximum.size == 1 else array.maximum + + for i in range(array.shape[0]): + key = f'{attr}_{i + 1}' + result[key] = sight_pb2.DecisionConfigurationStart.AttrProps( + min_value=float(minimum[i]), + max_value=float(maximum[i]), + # datatype=dtype + ) + # todo : need to handle this case when specs are in different form + else: + for i in 
range(array.shape[0]): + key = f'{attr}_{i + 1}' + result[key] = sight_pb2.DecisionConfigurationStart.AttrProps() - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return result + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return result def run( sight: Any, env: Any = None, driver_fn: Callable[[Any], Any] = driver_fn, - state_attrs: Dict[str, - sight_pb2.DecisionConfigurationStart.AttrProps] = {}, + state_attrs: Dict[str, sight_pb2.DecisionConfigurationStart.AttrProps] = {}, action_attrs: Dict[str, sight_pb2.DecisionConfigurationStart.AttrProps] = {}, outcome_attrs: Dict[str, sight_pb2.DecisionConfigurationStart.AttrProps] = {}, description: str = '', ): - """Driver for running applications that use the Decision API. + """Driver for running applications that use the Decision API. Args: sight: The Sight object to be used for logging. @@ -308,346 +309,338 @@ def run( description: Human-readable description of the application. """ - method_name = 'run' - logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name) - - if env is not None: - if state_attrs == {}: - state_attrs = attr_to_dict(env.observation_spec(), 'state') - if action_attrs == {}: - action_attrs = attr_to_dict(env.action_spec(), 'action') + method_name = 'run' + logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name) + + if env is not None: + if state_attrs == {}: + state_attrs = attr_to_dict(env.observation_spec(), 'state') + if action_attrs == {}: + action_attrs = attr_to_dict(env.action_spec(), 'action') + + sight.widget_decision_state['decision_episode_fn'] = ( + decision_episode_fn.DecisionEpisodeFn(driver_fn, state_attrs, + action_attrs)) + # print(sight.widget_decision_state['decision_episode_fn']) + # raise SystemError + + if _OPTIMIZER_TYPE.value == 'dm_acme': + optimizer.obj = AcmeOptimizerClient(sight) + elif _OPTIMIZER_TYPE.value == 'vizier': + optimizer.obj = SingleActionOptimizerClient( + sight_pb2.DecisionConfigurationStart.OptimizerType.OT_VIZIER, sight) + elif _OPTIMIZER_TYPE.value == 'genetic_algorithm': + optimizer.obj = GeneticAlgorithmOptimizerClient( + max_population_size=_NUM_TRAIN_WORKERS.value, sight=sight) + elif _OPTIMIZER_TYPE.value == 'exhaustive_search': + optimizer.obj = SingleActionOptimizerClient( + sight_pb2.DecisionConfigurationStart.OptimizerType.OT_EXHAUSTIVE_SEARCH, + sight) + elif _OPTIMIZER_TYPE.value.startswith('llm_'): + optimizer.obj = LLMOptimizerClient( + _OPTIMIZER_TYPE.value.partition('llm_')[2], description, sight) + elif _OPTIMIZER_TYPE.value == 'bayesian_opt': + optimizer.obj = SingleActionOptimizerClient( + sight_pb2.DecisionConfigurationStart.OptimizerType.OT_BAYESIAN_OPT, + sight) + elif _OPTIMIZER_TYPE.value == 'sensitivity_analysis': + optimizer.obj = SingleActionOptimizerClient( + sight_pb2.DecisionConfigurationStart.OptimizerType. + OT_SENSITIVITY_ANALYSIS, sight) + elif _OPTIMIZER_TYPE.value.startswith('ng_'): + optimizer.obj = SingleActionOptimizerClient( + sight_pb2.DecisionConfigurationStart.OptimizerType.OT_NEVER_GRAD, sight, + _OPTIMIZER_TYPE.value.partition('ng_')[2]) + elif _OPTIMIZER_TYPE.value == 'smcpy': + optimizer.obj = SingleActionOptimizerClient( + sight_pb2.DecisionConfigurationStart.OptimizerType.OT_SMC_PY, sight) + elif _OPTIMIZER_TYPE.value == 'worklist_scheduler': + optimizer.obj = SingleActionOptimizerClient( + sight_pb2.DecisionConfigurationStart.OptimizerType. 
+          OT_WORKLIST_SCHEDULER, sight)
+  else:
+    raise ValueError(f'Unknown optimizer type {_OPTIMIZER_TYPE.value}')
+
+  if env is not None:
+    if state_attrs == {}:
+      state_attrs = attr_to_dict(env.observation_spec(), 'state')
+    if action_attrs == {}:
+      action_attrs = attr_to_dict(env.action_spec(), 'action')
+  if outcome_attrs == {}:
+    outcome_attrs = {
+        'outcome': sight_pb2.DecisionConfigurationStart.AttrProps()
+    }
+
+  decision_configuration = sight_pb2.DecisionConfigurationStart()
+  decision_configuration.optimizer_type = optimizer.obj.optimizer_type()
+
+  if (_NUM_TRIALS.value):
+    decision_configuration.num_trials = _NUM_TRIALS.value
+  # if FLAGS.deployment_mode == 'worker_mode':
+  #   decision_configuration.num_trials = int(os.environ['num_samples'])
+  # else:
+  #   decision_configuration.num_trials = _NUM_TRIALS.value
+  decision_configuration.choice_config[sight.params.label].CopyFrom(
+      optimizer.obj.create_config())
+  attr_dict_to_proto(state_attrs, decision_configuration.state_attrs)
+  attr_dict_to_proto(action_attrs, decision_configuration.action_attrs)
+  attr_dict_to_proto(outcome_attrs, decision_configuration.outcome_attrs)
+
+  sight.enter_block(
+      'Decision Configuration',
+      sight_pb2.Object(block_start=sight_pb2.BlockStart(
+          sub_type=sight_pb2.BlockStart.ST_CONFIGURATION,
+          configuration=sight_pb2.ConfigurationStart(
+              sub_type=sight_pb2.ConfigurationStart.ST_DECISION_CONFIGURATION,
+              decision_configuration=decision_configuration,
+          ),
+      )),
+  )
+  sight.exit_block('Decision Configuration', sight_pb2.Object())
+  sight.widget_decision_state['num_decision_points'] = 0
+
+  sight.widget_decision_state['decision_episode_fn'] = (
+      decision_episode_fn.DecisionEpisodeFn(driver_fn, state_attrs,
+                                            action_attrs))
+  sight.widget_decision_state['proposed_actions'] = []
+
+  if _DECISON_MODE.value == 'run':
+    logging.info('_DECISON_MODE.value == run')
+    # sight.widget_decision_state['sum_outcome'] = 0
+    # sight.widget_decision_state['last_reward'] = None
+    # if env:
+    #   driver_fn(env, sight)
+    # else:
+    #   driver_fn(sight)
+    # finalize_episode(sight)
-    sight.widget_decision_state['decision_episode_fn'] = (
-        decision_episode_fn.DecisionEpisodeFn(driver_fn, state_attrs,
-                                              action_attrs))
-    # print(sight.widget_decision_state['decision_episode_fn'])
-    # raise SystemError
+    if (not FLAGS.sight_log_id):
+      raise ValueError(
+          "sight_log_id has to be passed from the trained run for decision_mode = run"
+      )
-    if _OPTIMIZER_TYPE.value == 'dm_acme':
-        optimizer.obj = AcmeOptimizerClient(sight)
-    elif _OPTIMIZER_TYPE.value == 'vizier':
-        optimizer.obj = SingleActionOptimizerClient(
-            sight_pb2.DecisionConfigurationStart.OptimizerType.OT_VIZIER,
-            sight)
-    elif _OPTIMIZER_TYPE.value == 'genetic_algorithm':
-        optimizer.obj = GeneticAlgorithmOptimizerClient(
-            max_population_size=_NUM_TRAIN_WORKERS.value, sight=sight)
-    elif _OPTIMIZER_TYPE.value == 'exhaustive_search':
-        optimizer.obj = SingleActionOptimizerClient(
-            sight_pb2.DecisionConfigurationStart.OptimizerType.
-            OT_EXHAUSTIVE_SEARCH, sight)
-    elif _OPTIMIZER_TYPE.value.startswith('llm_'):
-        optimizer.obj = LLMOptimizerClient(
-            _OPTIMIZER_TYPE.value.partition('llm_')[2], description, sight)
-    elif _OPTIMIZER_TYPE.value == 'bayesian_opt':
-        optimizer.obj = SingleActionOptimizerClient(
-            sight_pb2.DecisionConfigurationStart.OptimizerType.OT_BAYESIAN_OPT,
-            sight)
-    elif _OPTIMIZER_TYPE.value == 'sensitivity_analysis':
-        optimizer.obj = SingleActionOptimizerClient(
-            sight_pb2.DecisionConfigurationStart.OptimizerType.
- OT_SENSITIVITY_ANALYSIS, sight) - elif _OPTIMIZER_TYPE.value.startswith('ng_'): - optimizer.obj = SingleActionOptimizerClient( - sight_pb2.DecisionConfigurationStart.OptimizerType.OT_NEVER_GRAD, - sight, - _OPTIMIZER_TYPE.value.partition('ng_')[2]) - elif _OPTIMIZER_TYPE.value == 'smcpy': - optimizer.obj = SingleActionOptimizerClient( - sight_pb2.DecisionConfigurationStart.OptimizerType.OT_SMC_PY, - sight) - elif _OPTIMIZER_TYPE.value == 'worklist_scheduler': - optimizer.obj = SingleActionOptimizerClient( - sight_pb2.DecisionConfigurationStart.OptimizerType. - OT_WORKLIST_SCHEDULER, sight) + req = service_pb2.FetchOptimalActionRequest( + client_id=FLAGS.sight_log_id, + # worker_id=f'client_{client_id}_worker_{worker_location}', + ) + response = service.call( + lambda s, meta: s.FetchOptimalAction(req, 300, metadata=meta)) + print('response : ', response.response_str) + + elif _DECISON_MODE.value == 'configured_run': + # ? not proper flow right now + # If the run configuration is provided in a file. + # if _DECISION_RUN_CONFIG_FILE.value: + if flags.FLAGS.decision_run_config_file: + sight.add_config_file(_DECISION_RUN_CONFIG_FILE.value) + # If the run configuration is provided on the command line. + elif _DECISION_PARAMS.value: + chosen_action = {} + for key_val in _DECISION_PARAMS.value.split(':'): + key, val = tuple(key_val.split('=')) + chosen_action[key] = float(val) + sight.widget_decision_state['constant_action'] = chosen_action + # sight.widget_decision_state['sum_outcome'] = 0 + sight.widget_decision_state['last_reward'] = None else: - raise ValueError(f'Unknown optimizer type {_OPTIMIZER_TYPE.value}') - - if env is not None: - if state_attrs == {}: - state_attrs = attr_to_dict(env.observation_spec(), 'state') - if action_attrs == {}: - action_attrs = attr_to_dict(env.action_spec(), 'action') - if outcome_attrs == {}: - outcome_attrs = { - 'outcome': sight_pb2.DecisionConfigurationStart.AttrProps() - } - - decision_configuration = sight_pb2.DecisionConfigurationStart() - decision_configuration.optimizer_type = optimizer.obj.optimizer_type() - - if (_NUM_TRIALS.value): - decision_configuration.num_trials = _NUM_TRIALS.value - # if FLAGS.deployment_mode == 'worker_mode': - # decision_configuration.num_trials = int(os.environ['num_samples']) - # else: - # decision_configuration.num_trials = _NUM_TRIALS.value - decision_configuration.choice_config[sight.params.label].CopyFrom( - optimizer.obj.create_config()) - attr_dict_to_proto(state_attrs, decision_configuration.state_attrs) - attr_dict_to_proto(action_attrs, decision_configuration.action_attrs) - attr_dict_to_proto(outcome_attrs, decision_configuration.outcome_attrs) - - sight.enter_block( - 'Decision Configuration', - sight_pb2.Object(block_start=sight_pb2.BlockStart( - sub_type=sight_pb2.BlockStart.ST_CONFIGURATION, - configuration=sight_pb2.ConfigurationStart( - sub_type=sight_pb2.ConfigurationStart. - ST_DECISION_CONFIGURATION, - decision_configuration=decision_configuration, - ), - )), + raise ValueError( + 'In configured_run mode decision_run_config_file is required.') + + # If a docker image is provided, run within it. 
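+  # Note (summarizing the dispatch below, as written in this patch): the
+  # deployment mode and docker image are logged first; when
+  # deployment_mode == 'local' and a --docker_image is supplied, a single
+  # worker is started inside that container via trials.start_job_in_docker,
+  # and otherwise driver_fn(sight) runs directly in the current process.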
+ logging.info( + 'decision_train_alg=%s docker_image=%s', + FLAGS.deployment_mode, + _DOCKER_IMAGE.value, ) - sight.exit_block('Decision Configuration', sight_pb2.Object()) - sight.widget_decision_state['num_decision_points'] = 0 - - sight.widget_decision_state['decision_episode_fn'] = ( - decision_episode_fn.DecisionEpisodeFn(driver_fn, state_attrs, - action_attrs)) - sight.widget_decision_state['proposed_actions'] = [] - - if _DECISON_MODE.value == 'run': - logging.info('_DECISON_MODE.value == run') - # sight.widget_decision_state['sum_outcome'] = 0 - # sight.widget_decision_state['last_reward'] = None - # if env: - # driver_fn(env, sight) - # else: - # driver_fn(sight) - # finalize_episode(sight) - - if (not FLAGS.sight_log_id): - raise ValueError( - "sight_log_id have to be passed from the trained run for decision_mokde = run" - ) - - req = service_pb2.FetchOptimalActionRequest( - client_id=FLAGS.sight_log_id, - # worker_id=f'client_{client_id}_worker_{worker_location}', + if FLAGS.deployment_mode == 'local' and _DOCKER_IMAGE.value: + trials.start_job_in_docker( + 1, + _BINARY_PATH.value, + _OPTIMIZER_TYPE.value, + _DOCKER_IMAGE.value, + _DECISON_MODE.value, + 'docker_worker', + 'worker_mode', + _DECISION_PARAMS.value, + sight, + ) + # Otherwise, run within the current process. + else: + driver_fn(sight) + + elif _DECISON_MODE.value == 'train': + details = sight.widget_decision_state['decision_episode_fn'] + possible_actions = list(details.action_max.values())[0] - list( + details.action_min.values())[0] + 2 + + print('_DECISON_MODE.value : ', _DECISON_MODE.value) + if FLAGS.deployment_mode in ['distributed', 'vm']: + if (_OPTIMIZER_TYPE.value == 'exhaustive_search' and + possible_actions < _NUM_TRIALS.value): + raise ValueError( + f"max possible value for num_trials is : {possible_actions}") + # logging.info('FLAGS.deployment_mode == distributed') + if (not _DOCKER_IMAGE.value): + raise ValueError("docker_image must be provided for distributed mode") + # print("decision_config : ", decision_configuration) + trials.launch( + optimizer.obj, + decision_configuration, + _NUM_TRAIN_WORKERS.value, + sight, + ) + trials.start_jobs( + _NUM_TRAIN_WORKERS.value, + _BINARY_PATH.value, + _OPTIMIZER_TYPE.value, + _DOCKER_IMAGE.value, + _DECISON_MODE.value, + 'worker_mode', + 'dsub_cloud_worker', + sight, + ) + elif FLAGS.deployment_mode in [ + 'local', + 'dsub_local', + 'docker_local', + 'worker_mode', + ]: + if FLAGS.deployment_mode == 'worker_mode' or 'PARENT_LOG_ID' in os.environ: + # not used anymore - for worklist scheduler + # num_samples_to_run = int(os.environ['num_samples']) + pass + else: + trials.launch( + optimizer.obj, + decision_configuration, + _NUM_TRAIN_WORKERS.value, + sight, ) - response = service.call( - lambda s, meta: s.FetchOptimalAction(req, 300, metadata=meta)) - print('response : ', response.response_str) - - elif _DECISON_MODE.value == 'configured_run': - # ? not proper flow right now - # If the run configuration is provided in a file. - # if _DECISION_RUN_CONFIG_FILE.value: - if flags.FLAGS.decision_run_config_file: - sight.add_config_file(_DECISION_RUN_CONFIG_FILE.value) - # If the run configuration is provided on the command line. 
- elif _DECISION_PARAMS.value: - chosen_action = {} - for key_val in _DECISION_PARAMS.value.split(':'): - key, val = tuple(key_val.split('=')) - chosen_action[key] = float(val) - sight.widget_decision_state['constant_action'] = chosen_action - # sight.widget_decision_state['sum_outcome'] = 0 - sight.widget_decision_state['last_reward'] = None - else: - raise ValueError( - 'In configured_run mode decision_run_config_file is required.') - - # If a docker image is provided, run within it. - logging.info( - 'decision_train_alg=%s docker_image=%s', - FLAGS.deployment_mode, + # not used anymore - for worklist scheduler + num_samples_to_run = _NUM_TRIALS.value + + # If a docker image is provided, run within it. + if (FLAGS.deployment_mode == 'docker_local' + ): # and _NUM_TRAIN_WORKERS.value==1: + trials.start_job_in_docker( + _NUM_TRIALS.value, + _BINARY_PATH.value, + _OPTIMIZER_TYPE.value, _DOCKER_IMAGE.value, + _DECISON_MODE.value, + 'worker_mode', + 'docker_local_worker', + _DECISION_PARAMS.value, + sight, ) - if FLAGS.deployment_mode == 'local' and _DOCKER_IMAGE.value: - trials.start_job_in_docker( - 1, - _BINARY_PATH.value, - _OPTIMIZER_TYPE.value, - _DOCKER_IMAGE.value, - _DECISON_MODE.value, - 'docker_worker', - 'worker_mode', - _DECISION_PARAMS.value, - sight, - ) - # Otherwise, run within the current process. - else: - driver_fn(sight) - - elif _DECISON_MODE.value == 'train': - details = sight.widget_decision_state['decision_episode_fn'] - possible_actions = list(details.action_max.values())[0] - list( - details.action_min.values())[0] + 2 - - print('_DECISON_MODE.value : ', _DECISON_MODE.value) - if FLAGS.deployment_mode in ['distributed', 'vm']: - if (_OPTIMIZER_TYPE.value == 'exhaustive_search' - and possible_actions < _NUM_TRIALS.value): - raise ValueError( - f"max possible value for num_trials is : {possible_actions}" - ) - # logging.info('FLAGS.deployment_mode == distributed') - if (not _DOCKER_IMAGE.value): - raise ValueError( - "docker_image must be provided for distributed mode") - # print("decision_config : ", decision_configuration) - trials.launch( - optimizer.obj, - decision_configuration, - _NUM_TRAIN_WORKERS.value, - sight, - ) - trials.start_jobs( - _NUM_TRAIN_WORKERS.value, - _BINARY_PATH.value, - _OPTIMIZER_TYPE.value, - _DOCKER_IMAGE.value, - _DECISON_MODE.value, - 'worker_mode', - 'dsub_cloud_worker', - sight, - ) - elif FLAGS.deployment_mode in [ - 'local', - 'dsub_local', - 'docker_local', - 'worker_mode', - ]: - if FLAGS.deployment_mode == 'worker_mode' or 'PARENT_LOG_ID' in os.environ: - # not used anymore - for worklist scheduler - # num_samples_to_run = int(os.environ['num_samples']) - pass + # run d-sub locally + elif (FLAGS.deployment_mode == 'dsub_local' + ): # and _NUM_TRAIN_WORKERS.value>1: + trials.start_job_in_dsub_local( + _NUM_TRAIN_WORKERS.value, + _NUM_TRIALS.value, + _BINARY_PATH.value, + _OPTIMIZER_TYPE.value, + _DOCKER_IMAGE.value, + _DECISON_MODE.value, + 'worker_mode', + 'dsub_local_worker', + sight, + ) + # Otherwise, run within the current process. 
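+      # This in-process path covers both a plain 'local' run and the spawned
+      # 'worker_mode' processes: a client id and worker location are resolved
+      # first, after which the worker keeps polling the service and acts,
+      # retries, or exits depending on the WorkerAlive response.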
+      else:  # local & worker_mode
+        # if _OPTIMIZER_TYPE.value == 'dm_acme':
+        #   optimizer.obj = acme_optimizer_client.Acme(sight)
+        # elif _OPTIMIZER_TYPE.value == 'vizier':
+        #   optimizer.obj = vizier_optimizer_client.Vizier(sight)
+        # elif _OPTIMIZER_TYPE.value == 'exhaustive_search':
+        #   optimizer.obj = exhaustive_search_client.ExhaustiveSearch(sight)
+
+        # actions_list = [
+        #     {'action_1': 1, 'action_2': 1, 'action_3': 1},
+        #     {'action_1': 2, 'action_2': 2, 'action_3': 2},
+        #     {'action_1': 3, 'action_2': 3, 'action_3': 3}
+        # ]
+        # unique_action_ids = propose_actions(sight, actions_list)
+
+        if FLAGS.deployment_mode == 'local':
+          client_id = str(sight.id)
+          worker_location = '0'
+        elif (FLAGS.deployment_mode == 'worker_mode'
+              # or FLAGS.deployment_mode == 'docker_mode'
+             ):
+          client_id = os.environ['PARENT_LOG_ID']
+          worker_location = os.environ['worker_location']
+
+        # for _ in range(num_samples_to_run):
+        # if(FLAGS.optimizer_type == "worklist_scheduler"):
+        # if (FLAGS.deployment_mode == 'worker_mode'):
+        while (True):
+          # #? new rpc just to check move forward or not?
+          req = service_pb2.WorkerAliveRequest(
+              client_id=client_id,
+              worker_id=f'client_{client_id}_worker_{worker_location}')
+          response = service.call(
+              lambda s, meta: s.WorkerAlive(req, 300, metadata=meta))
+
+          logging.info("response from WorkerAlive rpc is: %s",
+                       response.status_type)
+          if (response.status_type ==
+              service_pb2.WorkerAliveResponse.StatusType.ST_DONE):
+            break
+          elif (response.status_type ==
+                service_pb2.WorkerAliveResponse.StatusType.ST_RETRY):
+            logging.info('sleeping for 5 seconds...')
+            time.sleep(5)
+          elif (response.status_type ==
+                service_pb2.WorkerAliveResponse.StatusType.ST_ACT):
+            sight.enter_block('Decision Sample', sight_pb2.Object())
+            if 'constant_action' in sight.widget_decision_state:
+              del sight.widget_decision_state['constant_action']
+            sight.widget_decision_state['discount'] = 0
+            sight.widget_decision_state['last_reward'] = None
+
+            if env:
+              driver_fn(env, sight)
             else:
-                trials.launch(
-                    optimizer.obj,
-                    decision_configuration,
-                    _NUM_TRAIN_WORKERS.value,
-                    sight,
-                )
-            # not used anymore - for worklist scheduler
-            num_samples_to_run = _NUM_TRIALS.value
-
-            # If a docker image is provided, run within it.
-            if (FLAGS.deployment_mode == 'docker_local'
-                ):  # and _NUM_TRAIN_WORKERS.value==1:
-                trials.start_job_in_docker(
-                    _NUM_TRIALS.value,
-                    _BINARY_PATH.value,
-                    _OPTIMIZER_TYPE.value,
-                    _DOCKER_IMAGE.value,
-                    _DECISON_MODE.value,
-                    'worker_mode',
-                    'docker_local_worker',
-                    _DECISION_PARAMS.value,
-                    sight,
-                )
-            # run d-sub locally
-            elif (FLAGS.deployment_mode == 'dsub_local'
-                  ):  # and _NUM_TRAIN_WORKERS.value>1:
-                trials.start_job_in_dsub_local(
-                    _NUM_TRAIN_WORKERS.value,
-                    _NUM_TRIALS.value,
-                    _BINARY_PATH.value,
-                    _OPTIMIZER_TYPE.value,
-                    _DOCKER_IMAGE.value,
-                    _DECISON_MODE.value,
-                    'worker_mode',
-                    'dsub_local_worker',
-                    sight,
-                )
-            # Otherwise, run within the current process.
- else: # local & worker_mode - # if _OPTIMIZER_TYPE.value == 'dm_acme': - # optimizer.obj = acme_optimizer_client.Acme(sight) - # elif _OPTIMIZER_TYPE.value == 'vizier': - # optimizer.obj = vizier_optimizer_client.Vizier(sight) - # elif _OPTIMIZER_TYPE.value == 'exhaustive_search': - # optimizer.obj = exhaustive_search_client.ExhaustiveSearch(sight) - - # actions_list = [ - # {'action_1': 1, 'action_2': 1, 'action_3': 1}, - # {'action_1': 2, 'action_2': 2, 'action_3': 2}, - # {'action_1': 3, 'action_2': 3, 'action_3': 3} - # ] - # unique_action_ids = propose_actions(sight, actions_list) - - if FLAGS.deployment_mode == 'local': - client_id = str(sight.id) - worker_location = '0' - elif (FLAGS.deployment_mode == 'worker_mode' - # or FLAGS.deployment_mode == 'docker_mode' - ): - client_id = os.environ['PARENT_LOG_ID'] - worker_location = os.environ['worker_location'] - - # for _ in range(num_samples_to_run): - # if(FLAGS.optimizer_type == "worklist_scheduler"): - # if (FLAGS.deployment_mode == 'worker_mode'): - while (True): - # #? new rpc just to check move forward or not? - req = service_pb2.WorkerAliveRequest( - client_id=client_id, - worker_id=f'client_{client_id}_worker_{worker_location}' - ) - response = service.call( - lambda s, meta: s.WorkerAlive(req, 300, metadata=meta)) - - logging.info("response from workAlive rpc is : %s", - response.status_type) - if (response.status_type == service_pb2. - WorkerAliveResponse.StatusType.ST_DONE): - break - elif (response.status_type == - service_pb2.WorkerAliveResponse.StatusType.ST_RETRY): - logging.info('sleeping for 5 seconds......') - time.sleep(5) - elif (response.status_type == - service_pb2.WorkerAliveResponse.StatusType.ST_ACT): - sight.enter_block('Decision Sample', - sight_pb2.Object()) - if 'constant_action' in sight.widget_decision_state: - del sight.widget_decision_state['constant_action'] - sight.widget_decision_state['discount'] = 0 - sight.widget_decision_state['last_reward'] = None - - if env: - driver_fn(env, sight) - else: - driver_fn(sight) - - finalize_episode(sight) - sight.exit_block('Decision Sample', sight_pb2.Object()) - else: - raise ValueError("invalid response from server") - logging.info('exiting from the loop.....') - # else: - # for _ in range(num_samples_to_run): - # sight.enter_block('Decision Sample', sight_pb2.Object()) - # if 'constant_action' in sight.widget_decision_state: - # del sight.widget_decision_state['constant_action'] - # sight.widget_decision_state['discount'] = 0 - # sight.widget_decision_state['last_reward'] = None - - # if env: - # driver_fn(env, sight) - # else: - # driver_fn(sight) - - # finalize_episode(sight) - # sight.exit_block('Decision Sample', sight_pb2.Object()) - - # req = service_pb2.TestRequest(client_id=str(sight.id)) - # response = service.call( - # lambda s, meta: s.PrintInsertionTime(req, 300, metadata=meta) - # ) - - logging.debug("<<<< Out %s of %s", method_name, _file_name) + driver_fn(sight) + + finalize_episode(sight) + sight.exit_block('Decision Sample', sight_pb2.Object()) + else: + raise ValueError("invalid response from server") + logging.info('exiting from the loop.....') + # else: + # for _ in range(num_samples_to_run): + # sight.enter_block('Decision Sample', sight_pb2.Object()) + # if 'constant_action' in sight.widget_decision_state: + # del sight.widget_decision_state['constant_action'] + # sight.widget_decision_state['discount'] = 0 + # sight.widget_decision_state['last_reward'] = None + + # if env: + # driver_fn(env, sight) + # else: + # driver_fn(sight) + + # 
finalize_episode(sight) + # sight.exit_block('Decision Sample', sight_pb2.Object()) + + # req = service_pb2.TestRequest(client_id=str(sight.id)) + # response = service.call( + # lambda s, meta: s.PrintInsertionTime(req, 300, metadata=meta) + # ) + + logging.debug("<<<< Out %s of %s", method_name, _file_name) def get_state_attrs(sight: Any) -> list[str]: - state_attrs = [] - state_details = sight.widget_decision_state['decision_episode_fn'] + state_attrs = [] + state_details = sight.widget_decision_state['decision_episode_fn'] - for i in range(len(state_details.state_attrs)): - state_attrs.append(state_details.state_attrs[i]) - return state_attrs + for i in range(len(state_details.state_attrs)): + state_attrs.append(state_details.state_attrs[i]) + return state_attrs def state_updated( @@ -655,65 +648,64 @@ def state_updated( obj_to_log: Any, sight: Any, ) -> None: - """Called to inform the decision API that the current state has been updated. + """Called to inform the decision API that the current state has been updated. Args: name: The name of the updated state variable. obj_to_log: The value of the state variable. sight: Instance of a Sight logger. """ - if (sight.widget_decision_state is not None - and 'decision_episode_fn' in sight.widget_decision_state - and sight.widget_decision_state['decision_episode_fn'] and name - in sight.widget_decision_state['decision_episode_fn'].state_attrs): - sight.widget_decision_state['state'][name] = obj_to_log + if (sight.widget_decision_state is not None and + 'decision_episode_fn' in sight.widget_decision_state and + sight.widget_decision_state['decision_episode_fn'] and + name in sight.widget_decision_state['decision_episode_fn'].state_attrs): + sight.widget_decision_state['state'][name] = obj_to_log def get_decision_outcome_proto(outcome_label: str, sight: Any) -> sight_pb2.DecisionOutcome: - decision_outcome = sight_pb2.DecisionOutcome(outcome_label=outcome_label) - if 'sum_reward' in sight.widget_decision_state: - decision_outcome.reward = sight.widget_decision_state['sum_reward'] - - if 'sum_outcome' in sight.widget_decision_state: - outcome_params: List[sight_pb2.DecisionParam] = [] - for key in sight.widget_decision_state['sum_outcome']: - val = sight.widget_decision_state['sum_outcome'][key] - if (utils.is_scalar(val)): - #todo: assuming only double for now in scalar - value = sight_pb2.Value( - sub_type=sight_pb2.Value.ST_DOUBLE, - double_value=val, - ) - else: - if (isinstance(val, dict)): - json_value = json.dumps(val) - elif (isinstance(val, pd.Series)): - json_value = json.dumps(val.to_dict()) - else: - raise TypeError("value needs to be dict type") + decision_outcome = sight_pb2.DecisionOutcome(outcome_label=outcome_label) + if 'sum_reward' in sight.widget_decision_state: + decision_outcome.reward = sight.widget_decision_state['sum_reward'] + + if 'sum_outcome' in sight.widget_decision_state: + outcome_params: List[sight_pb2.DecisionParam] = [] + for key in sight.widget_decision_state['sum_outcome']: + val = sight.widget_decision_state['sum_outcome'][key] + if (utils.is_scalar(val)): + #todo: assuming only double for now in scalar + value = sight_pb2.Value( + sub_type=sight_pb2.Value.ST_DOUBLE, + double_value=val, + ) + else: + if (isinstance(val, dict)): + json_value = json.dumps(val) + elif (isinstance(val, pd.Series)): + json_value = json.dumps(val.to_dict()) + else: + raise TypeError("value needs to be dict type") - value = sight_pb2.Value(sub_type=sight_pb2.Value.ST_JSON, - json_value=json_value) + value = 
sight_pb2.Value(sub_type=sight_pb2.Value.ST_JSON, + json_value=json_value) - outcome_params.append( - sight_pb2.DecisionParam( - key=key, - value=value, - )) - decision_outcome.outcome_params.extend(outcome_params) + outcome_params.append(sight_pb2.DecisionParam( + key=key, + value=value, + )) + decision_outcome.outcome_params.extend(outcome_params) - if 'discount' in sight.widget_decision_state: - decision_outcome.discount = sight.widget_decision_state['discount'] + if 'discount' in sight.widget_decision_state: + decision_outcome.discount = sight.widget_decision_state['discount'] - return decision_outcome + return decision_outcome def decision_point( choice_label: str, sight: Any, ) -> Dict[Text, float]: - """Documents an execution point when a decision is made. + """Documents an execution point when a decision is made. If chosen_option is not provided, it is logged into sight. Otherwise, this method uses its own decision procedure, guided by the previously observed @@ -727,130 +719,128 @@ def decision_point( Returns: Dict that maps the name of each action variable to its chosen value. """ - method_name = 'decision_point' - logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name) - # logging.info('>>>>>>>>> In %s of %s, sight.widget_decision_state=%s', method_name, _file_name, sight.widget_decision_state) + method_name = 'decision_point' + logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name) + # logging.info('>>>>>>>>> In %s of %s, sight.widget_decision_state=%s', method_name, _file_name, sight.widget_decision_state) + + sight.widget_decision_state['num_decision_points'] += 1 + chosen_action = None + + if 'constant_action' in sight.widget_decision_state: + return sight.widget_decision_state['constant_action'] + + req = service_pb2.DecisionPointRequest() + + if FLAGS.deployment_mode == 'local' or _TRAINED_MODEL_LOG_ID.value: + global _sight_id + _sight_id = str(sight.id) + client_id = str(sight.id) + worker_location = '0' + elif (FLAGS.deployment_mode == 'worker_mode' + # or FLAGS.deployment_mode == 'docker_mode' + ): + client_id = os.environ['PARENT_LOG_ID'] + worker_location = os.environ['worker_location'] + + req.client_id = client_id + req.worker_id = f'client_{client_id}_worker_{worker_location}' + + if _OPTIMIZER_TYPE.value == 'dm_acme': + optimizer_obj = optimizer.get_instance() + selected_action = optimizer_obj.decision_point(sight, req) + # print("selected_action : ", selected_action, type(selected_action), selected_action.shape, ) + # raise SystemError - sight.widget_decision_state['num_decision_points'] += 1 - chosen_action = None + chosen_action = {} + #? when action space is scalar (DQN agent - cartpole) + if (selected_action.shape == ()): + chosen_action[sight.widget_decision_state['decision_episode_fn']. + action_attrs[0]] = selected_action[()] + #? when action space is 1d array (D4pg agent - pendulum) + else: + for i in range( + len(sight.widget_decision_state['decision_episode_fn'].action_attrs)): + chosen_action[sight.widget_decision_state['decision_episode_fn']. + action_attrs[i]] = selected_action[i] + # print("chosen_action : ", chosen_action) + + # selected_action will be same for all calls of decision point in these + # optimizers. As such, it is cached as the constant action. 
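+  # For example (values here are illustrative only): a first decision_point()
+  # call might receive {'action_1': 2.0} from the service; it is stored in
+  # widget_decision_state['constant_action'] below and returned directly by
+  # later calls in the same episode, without another RPC.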
+  elif _OPTIMIZER_TYPE.value in [
+      'vizier', 'genetic_algorithm', 'exhaustive_search', 'bayesian_opt',
+      'sensitivity_analysis', 'smcpy'
+  ] or _OPTIMIZER_TYPE.value.startswith('ng_'):
+    optimizer_obj = optimizer.get_instance()
+    chosen_action = optimizer_obj.decision_point(sight, req)
+    sight.widget_decision_state['constant_action'] = chosen_action
+  elif _OPTIMIZER_TYPE.value == 'worklist_scheduler':
+    if (not optimizer.obj):
+      optimizer.obj = SingleActionOptimizerClient(
+          sight_pb2.DecisionConfigurationStart.OptimizerType.
+          OT_WORKLIST_SCHEDULER, sight)
+    optimizer_obj = optimizer.get_instance()
+    chosen_action = optimizer_obj.decision_point(sight, req)
+    # if(chosen_action == None):
+    #   print("received None in chosen action")
+    #   return None
+    sight.widget_decision_state['constant_action'] = chosen_action
+
+  elif _OPTIMIZER_TYPE.value.startswith('llm_'):
+    optimizer_obj = optimizer.get_instance()
+    if 'reward' in sight.widget_decision_state:
+      req.decision_outcome.reward = sight.widget_decision_state['reward']
+    if 'outcome_value' in sight.widget_decision_state:
+      outcome_params: List[sight_pb2.DecisionParam] = []
+      for key in sight.widget_decision_state['outcome_value']:
+        outcome_params.append(
+            sight_pb2.DecisionParam(
+                key=key,
+                value=sight_pb2.Value(
+                    sub_type=sight_pb2.Value.ST_DOUBLE,
+                    double_value=sight.widget_decision_state['outcome_value']
+                    [key],
+                ),
+            ))
+      req.decision_outcome.outcome_params.extend(outcome_params)
+    req.decision_outcome.discount = sight.widget_decision_state['discount']
+    chosen_action = optimizer_obj.decision_point(sight, req)
+
+  choice_params: List[sight_pb2.DecisionParam] = []
+  # for attr in sight.widget_decision_state[
+  #     'decision_episode_fn'].action_attrs:
+  for attr in chosen_action.keys():
+    #? keep this, might need to change sub_type of decision param value
+    if isinstance(chosen_action[attr], str):
+      val = sight_pb2.Value(
+          sub_type=sight_pb2.Value.ST_STRING,
+          string_value=chosen_action[attr],
+      )
+    elif isinstance(chosen_action[attr], float):
+      val = sight_pb2.Value(
+          sub_type=sight_pb2.Value.ST_DOUBLE,
+          double_value=chosen_action[attr],
+      )
+    else:
+      raise ValueError("unsupported type!!")
+
+    choice_params.append(sight_pb2.DecisionParam(
+        key=attr,
+        value=val,
+    ))
+
+  # pytype: disable=attribute-error
+  obj = sight_pb2.Object(
+      sub_type=sight_pb2.Object.ST_DECISION_POINT,
+      decision_point=sight_pb2.DecisionPoint(choice_label=choice_label,
+                                             # choice_params=choice_params,
+                                            ),
+  )
+  obj.decision_point.choice_params.extend(choice_params)
+  sight.log_object(obj, inspect.currentframe().f_back.f_back)
-    if FLAGS.deployment_mode == 'local' or _TRAINED_MODEL_LOG_ID.value:
-        global _sight_id
-        _sight_id = str(sight.id)
-        client_id = str(sight.id)
-        worker_location = '0'
-    elif (FLAGS.deployment_mode == 'worker_mode'
-          # or FLAGS.deployment_mode == 'docker_mode'
-         ):
-        client_id = os.environ['PARENT_LOG_ID']
-        worker_location = os.environ['worker_location']
-
-    req.client_id = client_id
-    req.worker_id = f'client_{client_id}_worker_{worker_location}'
-
-    if _OPTIMIZER_TYPE.value == 'dm_acme':
-        optimizer_obj = optimizer.get_instance()
-        selected_action = optimizer_obj.decision_point(sight, req)
-        # print("selected_action : ", selected_action, type(selected_action), selected_action.shape, )
-        # raise SystemError
-
-        chosen_action = {}
-        #?
when action space is scalar (DQN agent - cartpole) - if (selected_action.shape == ()): - chosen_action[sight.widget_decision_state['decision_episode_fn']. - action_attrs[0]] = selected_action[()] - #? when action space is 1d array (D4pg agent - pendulum) - else: - for i in range( - len(sight.widget_decision_state['decision_episode_fn']. - action_attrs)): - chosen_action[ - sight.widget_decision_state['decision_episode_fn']. - action_attrs[i]] = selected_action[i] - # print("chosen_action : ", chosen_action) - - # selected_action will be same for all calls of decision point in these - # optimizers. As such, it is cached as the constant action. - elif _OPTIMIZER_TYPE.value in [ - 'vizier', 'genetic_algorithm', 'exhaustive_search', 'bayesian_opt', - 'sensitivity_analysis', 'smcpy' - ] or _OPTIMIZER_TYPE.value.startswith('ng_'): - optimizer_obj = optimizer.get_instance() - chosen_action = optimizer_obj.decision_point(sight, req) - sight.widget_decision_state['constant_action'] = chosen_action - elif _OPTIMIZER_TYPE.value == 'worklist_scheduler': - if (not optimizer.obj): - optimizer.obj = SingleActionOptimizerClient( - sight_pb2.DecisionConfigurationStart.OptimizerType. - OT_WORKLIST_SCHEDULER, sight) - optimizer_obj = optimizer.get_instance() - chosen_action = optimizer_obj.decision_point(sight, req) - # if(chosen_action == None): - # print("received None in chosen action") - # return None - sight.widget_decision_state['constant_action'] = chosen_action - - elif _OPTIMIZER_TYPE.value.startswith('llm_'): - optimizer_obj = optimizer.get_instance() - if 'reward' in sight.widget_decision_state: - req.decision_outcome.reward = sight.widget_decision_state['reward'] - if 'outcome_value' in sight.widget_decision_state: - outcome_params: List[sight_pb2.DecisionParam] = [] - for key in sight.widget_decision_state['outcome_value']: - outcome_params.append( - sight_pb2.DecisionParam( - key=key, - value=sight_pb2.Value( - sub_type=sight_pb2.Value.ST_DOUBLE, - double_value=sight. - widget_decision_state['outcome_value'][key], - ), - )) - req.decision_outcome.outcome_params.extend(outcome_params) - req.decision_outcome.discount = sight.widget_decision_state['discount'] - chosen_action = optimizer_obj.decision_point(sight, req) - - choice_params: List[sight_pb2.DecisionParam] = [] - # for attr in sight.widget_decision_state[ - # 'decision_episode_fn'].action_attrs: - for attr in chosen_action.keys(): - #? 
keep this might need to change sub_type of deicision param value - if isinstance(chosen_action[attr], str): - val = sight_pb2.Value( - sub_type=sight_pb2.Value.ST_STRING, - string_value=chosen_action[attr], - ) - elif isinstance(chosen_action[attr], float): - val = sight_pb2.Value( - sub_type=sight_pb2.Value.ST_DOUBLE, - double_value=chosen_action[attr], - ) - else: - raise ValueError("unsupported type!!") - - choice_params.append(sight_pb2.DecisionParam( - key=attr, - value=val, - )) - - # pytype: disable=attribute-error - obj = sight_pb2.Object( - sub_type=sight_pb2.Object.ST_DECISION_POINT, - decision_point=sight_pb2.DecisionPoint(choice_label=choice_label, - # choice_params=choice_params, - ), - ) - obj.decision_point.choice_params.extend(choice_params) - sight.log_object(obj, inspect.currentframe().f_back.f_back) - - logging.info('decision_point() chosen_action=%s', chosen_action) - logging.debug('<<<< Out %s of %s', method_name, _file_name) - return chosen_action + logging.info('decision_point() chosen_action=%s', chosen_action) + logging.debug('<<<< Out %s of %s', method_name, _file_name) + return chosen_action def decision_outcome( @@ -861,7 +851,7 @@ def decision_outcome( discount=1.0, # optimizer_type: str ) -> None: - """Documents the outcome of prior decisions. + """Documents the outcome of prior decisions. Args: outcome_label: Label that identifies the outcome. @@ -871,226 +861,226 @@ def decision_outcome( outcome: Dictionary that describes the various outcome attributes of the application. discount: discount value to be used """ - method_name = 'decision_outcome' - logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name) - - sight.widget_decision_state['discount'] = discount - - if reward is not None: - logging.info('decision_outcome() reward=%s', reward) - sight.widget_decision_state['reward'] = reward - if 'sum_reward' not in sight.widget_decision_state: - sight.widget_decision_state['sum_reward'] = 0 - sight.widget_decision_state['sum_reward'] += reward - - if outcome is not None: - logging.info('decision_outcome() outcome=%s', outcome) - if 'sum_outcome' not in sight.widget_decision_state: - sight.widget_decision_state['sum_outcome'] = {} - for key in outcome: - # print(key, outcome[key], type(outcome[key])) - # checking for scalar types - if utils.is_scalar(outcome[key]): - if key not in sight.widget_decision_state['sum_outcome']: - sight.widget_decision_state['sum_outcome'][key] = 0 - sight.widget_decision_state['sum_outcome'][key] += outcome[key] - # converting json into string - else: - # converting pandas datafram to json and storing it as json string - # sight.widget_decision_state['sum_outcome'][key] = json.dumps(outcome[key].to_json()) - sight.widget_decision_state['sum_outcome'][key] = outcome[key] - - # if not isinstance(outcome[key], float) and not isinstance(outcome[key], int): - # continue - # if key not in sight.widget_decision_state['sum_outcome']: - # sight.widget_decision_state['sum_outcome'][key] = 0 - # sight.widget_decision_state['sum_outcome'][key] += outcome[key] - - sight.log_object( - sight_pb2.Object( - sub_type=sight_pb2.Object.ST_DECISION_OUTCOME, - decision_outcome=get_decision_outcome_proto(outcome_label, sight), - ), - inspect.currentframe().f_back.f_back, - ) - - logging.debug("<<<< Out %s of %s", method_name, _file_name) + method_name = 'decision_outcome' + logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name) + + sight.widget_decision_state['discount'] = discount + + if reward is not None: + 
logging.info('decision_outcome() reward=%s', reward)
+    sight.widget_decision_state['reward'] = reward
+    if 'sum_reward' not in sight.widget_decision_state:
+      sight.widget_decision_state['sum_reward'] = 0
+    sight.widget_decision_state['sum_reward'] += reward
+
+  if outcome is not None:
+    logging.info('decision_outcome() outcome=%s', outcome)
+    if 'sum_outcome' not in sight.widget_decision_state:
+      sight.widget_decision_state['sum_outcome'] = {}
+    for key in outcome:
+      # print(key, outcome[key], type(outcome[key]))
+      # checking for scalar types
+      if utils.is_scalar(outcome[key]):
+        if key not in sight.widget_decision_state['sum_outcome']:
+          sight.widget_decision_state['sum_outcome'][key] = 0
+        sight.widget_decision_state['sum_outcome'][key] += outcome[key]
+      # converting json into string
+      else:
+        # converting pandas dataframe to json and storing it as json string
+        # sight.widget_decision_state['sum_outcome'][key] = json.dumps(outcome[key].to_json())
+        sight.widget_decision_state['sum_outcome'][key] = outcome[key]
+
+      # if not isinstance(outcome[key], float) and not isinstance(outcome[key], int):
+      #   continue
+      # if key not in sight.widget_decision_state['sum_outcome']:
+      #   sight.widget_decision_state['sum_outcome'][key] = 0
+      # sight.widget_decision_state['sum_outcome'][key] += outcome[key]
+
+  sight.log_object(
+      sight_pb2.Object(
+          sub_type=sight_pb2.Object.ST_DECISION_OUTCOME,
+          decision_outcome=get_decision_outcome_proto(outcome_label, sight),
+      ),
+      inspect.currentframe().f_back.f_back,
+  )
+
+  logging.debug("<<<< Out %s of %s", method_name, _file_name)


 def propose_actions(sight, action_dict):
+  request = service_pb2.ProposeActionRequest()
+  request.client_id = str(sight.id)
+
+  actions_data = []
+  attributes_data = []
+
+  # Process actions
+  for k, v in action_dict.items():
+    action_attr = sight_pb2.DecisionParam()
+    action_attr.key = k
+    if isinstance(v, str):
+      val = sight_pb2.Value(
+          sub_type=sight_pb2.Value.ST_STRING,
+          string_value=v,
+      )
+    else:
+      val = sight_pb2.Value(
+          sub_type=sight_pb2.Value.ST_DOUBLE,
+          double_value=v,
+      )
+    action_attr.value.CopyFrom(val)
+    # Append to actions_data list
+    actions_data.append(action_attr)
+  request.action_attrs.extend(actions_data)
+
+  attr_dict = sight.fetch_attributes()
+  # print('attr_dict : ', attr_dict)
+
+  # Process attributes
+  for k, v in attr_dict.items():
+    attribute = sight_pb2.DecisionParam()
+
attribute.key = k + if isinstance(v, str): + val = sight_pb2.Value( + sub_type=sight_pb2.Value.ST_STRING, + string_value=v, + ) + else: + val = sight_pb2.Value( + sub_type=sight_pb2.Value.ST_DOUBLE, + double_value=v, + ) + attribute.value.CopyFrom(val) + # Append to attributes_data list + attributes_data.append(attribute) + request.attributes.extend(attributes_data) - response = service.call( - lambda s, meta: s.ProposeAction(request, 300, metadata=meta)) - action_id = response.action_id + response = service.call( + lambda s, meta: s.ProposeAction(request, 300, metadata=meta)) + action_id = response.action_id - # log_object call - sight_obj = sight_pb2.Object() - sight_obj.sub_type = sight_pb2.Object.SubType.ST_PROPOSE_ACTION - sight_obj.propose_action.action_id = str(action_id) - sight_obj.propose_action.action_attrs.extend(actions_data) - sight_obj.propose_action.attributes.extend(attributes_data) + # log_object call + sight_obj = sight_pb2.Object() + sight_obj.sub_type = sight_pb2.Object.SubType.ST_PROPOSE_ACTION + sight_obj.propose_action.action_id = str(action_id) + sight_obj.propose_action.action_attrs.extend(actions_data) + sight_obj.propose_action.attributes.extend(attributes_data) - frame = inspect.currentframe().f_back.f_back - sight.set_object_code_loc(sight_obj, frame) - sight.log_object(sight_obj, True) + frame = inspect.currentframe().f_back.f_back + sight.set_object_code_loc(sight_obj, frame) + sight.log_object(sight_obj, True) - return action_id + return action_id def finalize_episode(sight): # , optimizer_obj - """Finalize the run. + """Finalize the run. Args: sight: Instance of a Sight logger. optimizer_obj: Object of Optimizer instance """ - method_name = 'finalize_episode' - logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name) + method_name = 'finalize_episode' + logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name) + + if (FLAGS.deployment_mode == 'local' + # or FLAGS.deployment_mode == 'docker_mode' + or FLAGS.deployment_mode == 'worker_mode'): + if FLAGS.deployment_mode == 'local': + client_id = str(sight.id) + worker_location = '0' + elif (FLAGS.deployment_mode == 'worker_mode' + # or FLAGS.deployment_mode == 'docker_mode' + ): + client_id = os.environ['PARENT_LOG_ID'] + worker_location = os.environ['worker_location'] - if (FLAGS.deployment_mode == 'local' - # or FLAGS.deployment_mode == 'docker_mode' - or FLAGS.deployment_mode == 'worker_mode'): - if FLAGS.deployment_mode == 'local': - client_id = str(sight.id) - worker_location = '0' - elif (FLAGS.deployment_mode == 'worker_mode' - # or FLAGS.deployment_mode == 'docker_mode' - ): - client_id = os.environ['PARENT_LOG_ID'] - worker_location = os.environ['worker_location'] + req = service_pb2.FinalizeEpisodeRequest( + client_id=client_id, + worker_id=f'client_{client_id}_worker_{worker_location}', + ) - req = service_pb2.FinalizeEpisodeRequest( + if _OPTIMIZER_TYPE.value in [ + 'genetic_algorithm', 'exhaustive_search', 'vizier', 'bayesian_opt', + 'sensitivity_analysis', 'smcpy' + ] or _OPTIMIZER_TYPE.value.startswith( + 'llm_') or _OPTIMIZER_TYPE.value.startswith('ng_'): + req.decision_outcome.CopyFrom(get_decision_outcome_proto( + 'outcome', sight)) + optimizer_obj = optimizer.get_instance() + optimizer_obj.finalize_episode(sight, req) + elif _OPTIMIZER_TYPE.value == 'worklist_scheduler': + if (not optimizer.obj): + optimizer.obj = SingleActionOptimizerClient( + sight_pb2.DecisionConfigurationStart.OptimizerType. 
+ OT_WORKLIST_SCHEDULER, sight) + req.decision_outcome.CopyFrom( + # get_fvs_outcome_proto('outcome', sight)) + # whole output of key "fvs_outcome" is stringified, not individual key-value + get_decision_outcome_proto('outcome', sight)) + # print('request : ', req) + optimizer_obj = optimizer.get_instance() + optimizer_obj.finalize_episode(sight, req) + elif _OPTIMIZER_TYPE.value == 'dm_acme': + optimizer_obj = optimizer.get_instance() + optimizer_obj.finalize_episode(sight) + + if 'outcome_value' in sight.widget_decision_state: + del sight.widget_decision_state['outcome_value'] + + else: + logging.info('Not in local/worker mode, so skipping it') + + if sight.widget_decision_state['proposed_actions']: + for proposal in sight.widget_decision_state['proposed_actions']: + # logging.info('proposal=%s', proposal) + proposal_req = service_pb2.ProposeActionRequest( client_id=client_id, worker_id=f'client_{client_id}_worker_{worker_location}', + outcome=sight_pb2.DecisionOutcome( + outcome_label='estimated_outcome', + outcome_value=proposal['outcome'], + ), + action=proposal['action'], ) - if _OPTIMIZER_TYPE.value in [ - 'genetic_algorithm', 'exhaustive_search', 'vizier', - 'bayesian_opt', 'sensitivity_analysis', 'smcpy' - ] or _OPTIMIZER_TYPE.value.startswith( - 'llm_') or _OPTIMIZER_TYPE.value.startswith('ng_'): - req.decision_outcome.CopyFrom( - get_decision_outcome_proto('outcome', sight)) - optimizer_obj = optimizer.get_instance() - optimizer_obj.finalize_episode(sight, req) - elif _OPTIMIZER_TYPE.value == 'worklist_scheduler': - if (not optimizer.obj): - optimizer.obj = SingleActionOptimizerClient( - sight_pb2.DecisionConfigurationStart.OptimizerType. - OT_WORKLIST_SCHEDULER, sight) - req.decision_outcome.CopyFrom( - # get_fvs_outcome_proto('outcome', sight)) - # whole output of key "fvs_outcome" is stringified, not individual key-value - get_decision_outcome_proto('outcome', sight)) - # print('request : ', req) - optimizer_obj = optimizer.get_instance() - optimizer_obj.finalize_episode(sight, req) - elif _OPTIMIZER_TYPE.value == 'dm_acme': - optimizer_obj = optimizer.get_instance() - optimizer_obj.finalize_episode(sight) - - if 'outcome_value' in sight.widget_decision_state: - del sight.widget_decision_state['outcome_value'] + response = service.call( + lambda s, meta: s.ProposeAction(proposal_req, 300, metadata=meta)) + sight.widget_decision_state['proposed_actions'] = [] - else: - logging.info('Not in local/worker mode, so skipping it') - - if sight.widget_decision_state['proposed_actions']: - for proposal in sight.widget_decision_state['proposed_actions']: - # logging.info('proposal=%s', proposal) - proposal_req = service_pb2.ProposeActionRequest( - client_id=client_id, - worker_id=f'client_{client_id}_worker_{worker_location}', - outcome=sight_pb2.DecisionOutcome( - outcome_label='estimated_outcome', - outcome_value=proposal['outcome'], - ), - action=proposal['action'], - ) - - response = service.call(lambda s, meta: s.ProposeAction( - proposal_req, 300, metadata=meta)) - sight.widget_decision_state['proposed_actions'] = [] - - if 'sum_reward' in sight.widget_decision_state: - _rewards.append(sight.widget_decision_state['sum_reward']) - sight.widget_decision_state.pop('sum_reward', None) - sight.widget_decision_state.pop('sum_outcome', None) + if 'sum_reward' in sight.widget_decision_state: + _rewards.append(sight.widget_decision_state['sum_reward']) + sight.widget_decision_state.pop('sum_reward', None) + sight.widget_decision_state.pop('sum_outcome', None) - logging.debug("<<<< Out %s 
of %s", method_name, _file_name) + logging.debug("<<<< Out %s of %s", method_name, _file_name) def get_outcome(sight): - request = service_pb2.GetOutcomeRequest() - request.client_id = str(sight.id) - # request.unique_ids.append(3) - response = service.call( - lambda s, meta: s.GetOutcome(request, 300, metadata=meta)) + request = service_pb2.GetOutcomeRequest() + request.client_id = str(sight.id) + # request.unique_ids.append(3) + response = service.call( + lambda s, meta: s.GetOutcome(request, 300, metadata=meta)) - if (response.response_str): - return response.response_str + if (response.response_str): + return response.response_str - outcome_list = [] - for outcome in response.outcome: - outcome_dict = {} - outcome_dict['reward'] = outcome.reward - outcome_dict['action'] = dict(outcome.action_attrs) - outcome_dict['outcome'] = dict(outcome.outcome_attrs) - outcome_list.append(outcome_dict) - return outcome_list + outcome_list = [] + for outcome in response.outcome: + outcome_dict = {} + outcome_dict['reward'] = outcome.reward + outcome_dict['action'] = dict(outcome.action_attrs) + outcome_dict['outcome'] = dict(outcome.outcome_attrs) + outcome_list.append(outcome_dict) + return outcome_list def finalize(sight): - logging.info( - 'Get latest status of this training by running this script : ' - 'python3 x-sight/py/sight/widgets/decision/current_status.py' - ' --log_id=%s --service_name=%s', - sight.id, - service.get_service_id(), - ) + logging.info( + 'Get latest status of this training by running this script : ' + 'python3 x-sight/py/sight/widgets/decision/current_status.py' + ' --log_id=%s --service_name=%s', + sight.id, + service.get_service_id(), + ) diff --git a/py/sight/widgets/decision/decision_episode_fn.py b/py/sight/widgets/decision/decision_episode_fn.py index 509e570..7b35fd7 100644 --- a/py/sight/widgets/decision/decision_episode_fn.py +++ b/py/sight/widgets/decision/decision_episode_fn.py @@ -17,7 +17,6 @@ from typing import Any, Callable, Dict, List, Tuple from helpers.logs.logs_handler import logger as logging - import numpy as np # import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import # from tf_agents.specs import array_spec @@ -28,177 +27,173 @@ @dataclass class DecisionEpisodeFn: - """The computation of a Sight Decision API episode and its meta-data.""" - - # The function that runs a single episode. - driver_fn: Callable[[Any], Any] - - # The names of the episode's state variables. - state_attrs: List[str] - - # Maps the name of each state variable to its index in state_attrs. - state_attr_to_idx: Dict[str, int] - - # Mapping from all the state variables to their minimum and maximum values. - state_min: Dict[str, float] - state_max: Dict[str, float] - - # datatype of state attrs - # state_dtype: None - - # The TFAgents schema of the observation space. - # observation_spec: array_spec.BoundedArraySpec - - # The names of the episode's action variables. - action_attrs: List[str] - - # Maps the name of each action variable to its index in action_attrs. - action_attr_to_idx: Dict[str, int] - - # Mapping from all the action variables to their minimum and maximum values. - action_min: Dict[str, float] - action_max: Dict[str, float] - - # datatype of action attrs - # action_dtype: None - - # possible valid values of action attrs - valid_action_values: Dict[str, int] - - # possible valid values of action attrs - step_size: Dict[str, int] - - # The TFAgents schema of the action space. 
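
To make the attribute-metadata bookkeeping in this dataclass concrete, here is a minimal, self-contained sketch of how the name-to-index and min/max lookup maps are built. The `AttrProps` namedtuple is a stand-in for the real `sight_pb2.DecisionConfigurationStart.AttrProps` message (an illustrative simplification; the real message carries more fields):

```python
from collections import namedtuple

# Stand-in for sight_pb2.DecisionConfigurationStart.AttrProps (illustrative
# simplification; the real proto message has more fields).
AttrProps = namedtuple('AttrProps', ['min_value', 'max_value'])

state_attrs = {
    'temperature': AttrProps(0.0, 100.0),
    'pressure': AttrProps(0.5, 2.0),
}

# Parallel lookup structures, mirroring the fields listed here: an ordered
# attribute list, a name->index map, and per-attribute bounds.
state_names = list(state_attrs)
state_attr_to_idx = {name: i for i, name in enumerate(state_names)}
state_min = {attr: p.min_value for attr, p in state_attrs.items()}
state_max = {attr: p.max_value for attr, p in state_attrs.items()}

assert state_attr_to_idx['pressure'] == 1
assert state_max['temperature'] == 100.0
```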
- # action_spec: array_spec.BoundedArraySpec - - # The TFAgents schema of the rewards space. - # reward_spec: array_spec.ArraySpec - - # The TFAgents schema of the time steps space (includes observations and - # rewards). - # time_step_spec: ts.TimeStep - - def __init__( - self, - driver_fn: Callable[[Any], Any], - state_attrs: Dict[str, sight_pb2.DecisionConfigurationStart.AttrProps], - action_attrs: Dict[str, - sight_pb2.DecisionConfigurationStart.AttrProps], - ): - self.driver_fn = driver_fn - - self.state_attrs = list(state_attrs.keys()) - self.state_attr_to_idx = {} - for i in range(len(self.state_attrs)): - self.state_attr_to_idx[self.state_attrs[i]] = i - self.state_min = { - attr: min_max.min_value - for attr, min_max in state_attrs.items() - } - self.state_max = { - attr: min_max.max_value - for attr, min_max in state_attrs.items() - } - - # for attr, val in state_attrs.items(): - # self.state_dtype = val.datatype - # break - - # self.observation_spec = array_spec.BoundedArraySpec( - # shape=(len(state_attrs),), - # dtype=np.float32, - # minimum=[0] * len(state_attrs), - # maximum=[1] * len(state_attrs), - # name='observation', - # ) - - self.action_attrs = list(action_attrs.keys()) - self.action_attr_to_idx = {} - for i in range(len(self.action_attrs)): - self.action_attr_to_idx[self.action_attrs[i]] = i - self.action_min = { - attr: min_max.min_value - for attr, min_max in action_attrs.items() - } - self.action_max = { - attr: min_max.max_value - for attr, min_max in action_attrs.items() - } - - self.valid_action_values = { - attr: attr_val.valid_int_values - for attr, attr_val in action_attrs.items() - if attr_val.valid_int_values - } - - self.step_size = { - attr: attr_val.step_size - for attr, attr_val in action_attrs.items() if attr_val.step_size - } - - # for action, attributes in action_attrs.items(): - # if (attributes.valid_int_values): - # self.valid_action_values = attributes.valid_int_values - - # for attr, val in action_attrs.items(): - # self.action_dtype = val.datatype - # break - - # if len(self.action_attrs) == 1: - # self.action_spec = array_spec.BoundedArraySpec( - # shape=(), - # dtype=np.float32, - # minimum=0, - # maximum=20, - # name='action', - # ) - # else: - # self.action_spec = array_spec.BoundedArraySpec( - # shape=(len(action_attrs),), - # dtype=np.float32, - # minimum=[0] * len(action_attrs), - # maximum=[20] * len(action_attrs), - # name='action', - # ) - - # self.reward_spec = array_spec.ArraySpec( - # shape=(), dtype=np.float32, name='reward') - # self.time_step_spec = ts.time_step_spec(self.observation_spec, - # self.reward_spec) - - # def tf_observation_spec(self) -> tf.TensorSpec: - # """Returns the TFAgents Tensor schema of the observation space.""" - # return tensor_spec.from_spec(self.observation_spec) - - # def tf_action_spec(self) -> tf.TensorSpec: - # """Returns the TFAgents Tensor schema of the action space.""" - # return tensor_spec.from_spec(self.action_spec) - - # def tf_time_step_spec(self) -> tf.TensorSpec: - # """Returns the TFAgents Tensor schema of the time step space.""" - # return tensor_spec.from_spec(self.time_step_spec) - - # def create_tf_observation(self, state: Dict[str, float]) -> np.ndarray: - # """Creates an observation vector from a state dict. - - # The values of the observation vector are in a canonical order and in the - # 0-1 range, whereas the values in the state dict are in the range specified - # by the user when this object was initialized. - - # Args: - # state: Maps each state attribute to its value. 
- - # Returns: - # The array that contains state attribute values, ordered as in - # self.state_attrs. - # """ - # return np.array([[(state[v] - self.state_min[v] / - # (self.state_max[v] - self.state_min[v])) - # for v in self.state_attrs]], - # dtype=np.float32) - - def create_user_action(self, action: np.ndarray) -> Dict[str, float]: - """Converts an action vector to a dictionary of actions for the user. + """The computation of a Sight Decision API episode and its meta-data.""" + + # The function that runs a single episode. + driver_fn: Callable[[Any], Any] + + # The names of the episode's state variables. + state_attrs: List[str] + + # Maps the name of each state variable to its index in state_attrs. + state_attr_to_idx: Dict[str, int] + + # Mapping from all the state variables to their minimum and maximum values. + state_min: Dict[str, float] + state_max: Dict[str, float] + + # datatype of state attrs + # state_dtype: None + + # The TFAgents schema of the observation space. + # observation_spec: array_spec.BoundedArraySpec + + # The names of the episode's action variables. + action_attrs: List[str] + + # Maps the name of each action variable to its index in action_attrs. + action_attr_to_idx: Dict[str, int] + + # Mapping from all the action variables to their minimum and maximum values. + action_min: Dict[str, float] + action_max: Dict[str, float] + + # datatype of action attrs + # action_dtype: None + + # possible valid values of action attrs + valid_action_values: Dict[str, int] + + # possible valid values of action attrs + step_size: Dict[str, int] + + # The TFAgents schema of the action space. + # action_spec: array_spec.BoundedArraySpec + + # The TFAgents schema of the rewards space. + # reward_spec: array_spec.ArraySpec + + # The TFAgents schema of the time steps space (includes observations and + # rewards). 
+ # time_step_spec: ts.TimeStep + + def __init__( + self, + driver_fn: Callable[[Any], Any], + state_attrs: Dict[str, sight_pb2.DecisionConfigurationStart.AttrProps], + action_attrs: Dict[str, sight_pb2.DecisionConfigurationStart.AttrProps], + ): + self.driver_fn = driver_fn + + self.state_attrs = list(state_attrs.keys()) + self.state_attr_to_idx = {} + for i in range(len(self.state_attrs)): + self.state_attr_to_idx[self.state_attrs[i]] = i + self.state_min = { + attr: min_max.min_value for attr, min_max in state_attrs.items() + } + self.state_max = { + attr: min_max.max_value for attr, min_max in state_attrs.items() + } + + # for attr, val in state_attrs.items(): + # self.state_dtype = val.datatype + # break + + # self.observation_spec = array_spec.BoundedArraySpec( + # shape=(len(state_attrs),), + # dtype=np.float32, + # minimum=[0] * len(state_attrs), + # maximum=[1] * len(state_attrs), + # name='observation', + # ) + + self.action_attrs = list(action_attrs.keys()) + self.action_attr_to_idx = {} + for i in range(len(self.action_attrs)): + self.action_attr_to_idx[self.action_attrs[i]] = i + self.action_min = { + attr: min_max.min_value for attr, min_max in action_attrs.items() + } + self.action_max = { + attr: min_max.max_value for attr, min_max in action_attrs.items() + } + + self.valid_action_values = { + attr: attr_val.valid_int_values + for attr, attr_val in action_attrs.items() + if attr_val.valid_int_values + } + + self.step_size = { + attr: attr_val.step_size + for attr, attr_val in action_attrs.items() + if attr_val.step_size + } + + # for action, attributes in action_attrs.items(): + # if (attributes.valid_int_values): + # self.valid_action_values = attributes.valid_int_values + + # for attr, val in action_attrs.items(): + # self.action_dtype = val.datatype + # break + + # if len(self.action_attrs) == 1: + # self.action_spec = array_spec.BoundedArraySpec( + # shape=(), + # dtype=np.float32, + # minimum=0, + # maximum=20, + # name='action', + # ) + # else: + # self.action_spec = array_spec.BoundedArraySpec( + # shape=(len(action_attrs),), + # dtype=np.float32, + # minimum=[0] * len(action_attrs), + # maximum=[20] * len(action_attrs), + # name='action', + # ) + + # self.reward_spec = array_spec.ArraySpec( + # shape=(), dtype=np.float32, name='reward') + # self.time_step_spec = ts.time_step_spec(self.observation_spec, + # self.reward_spec) + + # def tf_observation_spec(self) -> tf.TensorSpec: + # """Returns the TFAgents Tensor schema of the observation space.""" + # return tensor_spec.from_spec(self.observation_spec) + + # def tf_action_spec(self) -> tf.TensorSpec: + # """Returns the TFAgents Tensor schema of the action space.""" + # return tensor_spec.from_spec(self.action_spec) + + # def tf_time_step_spec(self) -> tf.TensorSpec: + # """Returns the TFAgents Tensor schema of the time step space.""" + # return tensor_spec.from_spec(self.time_step_spec) + + # def create_tf_observation(self, state: Dict[str, float]) -> np.ndarray: + # """Creates an observation vector from a state dict. + + # The values of the observation vector are in a canonical order and in the + # 0-1 range, whereas the values in the state dict are in the range specified + # by the user when this object was initialized. + + # Args: + # state: Maps each state attribute to its value. + + # Returns: + # The array that contains state attribute values, ordered as in + # self.state_attrs. 
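
One caveat worth recording here: in the commented-out body just below, the expression `state[v] - self.state_min[v] / (self.state_max[v] - self.state_min[v])` divides before it subtracts, so the normalization would be wrong if this code is ever revived. A minimal sketch of the intended 0-1 scaling, assuming finite and distinct min/max bounds:

```python
def scale_to_unit(value: float, lo: float, hi: float) -> float:
  # Note the parentheses around the subtraction, which the commented-out
  # code below omits: division binds tighter than subtraction in Python.
  return (value - lo) / (hi - lo)

assert scale_to_unit(75.0, 50.0, 100.0) == 0.5
```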
+ # """ + # return np.array([[(state[v] - self.state_min[v] / + # (self.state_max[v] - self.state_min[v])) + # for v in self.state_attrs]], + # dtype=np.float32) + + def create_user_action(self, action: np.ndarray) -> Dict[str, float]: + """Converts an action vector to a dictionary of actions for the user. The actions dictionary maps the name of each action to its value in the range specified by the user when this object was initialized. The values @@ -211,19 +206,18 @@ def create_user_action(self, action: np.ndarray) -> Dict[str, float]: The array that contains action attribute values, ordered as in self.action_attrs. """ - if len(self.action_attrs) == 1: - action_list = [action.numpy()] - else: - action_list = list(action[0].numpy()) - action_dict = {} - for i in range(len(action_list)): - var = self.action_attrs[i] - action_dict[var] = (action_list[i] / 20) * ( - self.action_max[var] - - self.action_min[var]) + self.action_min[var] - - return action_dict - - def run(self, sight: Any) -> None: - """Generates a dataset for a single episode.""" - self.driver_fn(sight) + if len(self.action_attrs) == 1: + action_list = [action.numpy()] + else: + action_list = list(action[0].numpy()) + action_dict = {} + for i in range(len(action_list)): + var = self.action_attrs[i] + action_dict[var] = (action_list[i] / 20) * ( + self.action_max[var] - self.action_min[var]) + self.action_min[var] + + return action_dict + + def run(self, sight: Any) -> None: + """Generates a dataset for a single episode.""" + self.driver_fn(sight) diff --git a/py/sight/widgets/decision/env_driver.py b/py/sight/widgets/decision/env_driver.py index bb7478f..ad15262 100644 --- a/py/sight/widgets/decision/env_driver.py +++ b/py/sight/widgets/decision/env_driver.py @@ -14,7 +14,6 @@ """Default Driver function to be used while training within the Sight log.""" from helpers.logs.logs_handler import logger as logging - import numpy as np from sight import data_structures # from sight.sight import Sight @@ -24,33 +23,32 @@ def driver_fn(env, sight) -> None: - """Executes the logic of searching for a value. + """Executes the logic of searching for a value. Args: env: The dm_env type env obcject used to call the reset and step methods. sight: The Sight logger object used to drive decisions. 
""" - method_name = 'driver_fn' - logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name) - - timestep = env.reset() + method_name = 'driver_fn' + logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name) - state_attrs = decision.get_state_attrs(sight) - for i in range(len(state_attrs)): - data_structures.log_var(state_attrs[i], timestep.observation[i], sight) + timestep = env.reset() - while not timestep.last(): - chosen_action = decision.decision_point("DP_label", sight) + state_attrs = decision.get_state_attrs(sight) + for i in range(len(state_attrs)): + data_structures.log_var(state_attrs[i], timestep.observation[i], sight) - timestep = env.step(chosen_action) + while not timestep.last(): + chosen_action = decision.decision_point("DP_label", sight) - for i in range(len(state_attrs)): - data_structures.log_var(state_attrs[i], timestep.observation[i], - sight) + timestep = env.step(chosen_action) - decision.decision_outcome( - "DO_label", - timestep.reward, - sight, - ) - logging.debug("<<<< Out %s of %s", method_name, _file_name) + for i in range(len(state_attrs)): + data_structures.log_var(state_attrs[i], timestep.observation[i], sight) + + decision.decision_outcome( + "DO_label", + timestep.reward, + sight, + ) + logging.debug("<<<< Out %s of %s", method_name, _file_name) diff --git a/py/sight/widgets/decision/get_outcome.py b/py/sight/widgets/decision/get_outcome.py index 40bc43e..e062a31 100644 --- a/py/sight/widgets/decision/get_outcome.py +++ b/py/sight/widgets/decision/get_outcome.py @@ -18,9 +18,9 @@ from absl import app from absl import flags from helpers.logs.logs_handler import logger as logging +from sight import service_utils as service from sight_service.proto import service_pb2 from sight_service.proto import service_pb2_grpc -from sight import service_utils as service FLAGS = flags.FLAGS @@ -36,27 +36,27 @@ def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError("Too many command-line arguments.") + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") - request = service_pb2.GetOutcomeRequest() - request.client_id = str(FLAGS.log_id) - # request.unique_ids.append(1) - response = service.call( - lambda s, meta: s.GetOutcome(request, 300, metadata=meta)) + request = service_pb2.GetOutcomeRequest() + request.client_id = str(FLAGS.log_id) + # request.unique_ids.append(1) + response = service.call( + lambda s, meta: s.GetOutcome(request, 300, metadata=meta)) - if (response.response_str): - return response.response_str + if (response.response_str): + return response.response_str - outcome_list = [] - for outcome in response.outcome: - outcome_dict = {} - outcome_dict['reward'] = outcome.reward - outcome_dict['action'] = dict(outcome.action_attrs) - outcome_dict['outcome'] = dict(outcome.outcome_attrs) - outcome_list.append(outcome_dict) - return outcome_list + outcome_list = [] + for outcome in response.outcome: + outcome_dict = {} + outcome_dict['reward'] = outcome.reward + outcome_dict['action'] = dict(outcome.action_attrs) + outcome_dict['outcome'] = dict(outcome.outcome_attrs) + outcome_list.append(outcome_dict) + return outcome_list if __name__ == "__main__": - app.run(main) + app.run(main) diff --git a/py/sight/widgets/decision/listen.py b/py/sight/widgets/decision/listen.py index 3f22362..44fbfb1 100644 --- a/py/sight/widgets/decision/listen.py +++ b/py/sight/widgets/decision/listen.py @@ -21,14 +21,13 @@ from absl import app from absl import flags -from helpers.logs.logs_handler import logger as logging 
import grpc -from sight_service.proto import service_pb2 -from sight_service.proto import service_pb2_grpc +from helpers.logs.logs_handler import logger as logging from sight import service_utils as service from sight.proto import sight_pb2 from sight.service_utils import generate_metadata -import time +from sight_service.proto import service_pb2 +from sight_service.proto import service_pb2_grpc _LOG_ID = flags.DEFINE_string( 'log_id', None, 'ID of the Sight log that tracks this execution.') @@ -42,20 +41,19 @@ def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError("Too many command-line arguments.") + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") - while True: - req = service_pb2.ListenRequest() - req.client_id = _LOG_ID.value - response = service.call( - lambda s, meta: s.Listen(req, 300, metadata=meta)) - print('response=', response) - if response.response_ready: - print(response.response_str) - break - time.sleep(5) + while True: + req = service_pb2.ListenRequest() + req.client_id = _LOG_ID.value + response = service.call(lambda s, meta: s.Listen(req, 300, metadata=meta)) + print('response=', response) + if response.response_ready: + print(response.response_str) + break + time.sleep(5) if __name__ == "__main__": - app.run(main) + app.run(main) diff --git a/py/sight/widgets/decision/llm_optimizer_client.py b/py/sight/widgets/decision/llm_optimizer_client.py index 49537df..907f497 100644 --- a/py/sight/widgets/decision/llm_optimizer_client.py +++ b/py/sight/widgets/decision/llm_optimizer_client.py @@ -13,79 +13,76 @@ # limitations under the License. """Client for LLM optimizer to communicate with server.""" +import time from typing import Optional, Sequence, Tuple from helpers.logs.logs_handler import logger as logging -from sight_service.proto import service_pb2 +from overrides import override from sight import service_utils as service from sight.proto import sight_pb2 from sight.widgets.decision.optimizer_client import OptimizerClient -from overrides import override -import time +from sight_service.proto import service_pb2 class LLMOptimizerClient(OptimizerClient): - """LLM client for the Sight service.""" + """LLM client for the Sight service.""" - def __init__(self, llm_name: str, description: str, sight): - super().__init__( - sight_pb2.DecisionConfigurationStart.OptimizerType.OT_LLM) - if llm_name.startswith('text_bison'): - self._algorithm = sight_pb2.DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_TEXT_BISON - elif llm_name.startswith('chat_bison'): - self._algorithm = sight_pb2.DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_CHAT_BISON - elif llm_name.startswith('gemini_pro'): - self._algorithm = sight_pb2.DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_GEMINI_PRO - else: - raise ValueError(f'Unknown LLM Algorithm {llm_name}') + def __init__(self, llm_name: str, description: str, sight): + super().__init__(sight_pb2.DecisionConfigurationStart.OptimizerType.OT_LLM) + if llm_name.startswith('text_bison'): + self._algorithm = sight_pb2.DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_TEXT_BISON + elif llm_name.startswith('chat_bison'): + self._algorithm = sight_pb2.DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_CHAT_BISON + elif llm_name.startswith('gemini_pro'): + self._algorithm = sight_pb2.DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_GEMINI_PRO + else: + raise ValueError(f'Unknown LLM Algorithm {llm_name}') - if llm_name.endswith('_optimize'): - self._goal = 
sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_OPTIMIZE - elif llm_name.endswith('_recommend'): - self._goal = sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_RECOMMEND - elif llm_name.endswith('_interactive'): - self._goal = sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_INTERACTIVE - else: - raise ValueError(f'Unknown LLM Goal {llm_name}') + if llm_name.endswith('_optimize'): + self._goal = sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_OPTIMIZE + elif llm_name.endswith('_recommend'): + self._goal = sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_RECOMMEND + elif llm_name.endswith('_interactive'): + self._goal = sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_INTERACTIVE + else: + raise ValueError(f'Unknown LLM Goal {llm_name}') - self._description = description + self._description = description - self._sight = sight - self._worker_id = None + self._sight = sight + self._worker_id = None - @override - def create_config( - self) -> sight_pb2.DecisionConfigurationStart.ChoiceConfig: - choice_config = sight_pb2.DecisionConfigurationStart.ChoiceConfig() - llm_config = sight_pb2.DecisionConfigurationStart.LLMConfig( - algorithm=self._algorithm, - goal=self._goal, - description=self._description) - choice_config.llm_config.CopyFrom(llm_config) - return choice_config + @override + def create_config(self) -> sight_pb2.DecisionConfigurationStart.ChoiceConfig: + choice_config = sight_pb2.DecisionConfigurationStart.ChoiceConfig() + llm_config = sight_pb2.DecisionConfigurationStart.LLMConfig( + algorithm=self._algorithm, + goal=self._goal, + description=self._description) + choice_config.llm_config.CopyFrom(llm_config) + return choice_config - @override - def decision_point(self, sight, request: service_pb2.DecisionPointRequest): - for key, value in sight.widget_decision_state["state"].items(): - param = request.decision_point.state_params.add() - param.key = key - param.value.sub_type - param.value.double_value = value + @override + def decision_point(self, sight, request: service_pb2.DecisionPointRequest): + for key, value in sight.widget_decision_state["state"].items(): + param = request.decision_point.state_params.add() + param.key = key + param.value.sub_type + param.value.double_value = value - while True: - response = service.call( - lambda s, meta: s.DecisionPoint(request, 300, metadata=meta)) - logging.info('decision_point() response=%s' % response) - if response.action_type == service_pb2.DecisionPointResponse.ActionType.AT_ACT: - return self._get_dp_action(response) - if response.action_type == service_pb2.DecisionPointResponse.ActionType.AT_RETRY: - time.sleep(5) + while True: + response = service.call( + lambda s, meta: s.DecisionPoint(request, 300, metadata=meta)) + logging.info('decision_point() response=%s' % response) + if response.action_type == service_pb2.DecisionPointResponse.ActionType.AT_ACT: + return self._get_dp_action(response) + if response.action_type == service_pb2.DecisionPointResponse.ActionType.AT_RETRY: + time.sleep(5) - @override - def finalize_episode(self, sight, - request: service_pb2.FinalizeEpisodeRequest): - logging.info('LLMOptimizerClient() finalize_episode, request=%s', - request) - response = service.call( - lambda s, meta: s.FinalizeEpisode(request, 300, metadata=meta)) - return response + @override + def finalize_episode(self, sight, + request: service_pb2.FinalizeEpisodeRequest): + logging.info('LLMOptimizerClient() finalize_episode, request=%s', request) + response = service.call( + lambda 
s, meta: s.FinalizeEpisode(request, 300, metadata=meta)) + return response diff --git a/py/sight/widgets/decision/optimizer_client.py b/py/sight/widgets/decision/optimizer_client.py index e5f287d..da0a209 100644 --- a/py/sight/widgets/decision/optimizer_client.py +++ b/py/sight/widgets/decision/optimizer_client.py @@ -11,22 +11,24 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Base implementation of optimizer clients that communicate with the decision service.""" from typing import Any, Dict, Optional, Sequence, Tuple -from sight_service.proto import service_pb2 from sight import service_utils as service from sight.proto import sight_pb2 +from sight_service.proto import service_pb2 + class OptimizerClient: """Generic optimizer for the Sight Decision service.""" - def __init__(self, optimizer_type: sight_pb2.DecisionConfigurationStart.OptimizerType): + def __init__( + self, optimizer_type: sight_pb2.DecisionConfigurationStart.OptimizerType): self._optimizer_type = optimizer_type - def optimizer_type(self) -> sight_pb2.DecisionConfigurationStart.OptimizerType: + def optimizer_type( + self) -> sight_pb2.DecisionConfigurationStart.OptimizerType: return self._optimizer_type def create_config(self) -> sight_pb2.DecisionConfigurationStart.ChoiceConfig: @@ -34,35 +36,40 @@ def create_config(self) -> sight_pb2.DecisionConfigurationStart.ChoiceConfig: def decision_point(self, sight, request: service_pb2.DecisionPointRequest): response = service.call( - lambda s, meta: s.DecisionPoint(request, 300, metadata=meta) - ) + lambda s, meta: s.DecisionPoint(request, 300, metadata=meta)) return self._get_dp_action(response) - def _get_dp_action(self, dp_response: service_pb2.DecisionPointResponse) -> Dict[str, Any]: + def _get_dp_action( + self, dp_response: service_pb2.DecisionPointResponse) -> Dict[str, Any]: """Returns the dict representation of the action encoded in dp_response.""" d = {} for a in dp_response.action: - if(a.value.sub_type == sight_pb2.Value.ST_DOUBLE): + if (a.value.sub_type == sight_pb2.Value.ST_DOUBLE): d[a.key] = a.value.double_value - elif(a.value.sub_type == sight_pb2.Value.ST_STRING): + elif (a.value.sub_type == sight_pb2.Value.ST_STRING): d[a.key] = a.value.string_value else: raise ValueError("not supported type!!") return d - def _set_dp_action(self, dp: sight_pb2.DecisionPoint, action: Dict[str, Any]) -> None: + def _set_dp_action(self, dp: sight_pb2.DecisionPoint, + action: Dict[str, Any]) -> None: """Add to dp the attributes of action.""" for key, val in action.items(): - if(isinstance(val,str)): - dp.value.add(sight_pb2.DecisionParam(key=key, value=sight_pb2.Value(string_value=val))) - elif(isinstance(val,float)): - dp.value.add(sight_pb2.DecisionParam(key=key, value=sight_pb2.Value(double_value=val))) + if (isinstance(val, str)): + dp.value.add( + sight_pb2.DecisionParam(key=key, + value=sight_pb2.Value(string_value=val))) + elif (isinstance(val, float)): + dp.value.add( + sight_pb2.DecisionParam(key=key, + value=sight_pb2.Value(double_value=val))) else: raise ValueError("not supported type!!") - def finalize_episode(self, sight, request: service_pb2.FinalizeEpisodeRequest): + def finalize_episode(self, sight, + request: service_pb2.FinalizeEpisodeRequest): response = service.call( - lambda s, meta: s.FinalizeEpisode(request, 300, metadata=meta) - ) + lambda s, meta: s.FinalizeEpisode(request, 300, metadata=meta)) return response diff 
--git a/py/sight/widgets/decision/proposal.py b/py/sight/widgets/decision/proposal.py index 900f685..3ece0c9 100644 --- a/py/sight/widgets/decision/proposal.py +++ b/py/sight/widgets/decision/proposal.py @@ -13,21 +13,23 @@ # limitations under the License. import asyncio +import json + +from absl import flags +from helpers.cache.cache_factory import CacheFactory +from helpers.cache.cache_helper import CacheConfig +from helpers.cache.cache_helper import CacheKeyMaker +from helpers.cache.cache_interface import CacheInterface from sight.attribute import Attribute from sight.block import Block -from absl import flags from sight.proto import sight_pb2 from sight.sight import Sight from sight.widgets.decision import decision from sight.widgets.decision import trials -from sight.widgets.decision.single_action_optimizer_client import ( - SingleActionOptimizerClient) from sight.widgets.decision.resource_lock import RWLockDictWrapper - -from helpers.cache.cache_helper import CacheKeyMaker, CacheConfig -from helpers.cache.cache_factory import CacheFactory -from helpers.cache.cache_interface import CacheInterface -import json +from sight.widgets.decision.single_action_optimizer_client import ( + SingleActionOptimizerClient +) _CACHE_MODE = flags.DEFINE_enum( 'cache_mode', 'none', @@ -40,57 +42,57 @@ async def push_message(sight_id, action_id): - try: - global_outcome_mapping.set_for_key(action_id, None) - except Exception as e: - print(f'Exception => {e}') - raise e + try: + global_outcome_mapping.set_for_key(action_id, None) + except Exception as e: + print(f'Exception => {e}') + raise e async def fetch_outcome(sight_id, actions_id): - while True: - try: - outcome = global_outcome_mapping.get_for_key(actions_id) - if outcome: - return outcome - else: - # async_dict = global_outcome_mapping.get() - # print(f'GLOBAL_MAPPING_GET_OUTCOME_QUEUE => {async_dict}') - time = 5 - # print(f'Waiting for {actions_id} for {time} seconds...') - await asyncio.sleep(time) - except Exception as e: - raise e + while True: + try: + outcome = global_outcome_mapping.get_for_key(actions_id) + if outcome: + return outcome + else: + # async_dict = global_outcome_mapping.get() + # print(f'GLOBAL_MAPPING_GET_OUTCOME_QUEUE => {async_dict}') + time = 5 + # print(f'Waiting for {actions_id} for {time} seconds...') + await asyncio.sleep(time) + except Exception as e: + raise e async def propose_actions(sight, action_dict, custom_part="sight_cache"): - key_maker = CacheKeyMaker() - cache_key = key_maker.make_custom_key(custom_part, action_dict) - - cache_client = CacheFactory.get_cache( - _CACHE_MODE.value, - with_redis=CacheConfig.get_redis_instance(_CACHE_MODE.value)) + key_maker = CacheKeyMaker() + cache_key = key_maker.make_custom_key(custom_part, action_dict) - outcome = cache_client.json_get(key=cache_key) + cache_client = CacheFactory.get_cache( + _CACHE_MODE.value, + with_redis=CacheConfig.get_redis_instance(_CACHE_MODE.value)) - if outcome is not None: - print('Getting response from cache !!') - return outcome + outcome = cache_client.json_get(key=cache_key) - unique_action_id = decision.propose_actions(sight, action_dict) - await push_message(sight.id, unique_action_id) - response = await fetch_outcome(sight.id, unique_action_id) - outcome = response.get('outcome', None) - if response is None or outcome is None: - raise Exception('fetch_outcome response or respose["outcome"] is none') - # converting the stringify data into json data if it can - for key in outcome: - value = outcome[key] - try: - final_value = 
json.loads(value)
-        except (json.JSONDecodeError,TypeError):
-            final_value = value
-        outcome[key] = final_value
-    cache_client.json_set(key=cache_key, value=outcome)
+  if outcome is not None:
+    print('Getting response from cache !!')
     return outcome
+
+  unique_action_id = decision.propose_actions(sight, action_dict)
+  await push_message(sight.id, unique_action_id)
+  response = await fetch_outcome(sight.id, unique_action_id)
+  outcome = response.get('outcome', None)
+  if response is None or outcome is None:
+    raise Exception('fetch_outcome response or response["outcome"] is none')
+  # converting the stringified data back into json data where possible
+  for key in outcome:
+    value = outcome[key]
+    try:
+      final_value = json.loads(value)
+    except (json.JSONDecodeError, TypeError):
+      final_value = value
+    outcome[key] = final_value
+  cache_client.json_set(key=cache_key, value=outcome)
+  return outcome
diff --git a/py/sight/widgets/decision/resource_lock.py b/py/sight/widgets/decision/resource_lock.py
index a38e5df..dd70fdd 100644
--- a/py/sight/widgets/decision/resource_lock.py
+++ b/py/sight/widgets/decision/resource_lock.py
@@ -1,29 +1,28 @@
 from readerwriterlock import rwlock
 
-class RWLockDictWrapper:
-    # Shared with every instance
-    __shared_state = {}
 
+class RWLockDictWrapper:
 
-    def __init__(self):
-        self.__dict__ = self.__shared_state
-        self.lock = rwlock.RWLockWrite()
-        self.resource = {}
+  # Shared with every instance
+  __shared_state = {}
 
-    def get(self):
-        with self.lock.gen_rlock():
-            return self.resource
+  def __init__(self):
+    self.__dict__ = self.__shared_state
+    self.lock = rwlock.RWLockWrite()
+    self.resource = {}
 
-    def get_for_key(self, key):
-        with self.lock.gen_rlock():
-            return self.resource.get(key,None)
+  def get(self):
+    with self.lock.gen_rlock():
+      return self.resource
 
-    def set_for_key(self, key, value):
-        with self.lock.gen_wlock():
-            self.resource[key] = value
+  def get_for_key(self, key):
+    with self.lock.gen_rlock():
+      return self.resource.get(key, None)
 
-    def update(self, mapping):
-        with self.lock.gen_wlock():
-            self.resource.update(mapping)
+  def set_for_key(self, key, value):
+    with self.lock.gen_wlock():
+      self.resource[key] = value
 
-    
\ No newline at end of file
+  def update(self, mapping):
+    with self.lock.gen_wlock():
+      self.resource.update(mapping)
diff --git a/py/sight/widgets/decision/shower_env_driver.py b/py/sight/widgets/decision/shower_env_driver.py
index 062a548..b180f66 100644
--- a/py/sight/widgets/decision/shower_env_driver.py
+++ b/py/sight/widgets/decision/shower_env_driver.py
@@ -13,8 +13,9 @@
 # limitations under the License.
 """Demo of Drivier function to be used in case Sight used without any environment."""
-from helpers.logs.logs_handler import logger as logging
 import random
+
+from helpers.logs.logs_handler import logger as logging
 import numpy as np
 from sight import data_structures
 from sight.sight import Sight
@@ -24,43 +25,42 @@
 
 
 def driver_fn(sight: Sight) -> None:
-    """Executes the logic of searching for a value.
+  """Executes the logic of searching for a value.
 
   Args:
    sight: The Sight logger object used to drive decisions.
""" - method_name = 'driver_fn' - logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name) + method_name = 'driver_fn' + logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name) - logging.info('sight.widget_decision_state : %s', - sight.widget_decision_state) + logging.info('sight.widget_decision_state : %s', sight.widget_decision_state) - temperature = 38 + random.randint(-3, 3) - shower_length = 60 - data_structures.log_var("Temperature", temperature, sight) + temperature = 38 + random.randint(-3, 3) + shower_length = 60 + data_structures.log_var("Temperature", temperature, sight) - for _ in range(shower_length): - # Ask Sight's optimizer for the action to perform. - chosen_action = decision.decision_point("DP_label", sight) - # direction = np.array(chosen_action["Direction"], dtype=np.int64) + for _ in range(shower_length): + # Ask Sight's optimizer for the action to perform. + chosen_action = decision.decision_point("DP_label", sight) + # direction = np.array(chosen_action["Direction"], dtype=np.int64) - # Change temperature based on the Sight-recommended direction. - temperature += chosen_action["Direction"] - logging.info('temperature=%s, direction=%s', temperature, - chosen_action["Direction"]) - data_structures.log_var("Temperature", temperature, sight) + # Change temperature based on the Sight-recommended direction. + temperature += chosen_action["Direction"] + logging.info('temperature=%s, direction=%s', temperature, + chosen_action["Direction"]) + data_structures.log_var("Temperature", temperature, sight) - # Calculate reward based on whether the temperature target has - # been achieved. - if temperature >= 37 and temperature <= 39: - current_reward = 1 - else: - current_reward = -abs(temperature - 38) + # Calculate reward based on whether the temperature target has + # been achieved. + if temperature >= 37 and temperature <= 39: + current_reward = 1 + else: + current_reward = -abs(temperature - 38) - # Inform Sight of the outcome of the recommended action. - decision.decision_outcome( - "DO_label", - current_reward, - sight, - ) - logging.debug("<<<< Out %s of %s", method_name, _file_name) + # Inform Sight of the outcome of the recommended action. + decision.decision_outcome( + "DO_label", + current_reward, + sight, + ) + logging.debug("<<<< Out %s of %s", method_name, _file_name) diff --git a/py/sight/widgets/decision/single_action_optimizer_client.py b/py/sight/widgets/decision/single_action_optimizer_client.py index dc38316..d57ea6f 100644 --- a/py/sight/widgets/decision/single_action_optimizer_client.py +++ b/py/sight/widgets/decision/single_action_optimizer_client.py @@ -11,27 +11,31 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- """Client for optimizers that are called once per episode to communicate with server.""" -from helpers.logs.logs_handler import logger as logging +import time from typing import Optional, Sequence, Tuple -from sight_service.proto import service_pb2 + +from helpers.logs.logs_handler import logger as logging +from overrides import override from sight import service_utils as service from sight.proto import sight_pb2 from sight.widgets.decision.optimizer_client import OptimizerClient -from overrides import override -import time +from sight_service.proto import service_pb2 class SingleActionOptimizerClient(OptimizerClient): """Single-action Client for the Sight service.""" - def __init__(self, optimizer_type: sight_pb2.DecisionConfigurationStart.OptimizerType, sight, algorithm=None): + def __init__( + self, + optimizer_type: sight_pb2.DecisionConfigurationStart.OptimizerType, + sight, + algorithm=None): super().__init__(optimizer_type) self._sight = sight self._last_action = None self.exp_completed = False - if(algorithm == None): + if (algorithm == None): self._algorithm = algorithm elif algorithm == 'auto': self._algorithm = sight_pb2.DecisionConfigurationStart.NeverGradConfig.NeverGradAlgorithm.NG_AUTO @@ -70,42 +74,39 @@ def __init__(self, optimizer_type: sight_pb2.DecisionConfigurationStart.Optimize @override def create_config(self) -> sight_pb2.DecisionConfigurationStart.ChoiceConfig: - choice_config = sight_pb2.DecisionConfigurationStart.ChoiceConfig( - ) - if(self._algorithm): + choice_config = sight_pb2.DecisionConfigurationStart.ChoiceConfig() + if (self._algorithm): ng_config = sight_pb2.DecisionConfigurationStart.NeverGradConfig( - algorithm=self._algorithm - ) + algorithm=self._algorithm) choice_config.never_grad_config.CopyFrom(ng_config) return choice_config @override def decision_point(self, sight, request: service_pb2.DecisionPointRequest): # while True: - response = service.call( - lambda s, meta: s.DecisionPoint(request, 300, metadata=meta) - ) - logging.info('response: %s', response) - if response.action_type == service_pb2.DecisionPointResponse.ActionType.AT_ACT: - self._last_action = response.action - return self._get_dp_action(response) - # elif response.action_type == service_pb2.DecisionPointResponse.ActionType.AT_DONE: - # self.exp_completed = True - # return None - # elif response.action_type == service_pb2.DecisionPointResponse.ActionType.AT_RETRY: - # print('waiting in decision point to get server from response......') - # logging.info('sleeping for 5 seconds......') - # time.sleep(5) - else: - raise ValueError("No action received from server") + response = service.call( + lambda s, meta: s.DecisionPoint(request, 300, metadata=meta)) + logging.info('response: %s', response) + if response.action_type == service_pb2.DecisionPointResponse.ActionType.AT_ACT: + self._last_action = response.action + return self._get_dp_action(response) + # elif response.action_type == service_pb2.DecisionPointResponse.ActionType.AT_DONE: + # self.exp_completed = True + # return None + # elif response.action_type == service_pb2.DecisionPointResponse.ActionType.AT_RETRY: + # print('waiting in decision point to get server from response......') + # logging.info('sleeping for 5 seconds......') + # time.sleep(5) + else: + raise ValueError("No action received from server") @override - def finalize_episode(self, sight, request: service_pb2.FinalizeEpisodeRequest): + def finalize_episode(self, sight, + request: service_pb2.FinalizeEpisodeRequest): # logging.info('SingleActionOptimizerClient() 
finalize_episode') if self._last_action: for a in self._last_action: request.decision_point.choice_params.append(a) response = service.call( - lambda s, meta: s.FinalizeEpisode(request, 300, metadata=meta) - ) + lambda s, meta: s.FinalizeEpisode(request, 300, metadata=meta)) return response diff --git a/py/sight/widgets/decision/tell.py b/py/sight/widgets/decision/tell.py index ab21023..4fe9adf 100644 --- a/py/sight/widgets/decision/tell.py +++ b/py/sight/widgets/decision/tell.py @@ -21,14 +21,13 @@ from absl import app from absl import flags -from helpers.logs.logs_handler import logger as logging import grpc -from sight_service.proto import service_pb2 -from sight_service.proto import service_pb2_grpc +from helpers.logs.logs_handler import logger as logging from sight import service_utils as service - from sight.proto import sight_pb2 from sight.service_utils import generate_metadata +from sight_service.proto import service_pb2 +from sight_service.proto import service_pb2_grpc _LOG_ID = flags.DEFINE_string( 'log_id', None, 'ID of the Sight log that tracks this execution.') @@ -43,15 +42,15 @@ def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError("Too many command-line arguments.") + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") - req = service_pb2.TellRequest() - req.client_id = _LOG_ID.value - req.message_str = _TELL.value - response = service.call(lambda s, meta: s.Tell(req, 300, metadata=meta)) - print(response.response_str) + req = service_pb2.TellRequest() + req.client_id = _LOG_ID.value + req.message_str = _TELL.value + response = service.call(lambda s, meta: s.Tell(req, 300, metadata=meta)) + print(response.response_str) if __name__ == "__main__": - app.run(main) + app.run(main) diff --git a/py/sight/widgets/decision/trials.py b/py/sight/widgets/decision/trials.py index 6d9c6e5..3b089f0 100644 --- a/py/sight/widgets/decision/trials.py +++ b/py/sight/widgets/decision/trials.py @@ -19,21 +19,19 @@ import random import subprocess import time -import pytz from typing import Any, Dict, Optional from absl import flags -from helpers.logs.logs_handler import logger as logging -import grpc -from sight_service.proto import service_pb2 - from dotenv import load_dotenv +import grpc +from helpers.logs.logs_handler import logger as logging +import pytz from sight import service_utils as service - from sight.proto import sight_pb2 +from sight.widgets.decision import decision # from sight.widgets.decision.acme import acme_optimizer_client from sight.widgets.decision.optimizer_client import OptimizerClient -from sight.widgets.decision import decision +from sight_service.proto import service_pb2 load_dotenv() @@ -71,12 +69,12 @@ def _get_experiment_name(sight: Any) -> str: - if _EXPERIMENT_NAME.value: - return _EXPERIMENT_NAME.value - else: - return ('Sight_Decision_Study_' + - sight.params.label.replace(' ', '_') + '_' + str(sight.id) + - '_' + datetime.now().strftime('%Y%m%d_%H%M%S')) + if _EXPERIMENT_NAME.value: + return _EXPERIMENT_NAME.value + else: + return ('Sight_Decision_Study_' + sight.params.label.replace(' ', '_') + + '_' + str(sight.id) + '_' + + datetime.now().strftime('%Y%m%d_%H%M%S')) def launch( @@ -89,7 +87,7 @@ def launch( num_train_workers: int, sight: Any, ): - """Launches the experiment with the service. + """Launches the experiment with the service. Args: optimizer_type: Type of optimizer we are using. 
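
As a concrete illustration of the fallback name assembled by `_get_experiment_name()` above, a sketch under assumed label, id, and timestamp values:

```python
from datetime import datetime

# Illustrative values; the real label and id come from the Sight object.
label = 'My Decision Study'
sight_id = 1234

name = ('Sight_Decision_Study_' + label.replace(' ', '_') + '_' +
        str(sight_id) + '_' + datetime.now().strftime('%Y%m%d_%H%M%S'))
print(name)  # e.g. Sight_Decision_Study_My_Decision_Study_1234_20240916_094316
```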
@@ -99,41 +97,39 @@ def launch( num_train_workers: numbers of workers to be spawned sight: The Sight object to be used for logging. """ - method_name = 'launch' - logging.debug('>>>>>>>>> In %s method of %s file.', method_name, - _file_name) + method_name = 'launch' + logging.debug('>>>>>>>>> In %s method of %s file.', method_name, _file_name) - req = service_pb2.LaunchRequest() + req = service_pb2.LaunchRequest() - # config_param = sight_pb2.DecisionConfigurationStart() - # for key, attr in action_attrs.items(): - # config_param.action_attrs[key].CopyFrom(attr) - # for key, attr in state_attrs.items(): - # config_param.state_attrs[key].CopyFrom(attr) - req.decision_config_params.CopyFrom(decision_configuration) + # config_param = sight_pb2.DecisionConfigurationStart() + # for key, attr in action_attrs.items(): + # config_param.action_attrs[key].CopyFrom(attr) + # for key, attr in state_attrs.items(): + # config_param.state_attrs[key].CopyFrom(attr) + req.decision_config_params.CopyFrom(decision_configuration) - req.label = sight.params.label - req.client_id = str(sight.id) + req.label = sight.params.label + req.client_id = str(sight.id) - response = service.call(lambda s, meta: s.Launch(req, 300, metadata=meta)) - # start polling thread, fetching outcome from server for proposed actions - if (decision_configuration.optimizer_type == sight_pb2. - DecisionConfigurationStart.OptimizerType.OT_WORKLIST_SCHEDULER - and response.display_string == "Worklist Scheduler SUCCESS!"): - decision.init_sight_polling_thread(sight.id) - logging.info('##### Launch response=%s #####', response) + response = service.call(lambda s, meta: s.Launch(req, 300, metadata=meta)) + # start polling thread, fetching outcome from server for proposed actions + if (decision_configuration.optimizer_type == sight_pb2. + DecisionConfigurationStart.OptimizerType.OT_WORKLIST_SCHEDULER and + response.display_string == "Worklist Scheduler SUCCESS!"): + decision.init_sight_polling_thread(sight.id) + logging.info('##### Launch response=%s #####', response) - logging.debug('<<<<<<<<< Out %s method of %s file.', method_name, - _file_name) + logging.debug('<<<<<<<<< Out %s method of %s file.', method_name, _file_name) def append_ist_time_to_logging_path_12hr(): - # Define IST timezone - ist = pytz.timezone('Asia/Kolkata') - # Get the current date and time in IST - current_time = datetime.now(ist) - formatted_time = current_time.strftime('%Y-%m-%d-%I-%M-%S') - return formatted_time + # Define IST timezone + ist = pytz.timezone('Asia/Kolkata') + # Get the current date and time in IST + current_time = datetime.now(ist) + formatted_time = current_time.strftime('%Y-%m-%d-%I-%M-%S') + return formatted_time def start_job_in_docker( @@ -147,7 +143,7 @@ def start_job_in_docker( decision_params: str, sight: Any, ): - """Starts a single worker in a docker container. + """Starts a single worker in a docker container. Args: num_trials: The number of times the experiment will be run during training. @@ -160,69 +156,67 @@ def start_job_in_docker( decision_params: add sight: The Sight object to be used for logging. """ - method_name = 'start_job_in_docker' - logging.debug('>>>>>>>>> In %s method of %s file.', method_name, - _file_name) - - sight.enter_block('Worker Spawning', sight_pb2.Object()) - # Write the script that will execute the binary within the docker container. 
- decision_params_arg = (f' --decision_params={decision_params}' - if decision_params else '') - os.makedirs('/tmp/sight_script', exist_ok=True) - with open('/tmp/sight_script/sight_decision_command.sh', 'w') as f: - f.write('#!/bin/bash\n') - f.write('echo "$PYTHONPATH"') - f.write( - '/usr/bin/python3' - f' /project/{binary_path.split("/")[-1]} --decision_mode={decision_mode} --deployment_mode={deployment_mode}' - f' --worker_mode={worker_mode} --optimizer_type={optimizer_type} --num_trials={num_trials} ' - ) - if FLAGS.service_account: - f.write(f' --service_account={FLAGS.service_account}') - f.write(f' {decision_params_arg}\n ') - os.chmod('/tmp/sight_script/sight_decision_command.sh', 0o755) - subprocess.run(['cp', binary_path, '/tmp'], check=True) - - args = [ - 'docker', - 'run', - '-v', - f'/tmp/{binary_path.split("/")[-1]}:/project/{binary_path.split("/")[-1]}:ro', - '-v', - '/tmp/sight_script:/project/sight_script:ro', - '-v', - # f'{os.path.expanduser("~")}/.config/gcloud:/project/.config/gcloud:ro', - f'{FLAGS.gcloud_dir_path}:/project/.config/gcloud:ro', - '--env', - 'GOOGLE_APPLICATION_CREDENTIALS=/project/.config/gcloud/application_default_credentials.json', - # '--env', - # 'PYTHONPATH=/project', - '--env', - f'GOOGLE_CLOUD_PROJECT={_PROJECT_ID.value}', - '--env', - f'PARENT_LOG_ID={sight.id}', - '--env', - f'SIGHT_SERVICE_ID={service._SERVICE_ID}', - # '--env', - # f'SIGHT_SERVICE_ACCOUNT={_SERVICE_ACCOUNT.value}', - '--env', - f'worker_location={sight.location.get()}', - '--env', - f'num_samples={num_trials}', - '--net=host', - '-t', - '-i', - '--rm', - docker_image, - '/project/sight_script/sight_decision_command.sh', - # 'bash', - ] - logging.info('DOCKER CONTAINER SPAWNING =%s', ' '.join(args)) - subprocess.run(args, check=True) - - sight.exit_block('Worker Spawning', sight_pb2.Object()) - logging.debug('<<<<<<<<< Out %s method of %s file.', method_name, - _file_name) + method_name = 'start_job_in_docker' + logging.debug('>>>>>>>>> In %s method of %s file.', method_name, _file_name) + + sight.enter_block('Worker Spawning', sight_pb2.Object()) + # Write the script that will execute the binary within the docker container. 
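
For orientation, a minimal sketch of the command line that this generated script ends up containing, assuming the flag set shown below (the real code also appends --service_account and --decision_params when they are set; the argument values here are illustrative):

```python
def build_worker_cmd(binary_path, decision_mode, deployment_mode, worker_mode,
                     optimizer_type, num_trials):
  # Mirrors the f-string written into sight_decision_command.sh below.
  script = binary_path.split('/')[-1]
  return ('/usr/bin/python3'
          f' /project/{script} --decision_mode={decision_mode}'
          f' --deployment_mode={deployment_mode} --worker_mode={worker_mode}'
          f' --optimizer_type={optimizer_type} --num_trials={num_trials}')

print(build_worker_cmd('py/sight/demo/demo.py', 'train', 'local',
                       'dsub_local_worker', 'exhaustive_search', 10))
```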
+ decision_params_arg = (f' --decision_params={decision_params}' + if decision_params else '') + os.makedirs('/tmp/sight_script', exist_ok=True) + with open('/tmp/sight_script/sight_decision_command.sh', 'w') as f: + f.write('#!/bin/bash\n') + f.write('echo "$PYTHONPATH"') + f.write( + '/usr/bin/python3' + f' /project/{binary_path.split("/")[-1]} --decision_mode={decision_mode} --deployment_mode={deployment_mode}' + f' --worker_mode={worker_mode} --optimizer_type={optimizer_type} --num_trials={num_trials} ' + ) + if FLAGS.service_account: + f.write(f' --service_account={FLAGS.service_account}') + f.write(f' {decision_params_arg}\n ') + os.chmod('/tmp/sight_script/sight_decision_command.sh', 0o755) + subprocess.run(['cp', binary_path, '/tmp'], check=True) + + args = [ + 'docker', + 'run', + '-v', + f'/tmp/{binary_path.split("/")[-1]}:/project/{binary_path.split("/")[-1]}:ro', + '-v', + '/tmp/sight_script:/project/sight_script:ro', + '-v', + # f'{os.path.expanduser("~")}/.config/gcloud:/project/.config/gcloud:ro', + f'{FLAGS.gcloud_dir_path}:/project/.config/gcloud:ro', + '--env', + 'GOOGLE_APPLICATION_CREDENTIALS=/project/.config/gcloud/application_default_credentials.json', + # '--env', + # 'PYTHONPATH=/project', + '--env', + f'GOOGLE_CLOUD_PROJECT={_PROJECT_ID.value}', + '--env', + f'PARENT_LOG_ID={sight.id}', + '--env', + f'SIGHT_SERVICE_ID={service._SERVICE_ID}', + # '--env', + # f'SIGHT_SERVICE_ACCOUNT={_SERVICE_ACCOUNT.value}', + '--env', + f'worker_location={sight.location.get()}', + '--env', + f'num_samples={num_trials}', + '--net=host', + '-t', + '-i', + '--rm', + docker_image, + '/project/sight_script/sight_decision_command.sh', + # 'bash', + ] + logging.info('DOCKER CONTAINER SPAWNING =%s', ' '.join(args)) + subprocess.run(args, check=True) + + sight.exit_block('Worker Spawning', sight_pb2.Object()) + logging.debug('<<<<<<<<< Out %s method of %s file.', method_name, _file_name) def start_jobs( @@ -236,7 +230,7 @@ def start_jobs( worker_mode: str, sight: Any, ): - """Starts the dsub workers that will run the optimization. + """Starts the dsub workers that will run the optimization. Args: num_train_workers: Number of workers to use in a training run. @@ -249,105 +243,101 @@ def start_jobs( worker_mode: add sight: The Sight object to be used for logging. """ - method_name = 'start_jobs' - logging.debug('>>>>>>>>> In %s method of %s file.', method_name, - _file_name) - - sight.enter_block('Worker Spawning', sight_pb2.Object()) - with open('/tmp/optimization_tasks.tsv', 'w') as outf: - outf.write('--env worker_id\t--env worker_location\n') - # num_tasks_per_worker = math.floor(num_trials / num_train_workers) - for worker_id in range(num_train_workers): - # tasks_for_cur_worker = num_tasks_per_worker - # # If _NUM_TRIALS is not evenly divisible by num_train_workers, add - # # the extra extra tasks to the first few workers. - # if worker_id < num_trials % num_train_workers: - # tasks_for_cur_worker += 1 - outf.write(f'{worker_id}\t{sight.location.get()}\n') - sight.location.get().next() - - remote_script = ( - # 'gs://dsub_cameltrain/cameltrain/' + binary_path.split('/')[-1] - f'gs://{os.environ["PROJECT_ID"]}-sight/d-sub/binary/{str(sight.id)}/' - + binary_path.split('/')[-1]) - print(f'Uploading {binary_path}...') - subprocess.run(['gsutil', 'cp', '-c', binary_path, remote_script], - check=True) - - if not FLAGS.service_account: - raise ValueError( - 'flag --service_account required for worker_mode as dsub_cloud_worker.' 
- ) - - # provider = 'local' if deployment_mode == 'local' else 'google-cls-v2' - - # cd /x-sight && - command = ( - 'ls -l && echo "${SCRIPT}" && echo "${PYTHONPATH}" && python3 "${SCRIPT}"' - + f' --decision_mode={decision_mode}' + - f' --deployment_mode={deployment_mode}' + - f' --worker_mode={worker_mode}' + f' --optimizer_type={optimizer_type}' - # + f' --project_id={os.environ["PROJECT_ID"]}' - ) - if FLAGS.env_name: - command += f' --env_name={FLAGS.env_name}' - - logging_path = f'gs://{os.environ["PROJECT_ID"]}-sight/d-sub/logs/{sight.params.label}/{append_ist_time_to_logging_path_12hr()}/' - if (FLAGS.parent_id): - logging_path += f'{FLAGS.parent_id}/' - logging_path += str(sight.id) - - env_vars = [ - '--env', f'PARENT_LOG_ID={sight.id}', '--env', - f'PORT={service.get_port_number()}' - ] - - print("FLAGS.deployment_mode : ", FLAGS.deployment_mode) - if FLAGS.deployment_mode == 'vm': - if FLAGS.ip_addr == 'localhost': - raise ValueError("ip_address must be provided for workers") - env_vars += ['--env', f'IP_ADDR={FLAGS.ip_addr}'] - elif FLAGS.deployment_mode == 'distributed': - env_vars += ['--env', f'SIGHT_SERVICE_ID={service._SERVICE_ID}'] - - print('sight.id=%s' % sight.id) - args = [ - 'dsub', - '--provider=google-cls-v2', - f'--regions={_PROJECT_REGION.value}', - '--use-private-address', - # f'--location={_PROJECT_REGION.value}', - f'--image={docker_image}', - f'--machine-type={_DSUB_MACHINE_TYPE.value}', - f'--project={_PROJECT_ID.value}', - # f'--logging=gs://{os.environ["PROJECT_ID"]}-sight/d-sub/logs/{service._SERVICE_ID}/{sight.id}', - f'--logging={logging_path}', - # '--env', - # f'PARENT_LOG_ID={sight.id}', - # '--env', - # f'SIGHT_SERVICE_ID={service._SERVICE_ID}', - # '--env', - # f'PORT_NUMBER={service.get_port_number()}', - *env_vars, - '--input', - f'SCRIPT={remote_script}', - f'--command={command}', - f'--service-account={FLAGS.service_account}@{os.environ["PROJECT_ID"]}.iam.gserviceaccount.com', - f'--boot-disk-size={_DSUB_BOOT_DISK_SIZE.value}', - '--tasks', - '/tmp/optimization_tasks.tsv', - '--name', - _get_experiment_name(sight)[:63], - ] - - logging.info('CLI=%s', ' '.join(args)) - subprocess.run(args, check=True) - - sight.exit_block('Worker Spawning', sight_pb2.Object()) - logging.info('worker logs available at : %s', - f'gs://{os.environ["PROJECT_ID"]}/d-sub/logs/default') - logging.debug('<<<<<<<<< Out %s method of %s file.', method_name, - _file_name) + method_name = 'start_jobs' + logging.debug('>>>>>>>>> In %s method of %s file.', method_name, _file_name) + + sight.enter_block('Worker Spawning', sight_pb2.Object()) + with open('/tmp/optimization_tasks.tsv', 'w') as outf: + outf.write('--env worker_id\t--env worker_location\n') + # num_tasks_per_worker = math.floor(num_trials / num_train_workers) + for worker_id in range(num_train_workers): + # tasks_for_cur_worker = num_tasks_per_worker + # # If _NUM_TRIALS is not evenly divisible by num_train_workers, add + # # the extra extra tasks to the first few workers. 
+ # if worker_id < num_trials % num_train_workers: + # tasks_for_cur_worker += 1 + outf.write(f'{worker_id}\t{sight.location.get()}\n') + sight.location.get().next() + + remote_script = ( + # 'gs://dsub_cameltrain/cameltrain/' + binary_path.split('/')[-1] + f'gs://{os.environ["PROJECT_ID"]}-sight/d-sub/binary/{str(sight.id)}/' + + binary_path.split('/')[-1]) + print(f'Uploading {binary_path}...') + subprocess.run(['gsutil', 'cp', '-c', binary_path, remote_script], check=True) + + if not FLAGS.service_account: + raise ValueError( + 'flag --service_account required for worker_mode as dsub_cloud_worker.') + + # provider = 'local' if deployment_mode == 'local' else 'google-cls-v2' + + # cd /x-sight && + command = ( + 'ls -l && echo "${SCRIPT}" && echo "${PYTHONPATH}" && python3 "${SCRIPT}"' + + f' --decision_mode={decision_mode}' + + f' --deployment_mode={deployment_mode}' + + f' --worker_mode={worker_mode}' + f' --optimizer_type={optimizer_type}' + # + f' --project_id={os.environ["PROJECT_ID"]}' + ) + if FLAGS.env_name: + command += f' --env_name={FLAGS.env_name}' + + logging_path = f'gs://{os.environ["PROJECT_ID"]}-sight/d-sub/logs/{sight.params.label}/{append_ist_time_to_logging_path_12hr()}/' + if (FLAGS.parent_id): + logging_path += f'{FLAGS.parent_id}/' + logging_path += str(sight.id) + + env_vars = [ + '--env', f'PARENT_LOG_ID={sight.id}', '--env', + f'PORT={service.get_port_number()}' + ] + + print("FLAGS.deployment_mode : ", FLAGS.deployment_mode) + if FLAGS.deployment_mode == 'vm': + if FLAGS.ip_addr == 'localhost': + raise ValueError("ip_address must be provided for workers") + env_vars += ['--env', f'IP_ADDR={FLAGS.ip_addr}'] + elif FLAGS.deployment_mode == 'distributed': + env_vars += ['--env', f'SIGHT_SERVICE_ID={service._SERVICE_ID}'] + + print('sight.id=%s' % sight.id) + args = [ + 'dsub', + '--provider=google-cls-v2', + f'--regions={_PROJECT_REGION.value}', + '--use-private-address', + # f'--location={_PROJECT_REGION.value}', + f'--image={docker_image}', + f'--machine-type={_DSUB_MACHINE_TYPE.value}', + f'--project={_PROJECT_ID.value}', + # f'--logging=gs://{os.environ["PROJECT_ID"]}-sight/d-sub/logs/{service._SERVICE_ID}/{sight.id}', + f'--logging={logging_path}', + # '--env', + # f'PARENT_LOG_ID={sight.id}', + # '--env', + # f'SIGHT_SERVICE_ID={service._SERVICE_ID}', + # '--env', + # f'PORT_NUMBER={service.get_port_number()}', + *env_vars, + '--input', + f'SCRIPT={remote_script}', + f'--command={command}', + f'--service-account={FLAGS.service_account}@{os.environ["PROJECT_ID"]}.iam.gserviceaccount.com', + f'--boot-disk-size={_DSUB_BOOT_DISK_SIZE.value}', + '--tasks', + '/tmp/optimization_tasks.tsv', + '--name', + _get_experiment_name(sight)[:63], + ] + + logging.info('CLI=%s', ' '.join(args)) + subprocess.run(args, check=True) + + sight.exit_block('Worker Spawning', sight_pb2.Object()) + logging.info('worker logs available at : %s', + f'gs://{os.environ["PROJECT_ID"]}/d-sub/logs/default') + logging.debug('<<<<<<<<< Out %s method of %s file.', method_name, _file_name) def start_job_in_dsub_local( @@ -361,7 +351,7 @@ def start_job_in_dsub_local( worker_mode: str, sight: Any, ): - """Starts the dsub workers that will run the optimization. + """Starts the dsub workers that will run the optimization. Args: num_train_workers: Number of workers to use in a training run. @@ -374,78 +364,74 @@ def start_job_in_dsub_local( worker_mode: add sight: The Sight object to be used for logging. 
""" - method_name = 'start_job_in_dsub_local' - logging.debug('>>>>>>>>> In %s method of %s file.', method_name, - _file_name) - - sight.enter_block('Worker Spawning locally', sight_pb2.Object()) - with open('/tmp/optimization_tasks.tsv', 'w') as outf: - outf.write( - '--env worker_id\t--env num_samples\t--env worker_location\n') - num_tasks_per_worker = math.floor(num_trials / num_train_workers) - for worker_id in range(num_train_workers): - tasks_for_cur_worker = num_tasks_per_worker - # If _NUM_TRIALS is not evenly divisible by num_train_workers, add - # the extra extra tasks to the first few workers. - if worker_id < num_trials % num_train_workers: - tasks_for_cur_worker += 1 - outf.write( - f'{worker_id}\t{tasks_for_cur_worker}\t{sight.location.get()}\n' - ) - sight.location.get().next() - - # remote_script = (f'gs://{os.environ["PROJECT_ID"]}-sight/d-sub/binary/{str(sight.id)}/' + - # binary_path.split('/')[-1]) - remote_script = binary_path - # print(f'Uploading {binary_path}...') - # subprocess.run(['gsutil', 'cp', '-c', binary_path, remote_script], - # check=True) - - # provider = 'google-cls-v2' if deployment_mode == 'distributed' else 'local' - - script_args = ( - f'--decision_mode={decision_mode} --deployment_mode={deployment_mode} --worker_mode={worker_mode} --optimizer_type={optimizer_type} ' - ) - # if FLAGS.service_account: - # script_args = (script_args + - # f'--service_account={FLAGS.service_account}') - - print('sight.id=%s' % sight.id) - args = [ - 'dsub', - '--provider=local', - f'--image={docker_image}', - f'--project={_PROJECT_ID.value}', - # f'--logging=gs://{os.environ["PROJECT_ID"]}/d-sub/logs/local/{sight.id}', - f'--logging=extra/dsub-logs', - '--env', - f'GOOGLE_CLOUD_PROJECT={os.environ["PROJECT_ID"]}', - # '--env', - # 'GOOGLE_APPLICATION_CREDENTIALS=/mnt/data/mount/file' + - # f'{FLAGS.gcloud_dir_path}/application_default_credentials.json', - '--env', - f'PARENT_LOG_ID={sight.id}', - # '--env', - # 'PYTHONPATH=/project', - '--env', - f'SIGHT_SERVICE_ID={service._SERVICE_ID}', - '--input', - f'SCRIPT={remote_script}', - '--input-recursive', - f'CLOUDSDK_CONFIG={os.path.expanduser("~")}/.config/gcloud', - f'--command=python3 "${{SCRIPT}}" {script_args}', - # + f'--optimizer_type={optimizer_type}', - # '--mount', - # 'RESOURCES=file:/' + f'{FLAGS.gcloud_dir_path}', - # + f'{os.path.expanduser("~")}/.config/gcloud', - '--tasks', - '/tmp/optimization_tasks.tsv', - '--name', - _get_experiment_name(sight)[:63], - ] - logging.info('CLI=%s', ' '.join(args)) - subprocess.run(args, check=True) - - sight.exit_block('Worker Spawning', sight_pb2.Object()) - logging.debug('<<<<<<<<< Out %s method of %s file.', method_name, - _file_name) + method_name = 'start_job_in_dsub_local' + logging.debug('>>>>>>>>> In %s method of %s file.', method_name, _file_name) + + sight.enter_block('Worker Spawning locally', sight_pb2.Object()) + with open('/tmp/optimization_tasks.tsv', 'w') as outf: + outf.write('--env worker_id\t--env num_samples\t--env worker_location\n') + num_tasks_per_worker = math.floor(num_trials / num_train_workers) + for worker_id in range(num_train_workers): + tasks_for_cur_worker = num_tasks_per_worker + # If _NUM_TRIALS is not evenly divisible by num_train_workers, add + # the extra extra tasks to the first few workers. 
+ if worker_id < num_trials % num_train_workers: + tasks_for_cur_worker += 1 + outf.write( + f'{worker_id}\t{tasks_for_cur_worker}\t{sight.location.get()}\n') + sight.location.get().next() + + # remote_script = (f'gs://{os.environ["PROJECT_ID"]}-sight/d-sub/binary/{str(sight.id)}/' + + # binary_path.split('/')[-1]) + remote_script = binary_path + # print(f'Uploading {binary_path}...') + # subprocess.run(['gsutil', 'cp', '-c', binary_path, remote_script], + # check=True) + + # provider = 'google-cls-v2' if deployment_mode == 'distributed' else 'local' + + script_args = ( + f'--decision_mode={decision_mode} --deployment_mode={deployment_mode} --worker_mode={worker_mode} --optimizer_type={optimizer_type} ' + ) + # if FLAGS.service_account: + # script_args = (script_args + + # f'--service_account={FLAGS.service_account}') + + print('sight.id=%s' % sight.id) + args = [ + 'dsub', + '--provider=local', + f'--image={docker_image}', + f'--project={_PROJECT_ID.value}', + # f'--logging=gs://{os.environ["PROJECT_ID"]}/d-sub/logs/local/{sight.id}', + f'--logging=extra/dsub-logs', + '--env', + f'GOOGLE_CLOUD_PROJECT={os.environ["PROJECT_ID"]}', + # '--env', + # 'GOOGLE_APPLICATION_CREDENTIALS=/mnt/data/mount/file' + + # f'{FLAGS.gcloud_dir_path}/application_default_credentials.json', + '--env', + f'PARENT_LOG_ID={sight.id}', + # '--env', + # 'PYTHONPATH=/project', + '--env', + f'SIGHT_SERVICE_ID={service._SERVICE_ID}', + '--input', + f'SCRIPT={remote_script}', + '--input-recursive', + f'CLOUDSDK_CONFIG={os.path.expanduser("~")}/.config/gcloud', + f'--command=python3 "${{SCRIPT}}" {script_args}', + # + f'--optimizer_type={optimizer_type}', + # '--mount', + # 'RESOURCES=file:/' + f'{FLAGS.gcloud_dir_path}', + # + f'{os.path.expanduser("~")}/.config/gcloud', + '--tasks', + '/tmp/optimization_tasks.tsv', + '--name', + _get_experiment_name(sight)[:63], + ] + logging.info('CLI=%s', ' '.join(args)) + subprocess.run(args, check=True) + + sight.exit_block('Worker Spawning', sight_pb2.Object()) + logging.debug('<<<<<<<<< Out %s method of %s file.', method_name, _file_name) diff --git a/py/sight/widgets/decision/utils.py b/py/sight/widgets/decision/utils.py index af3cf77..b2340d6 100644 --- a/py/sight/widgets/decision/utils.py +++ b/py/sight/widgets/decision/utils.py @@ -11,10 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - - """utility functions to be used in other functionalities.""" + def is_scalar(value): - scalar_types = (int, float, str, bool, type(None), bytes) - return isinstance(value, scalar_types) + scalar_types = (int, float, str, bool, type(None), bytes) + return isinstance(value, scalar_types) diff --git a/py/sight/widgets/numpy_sight/demo.py b/py/sight/widgets/numpy_sight/demo.py index 85e5237..5b56aed 100644 --- a/py/sight/widgets/numpy_sight/demo.py +++ b/py/sight/widgets/numpy_sight/demo.py @@ -11,13 +11,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- """Demo for the numpy bindings to the Sight logging library.""" from absl import app from absl import flags import numpy as np - from proto import sight_pb2 from py.sight import Sight from py.widgets.numpy_sight import numpy_sight diff --git a/py/sight/widgets/numpy_sight/numpy_sight.py b/py/sight/widgets/numpy_sight/numpy_sight.py index 76543fb..4597fa4 100644 --- a/py/sight/widgets/numpy_sight/numpy_sight.py +++ b/py/sight/widgets/numpy_sight/numpy_sight.py @@ -19,23 +19,22 @@ from helpers.logs.logs_handler import logger as logging import numpy as np - -from sight.proto import sight_pb2 from sight.location import Location +from sight.proto import sight_pb2 @dataclasses.dataclass class LabeledNpArray: - """A variant on np.ndarrays where the dimensions are labeled.""" + """A variant on np.ndarrays where the dimensions are labeled.""" - array: np.ndarray + array: np.ndarray - # The labels of all the array dimensions. - dim_label: List[str] + # The labels of all the array dimensions. + dim_label: List[str] - # For each dimension of array contains the string labels of each slice - # in that dimension. - dim_axis_values: List[List[str]] + # For each dimension of array contains the string labels of each slice + # in that dimension. + dim_axis_values: List[List[str]] def log( @@ -44,7 +43,7 @@ def log( sight: Any, frame: Optional[Any] = None, ) -> Optional[Location]: - """Documents numpy object in the Sight log if Sight is being used. + """Documents numpy object in the Sight log if Sight is being used. Args: label: The label that identifies this object. @@ -56,82 +55,82 @@ def log( Returns: The location of this object within the log. """ - if sight is None: - return None - - if not sight.is_logging_enabled(): - return None - - obj = sight_pb2.Object() - - if frame is None: - # pytype: disable=attribute-error - frame = inspect.currentframe().f_back - # pytype: enable=attribute-error - sight.set_object_code_loc(obj, frame) - - # obj_to_log is a scalar - if isinstance(obj_to_log, np.int64): - obj.sub_type = sight_pb2.Object.SubType.ST_VALUE - obj.value.sub_type = sight_pb2.Value.ST_INT64 - obj.value.int64_value = int(obj_to_log) - return sight.log_object(obj, True) - - if isinstance(obj_to_log, np.float64): - obj.sub_type = sight_pb2.Object.SubType.ST_VALUE - obj.value.sub_type = sight_pb2.Value.ST_DOUBLE - obj.value.double_value = int(obj_to_log) - return sight.log_object(obj, True) - - if isinstance(obj_to_log, bool): - obj.sub_type = sight_pb2.Object.SubType.ST_VALUE - obj.value.sub_type = sight_pb2.Value.ST_BOOL - obj.value.bool_value = int(obj_to_log) - return sight.log_object(obj, True) - - # obj_to_log is an array - if isinstance(obj_to_log, np.ndarray): - labeled_array = LabeledNpArray( - obj_to_log, - [f'dim{i}' for i in range(len(obj_to_log.shape))], - [[f'v{v}' for v in range(obj_to_log.shape[i])] - for i in range(len(obj_to_log.shape))], - ) - elif isinstance(obj_to_log, LabeledNpArray): - labeled_array = obj_to_log - else: - logging.error('Invalid type for array: %s', obj_to_log) - return None - - obj.sub_type = sight_pb2.Object.SubType.ST_TENSOR - obj.tensor.label = label - obj.tensor.shape.extend(labeled_array.array.shape) - # print('labeled_array=%s' % labeled_array) - # print('labeled_array.array.dtype=%s' % labeled_array.array.dtype) - if (labeled_array.array.dtype == float - or labeled_array.array.dtype == np.float32 - or labeled_array.array.dtype == np.float64): - obj.tensor.sub_type = sight_pb2.Tensor.ST_DOUBLE - obj.tensor.double_values.value.extend( - 
labeled_array.array.reshape(labeled_array.array.size).tolist())
- elif (
- # labeled_array.array.dtype == np.int
- # or
- labeled_array.array.dtype == np.int32
- or labeled_array.array.dtype == np.int64):
- obj.tensor.sub_type = sight_pb2.Tensor.ST_INT64
- obj.tensor.int64_values.value.extend(
- labeled_array.array.reshape(labeled_array.array.size).tolist())
- obj.tensor.dim_label.extend(labeled_array.dim_label)
- for dav in labeled_array.dim_axis_values:
- obj.tensor.dim_axis_values.append(
- sight_pb2.Tensor.StringValues(value=dav))
+ if sight is None:
+ return None
+
+ if not sight.is_logging_enabled():
+ return None
+
+ obj = sight_pb2.Object()
+
+ if frame is None:
+ # pytype: disable=attribute-error
+ frame = inspect.currentframe().f_back
+ # pytype: enable=attribute-error
+ sight.set_object_code_loc(obj, frame)
+
+ # obj_to_log is a scalar
+ if isinstance(obj_to_log, np.int64):
+ obj.sub_type = sight_pb2.Object.SubType.ST_VALUE
+ obj.value.sub_type = sight_pb2.Value.ST_INT64
+ obj.value.int64_value = int(obj_to_log)
+ return sight.log_object(obj, True)

+ if isinstance(obj_to_log, np.float64):
+ obj.sub_type = sight_pb2.Object.SubType.ST_VALUE
+ obj.value.sub_type = sight_pb2.Value.ST_DOUBLE
+ obj.value.double_value = float(obj_to_log)  # float(), not int(): keep the fractional part
+ return sight.log_object(obj, True)
+
+ if isinstance(obj_to_log, bool):
+ obj.sub_type = sight_pb2.Object.SubType.ST_VALUE
+ obj.value.sub_type = sight_pb2.Value.ST_BOOL
+ obj.value.bool_value = bool(obj_to_log)  # store the boolean itself, not an int cast
+ return sight.log_object(obj, True)
+
+ # obj_to_log is an array
+ if isinstance(obj_to_log, np.ndarray):
+ labeled_array = LabeledNpArray(
+ obj_to_log,
+ [f'dim{i}' for i in range(len(obj_to_log.shape))],
+ [[f'v{v}'
+ for v in range(obj_to_log.shape[i])]
+ for i in range(len(obj_to_log.shape))],
+ )
+ elif isinstance(obj_to_log, LabeledNpArray):
+ labeled_array = obj_to_log
+ else:
+ logging.error('Invalid type for array: %s', obj_to_log)
+ return None
+
+ obj.sub_type = sight_pb2.Object.SubType.ST_TENSOR
+ obj.tensor.label = label
+ obj.tensor.shape.extend(labeled_array.array.shape)
+ # print('labeled_array=%s' % labeled_array)
+ # print('labeled_array.array.dtype=%s' % labeled_array.array.dtype)
+ if (labeled_array.array.dtype == float or
+ labeled_array.array.dtype == np.float32 or
+ labeled_array.array.dtype == np.float64):
+ obj.tensor.sub_type = sight_pb2.Tensor.ST_DOUBLE
+ obj.tensor.double_values.value.extend(
+ labeled_array.array.reshape(labeled_array.array.size).tolist())
+ elif (
+ # labeled_array.array.dtype == np.int
+ # or
+ labeled_array.array.dtype == np.int32 or
+ labeled_array.array.dtype == np.int64):
+ obj.tensor.sub_type = sight_pb2.Tensor.ST_INT64
+ obj.tensor.int64_values.value.extend(
+ labeled_array.array.reshape(labeled_array.array.size).tolist())
+ obj.tensor.dim_label.extend(labeled_array.dim_label)
+ for dav in labeled_array.dim_axis_values:
+ obj.tensor.dim_axis_values.append(sight_pb2.Tensor.StringValues(value=dav))
+
+ return sight.log_object(obj, True)
+

 def from_log(sub_log: List[sight_pb2.Object]) -> Optional[np.ndarray]:
-  """Loads a numpy array from a log sub-sequence.
+  """Loads a numpy array from a log sub-sequence.

   Args:
     sub_log: The sub-sequence of log objects to load from.
@@ -139,17 +138,15 @@ def from_log(sub_log: List[sight_pb2.Object]) -> Optional[np.ndarray]:

   Returns:
     The loaded numpy array.
""" - obj = sub_log[0] + obj = sub_log[0] - if obj.sub_type != sight_pb2.Object.ST_TENSOR: - return None + if obj.sub_type != sight_pb2.Object.ST_TENSOR: + return None - # No case for int64 since it is treated as a Python int for now - if obj.tensor.sub_type == sight_pb2.Tensor.ST_DOUBLE: - return np.array(obj.tensor.double_values.value).reshape( - obj.tensor.shape) - if obj.tensor.sub_type == sight_pb2.Tensor.ST_INT64: - return np.array(obj.tensor.int64_values.value).reshape( - obj.tensor.shape) + # No case for int64 since it is treated as a Python int for now + if obj.tensor.sub_type == sight_pb2.Tensor.ST_DOUBLE: + return np.array(obj.tensor.double_values.value).reshape(obj.tensor.shape) + if obj.tensor.sub_type == sight_pb2.Tensor.ST_INT64: + return np.array(obj.tensor.int64_values.value).reshape(obj.tensor.shape) - return None + return None diff --git a/py/sight/widgets/numpy_sight/numpy_sight_test.py b/py/sight/widgets/numpy_sight/numpy_sight_test.py index d6ea49a..b771733 100644 --- a/py/sight/widgets/numpy_sight/numpy_sight_test.py +++ b/py/sight/widgets/numpy_sight/numpy_sight_test.py @@ -11,20 +11,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Tests for numpy_sight.""" import inspect from typing import Any, Sequence +from absl.testing import absltest import numpy as np - # from google3.analysis.dremel.core.capacitor.public.python import pywrap_record_reader from proto import sight_pb2 from sight.sight import Sight from sight.widgets.numpy_sight import numpy_sight from tensorflow.python.util.protobuf import compare -from absl.testing import absltest def _read_text_file(file_path: str) -> str: @@ -38,8 +36,7 @@ def _read_text_file(file_path: str) -> str: def _read_capacitor_file(file_path: str) -> Sequence[Any]: protos = [] record_reader = pywrap_record_reader.RecordReader.CreateFromPath( - file_path, ['*'], 60.0 - ) + file_path, ['*'], 60.0) protos.extend(record_reader.IterRecords()) return sorted(protos, key=lambda x: x.index) @@ -48,14 +45,12 @@ def _create_attributes(sight: Sight) -> Sequence[sight_pb2.Attribute]: attribute = [] if hasattr(sight, 'change_list_number'): attribute.append( - sight_pb2.Attribute( - key='change_list_number', value=str(sight.change_list_number) - ) - ) + sight_pb2.Attribute(key='change_list_number', + value=str(sight.change_list_number))) if hasattr(sight, 'citc_snapshot'): attribute.append( - sight_pb2.Attribute(key='citc_snapshot', value=str(sight.citc_snapshot)) - ) + sight_pb2.Attribute(key='citc_snapshot', + value=str(sight.citc_snapshot))) return attribute @@ -65,8 +60,7 @@ class NumpySightTest(absltest.TestCase): def setUpClass(cls): super().setUpClass() cls.test_path = ( - 'googlex/cortex/sight/py/widgets/numpy_sight/numpy_sight_test.py' - ) + 'googlex/cortex/sight/py/widgets/numpy_sight/numpy_sight_test.py') def testLogFloatArrayToText(self): # SETUP @@ -87,9 +81,8 @@ def testLogFloatArrayToText(self): # ASSERT expected_log = '' - actual_log = _read_text_file( - params.log_dir_path + '/testLogFloatArrayToText.txt' - ) + actual_log = _read_text_file(params.log_dir_path + + '/testLogFloatArrayToText.txt') self.assertEqual( expected_log, actual_log, @@ -109,9 +102,9 @@ def testLogFloatArrayToCapacitorFile(self): # ACT frameinfo = inspect.getframeinfo(inspect.currentframe()) with Sight(params) as sight: - numpy_sight.log( - 'array', np.array([[1, 2.5, 3], [4, 5.5, 6]], dtype=np.float32), sight - ) + 
numpy_sight.log('array', + np.array([[1, 2.5, 3], [4, 5.5, 6]], dtype=np.float32), + sight) # ASSERT expected_log = [ @@ -129,15 +122,13 @@ def testLogFloatArrayToCapacitorFile(self): label='array', shape=[2, 3], double_values=sight_pb2.Tensor.DoubleValues( - value=[1, 2.5, 3, 4, 5.5, 6] - ), + value=[1, 2.5, 3, 4, 5.5, 6]), ), ) ] actual_log = _read_capacitor_file( - params.log_dir_path + '/testLogFloatArrayToCapacitorFile.capacitor' - ) + params.log_dir_path + '/testLogFloatArrayToCapacitorFile.capacitor') self.assertEqual(len(expected_log), len(actual_log)) for i in range(0, len(expected_log)): @@ -146,8 +137,8 @@ def testLogFloatArrayToCapacitorFile(self): expected_log[i], actual_log[i], 'Target code and generated logs are different. Expected' - ' log[%d]:\n%s\nActual log[%d]:\n%s\n' - % (i, expected_log[i], i, actual_log[i]), + ' log[%d]:\n%s\nActual log[%d]:\n%s\n' % + (i, expected_log[i], i, actual_log[i]), ignored_fields=['line'], ) @@ -163,9 +154,8 @@ def testLogIntArrayToCapacitorFile(self): # ACT frameinfo = inspect.getframeinfo(inspect.currentframe()) with Sight(params) as sight: - numpy_sight.log( - 'array', np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int64), sight - ) + numpy_sight.log('array', np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int64), + sight) # ASSERT expected_log = [ @@ -183,15 +173,13 @@ def testLogIntArrayToCapacitorFile(self): label='array', shape=[2, 3], int64_values=sight_pb2.Tensor.Int64Values( - value=[1, 2, 3, 4, 5, 6] - ), + value=[1, 2, 3, 4, 5, 6]), ), ) ] actual_log = _read_capacitor_file( - params.log_dir_path + '/testLogIntArrayToCapacitorFile.capacitor' - ) + params.log_dir_path + '/testLogIntArrayToCapacitorFile.capacitor') self.assertEqual(len(expected_log), len(actual_log)) for i in range(0, len(expected_log)): @@ -200,8 +188,8 @@ def testLogIntArrayToCapacitorFile(self): expected_log[i], actual_log[i], 'Target code and generated logs are different. 
Expected' - ' log[%d]:\n%s\nActual log[%d]:\n%s\n' - % (i, expected_log[i], i, actual_log[i]), + ' log[%d]:\n%s\nActual log[%d]:\n%s\n' % + (i, expected_log[i], i, actual_log[i]), ignored_fields=['line'], ) diff --git a/py/sight/widgets/pandas_sight/pandas_sight.py b/py/sight/widgets/pandas_sight/pandas_sight.py index e9c4121..a5aff25 100644 --- a/py/sight/widgets/pandas_sight/pandas_sight.py +++ b/py/sight/widgets/pandas_sight/pandas_sight.py @@ -20,9 +20,8 @@ from helpers.logs.logs_handler import logger as logging import numpy as np import pandas as pd - -from sight.proto import sight_pb2 from sight.location import Location +from sight.proto import sight_pb2 def _df_start( @@ -30,11 +29,11 @@ def _df_start( sight: Any, frame: Any, ) -> None: - start_obj = sight_pb2.Object() - start_obj.sub_type = sight_pb2.Object.SubType.ST_BLOCK_START - start_obj.block_start.sub_type = sight_pb2.BlockStart.ST_LIST - start_obj.block_start.list.sub_type = sight_pb2.ListStart.ST_HETEROGENEOUS - sight.enter_block(label, start_obj, frame) + start_obj = sight_pb2.Object() + start_obj.sub_type = sight_pb2.Object.SubType.ST_BLOCK_START + start_obj.block_start.sub_type = sight_pb2.BlockStart.ST_LIST + start_obj.block_start.list.sub_type = sight_pb2.ListStart.ST_HETEROGENEOUS + sight.enter_block(label, start_obj, frame) def _df_end( @@ -42,8 +41,8 @@ def _df_end( sight: Any, frame: Any, ) -> None: - end_obj = sight_pb2.Object(sub_type=sight_pb2.Object.SubType.ST_BLOCK_END) - sight.exit_block(label, end_obj) + end_obj = sight_pb2.Object(sub_type=sight_pb2.Object.SubType.ST_BLOCK_END) + sight.exit_block(label, end_obj) def log( @@ -52,7 +51,7 @@ def log( sight: Any, frame: Optional[Any] = None, ) -> Optional[Location]: - """Documents pandas DataFrame object in the Sight log if Sight is being used. + """Documents pandas DataFrame object in the Sight log if Sight is being used. Args: label: The label that identifies this object. obj_to_log: The pandas frame to be logged. @@ -62,76 +61,74 @@ def log( Returns: The location of this object within the log. 
""" - if sight is None: - return None - - if not sight.is_logging_enabled(): - return None - - if frame is None: - # pytype: disable=attribute-error - frame = inspect.currentframe().f_back - # pytype: enable=attribute-error - - _df_start(label, sight, frame) - - for i in range(df.shape[1]): - nv_start_obj = sight_pb2.Object() - nv_start_obj.sub_type = sight_pb2.Object.SubType.ST_BLOCK_START - nv_start_obj.block_start.sub_type = sight_pb2.BlockStart.ST_NAMED_VALUE - sight.enter_block(str(df.columns[i]), nv_start_obj, frame) - - obj = sight_pb2.Object() - sight.set_object_code_loc(obj, frame) - - obj.sub_type = sight_pb2.Object.SubType.ST_TENSOR - obj.tensor.label = str(df.columns[i]) - obj.tensor.shape.append(df.shape[0]) - if (df.dtypes[df.columns[i]] == float - or df.dtypes[df.columns[i]] == np.float32 - or df.dtypes[df.columns[i]] == np.float64): - obj.tensor.sub_type = sight_pb2.Tensor.ST_DOUBLE - obj.tensor.double_values.value.extend(df[df.columns[i]].tolist()) - elif ( - # df.dtypes[df.columns[i]] == np.int - # or - df.dtypes[df.columns[i]] == np.int32 - or df.dtypes[df.columns[i]] == np.int64): - obj.tensor.sub_type = sight_pb2.Tensor.ST_INT64 - obj.tensor.int64_values.value.extend(df[df.columns[i]].tolist()) - else: - obj.tensor.sub_type = sight_pb2.Tensor.ST_STRING - obj.tensor.string_values.value.extend( - [str(v) for v in df[df.columns[i]].tolist()]) - obj.tensor.dim_label.append(str(df.columns[i])) - - sight.log_object(obj, True) - - nv_end_obj = sight_pb2.Object() - nv_end_obj.sub_type = sight_pb2.Object.SubType.ST_BLOCK_END - sight.exit_block(label, nv_end_obj) - - _df_end(label, sight, frame) + if sight is None: + return None + + if not sight.is_logging_enabled(): + return None + + if frame is None: + # pytype: disable=attribute-error + frame = inspect.currentframe().f_back + # pytype: enable=attribute-error + + _df_start(label, sight, frame) + + for i in range(df.shape[1]): + nv_start_obj = sight_pb2.Object() + nv_start_obj.sub_type = sight_pb2.Object.SubType.ST_BLOCK_START + nv_start_obj.block_start.sub_type = sight_pb2.BlockStart.ST_NAMED_VALUE + sight.enter_block(str(df.columns[i]), nv_start_obj, frame) + + obj = sight_pb2.Object() + sight.set_object_code_loc(obj, frame) + + obj.sub_type = sight_pb2.Object.SubType.ST_TENSOR + obj.tensor.label = str(df.columns[i]) + obj.tensor.shape.append(df.shape[0]) + if (df.dtypes[df.columns[i]] == float or + df.dtypes[df.columns[i]] == np.float32 or + df.dtypes[df.columns[i]] == np.float64): + obj.tensor.sub_type = sight_pb2.Tensor.ST_DOUBLE + obj.tensor.double_values.value.extend(df[df.columns[i]].tolist()) + elif ( + # df.dtypes[df.columns[i]] == np.int + # or + df.dtypes[df.columns[i]] == np.int32 or + df.dtypes[df.columns[i]] == np.int64): + obj.tensor.sub_type = sight_pb2.Tensor.ST_INT64 + obj.tensor.int64_values.value.extend(df[df.columns[i]].tolist()) + else: + obj.tensor.sub_type = sight_pb2.Tensor.ST_STRING + obj.tensor.string_values.value.extend( + [str(v) for v in df[df.columns[i]].tolist()]) + obj.tensor.dim_label.append(str(df.columns[i])) + + sight.log_object(obj, True) + + nv_end_obj = sight_pb2.Object() + nv_end_obj.sub_type = sight_pb2.Object.SubType.ST_BLOCK_END + sight.exit_block(label, nv_end_obj) + + _df_end(label, sight, frame) def from_log(sub_log: List[sight_pb2.Object]) -> Optional[np.ndarray]: - """Loads a numpy array from a log sub-sequence. + """Loads a numpy array from a log sub-sequence. Args: sub_log: The sub-sequence of log objects to load from. Returns: The loaded numpy array. 
""" - obj = sub_log[0] + obj = sub_log[0] - if obj.sub_type != sight_pb2.Object.ST_TENSOR: - return None + if obj.sub_type != sight_pb2.Object.ST_TENSOR: + return None - # No case for int64 since it is treated as a Python int for now - if obj.tensor.sub_type == sight_pb2.Tensor.ST_DOUBLE: - return np.array(obj.tensor.double_values.value).reshape( - obj.tensor.shape) - if obj.tensor.sub_type == sight_pb2.Tensor.ST_INT64: - return np.array(obj.tensor.int64_values.value).reshape( - obj.tensor.shape) + # No case for int64 since it is treated as a Python int for now + if obj.tensor.sub_type == sight_pb2.Tensor.ST_DOUBLE: + return np.array(obj.tensor.double_values.value).reshape(obj.tensor.shape) + if obj.tensor.sub_type == sight_pb2.Tensor.ST_INT64: + return np.array(obj.tensor.int64_values.value).reshape(obj.tensor.shape) - return None + return None diff --git a/py/sight/widgets/simulation/analysis_utils.py b/py/sight/widgets/simulation/analysis_utils.py index c872323..83a0edf 100644 --- a/py/sight/widgets/simulation/analysis_utils.py +++ b/py/sight/widgets/simulation/analysis_utils.py @@ -13,12 +13,19 @@ # limitations under the License. """Utilities for analyzing Sight logs that document simulation runs.""" -from typing import Any, Dict, Iterable, Iterator, List, Optional, Sequence, Tuple +from typing import ( + Any, + Dict, + Iterable, + Iterator, + List, + Optional, + Sequence, + Tuple +) -from helpers.logs.logs_handler import logger as logging from helpers.logs.logs_handler import logger as logging import numpy as np - from sight import data_structures from sight.proto import sight_pb2 @@ -30,21 +37,21 @@ def single_objects_filter(obj: sight_pb2.Object, sub_type: sight_pb2.Object.SubType) -> bool: - return obj.sub_type == sub_type + return obj.sub_type == sub_type def start_objects_filter(obj: sight_pb2.Object, block_sub_type: sight_pb2.BlockStart.SubType) -> bool: - return (obj.sub_type == sight_pb2.Object.ST_BLOCK_START - and obj.block_start.sub_type == block_sub_type) + return (obj.sub_type == sight_pb2.Object.ST_BLOCK_START and + obj.block_start.sub_type == block_sub_type) def log_uid(obj: sight_pb2.Object) -> str: - for a in obj.attribute: - if a.key != 'log_uid': - continue - return a.value - return '' + for a in obj.attribute: + if a.key != 'log_uid': + continue + return a.value + return '' def single_objects_key_parent( @@ -52,15 +59,14 @@ def single_objects_key_parent( sub_type: sight_pb2.Object.SubType, label: str, ) -> beam.pvalue.PCollection[KeyedObjMap]: - return (object_col - | 'single_objects_key_parent Filter ' + label >> - beam.Filter(lambda obj: single_objects_filter(obj, sub_type)) - | 'single_objects_key_parent Map ' + label >> beam.Map(lambda x: ( - f'{x.ancestor_start_location[-2]} - {log_uid(x)}', - { - label: x - }, - ))) + return (object_col | 'single_objects_key_parent Filter ' + label >> + beam.Filter(lambda obj: single_objects_filter(obj, sub_type)) | + 'single_objects_key_parent Map ' + label >> beam.Map(lambda x: ( + f'{x.ancestor_start_location[-2]} - {log_uid(x)}', + { + label: x + }, + ))) def single_objects_key_log_uid( @@ -68,13 +74,12 @@ def single_objects_key_log_uid( sub_type: sight_pb2.Object.SubType, label: str, ) -> beam.pvalue.PCollection[KeyedObjMap]: - return (object_col - | 'single_objects_key_log_uid Filter ' + label >> - beam.Filter(lambda obj: single_objects_filter(obj, sub_type)) - | 'single_objects_key_log_uid Map ' + label >> - beam.Map(lambda x: (log_uid(x), { - label: x - }))) + return (object_col | 'single_objects_key_log_uid Filter ' + 
label >> + beam.Filter(lambda obj: single_objects_filter(obj, sub_type)) | + 'single_objects_key_log_uid Map ' + label >> beam.Map(lambda x: + (log_uid(x), { + label: x + }))) def block_start_objects( @@ -82,12 +87,11 @@ def block_start_objects( block_sub_type: sight_pb2.BlockStart.SubType, label: str, ) -> beam.pvalue.PCollection[ObjMap]: - return (object_col - | 'objects Filter ' + label >> - beam.Filter(lambda obj: start_objects_filter(obj, block_sub_type)) - | 'objects Map ' + label >> beam.Map(lambda x: ({ - label: x - }))) + return (object_col | 'objects Filter ' + label >> + beam.Filter(lambda obj: start_objects_filter(obj, block_sub_type)) | + 'objects Map ' + label >> beam.Map(lambda x: ({ + label: x + }))) def block_start_objects_key_self( @@ -95,13 +99,12 @@ def block_start_objects_key_self( block_sub_type: sight_pb2.BlockStart.SubType, label: str, ) -> beam.pvalue.PCollection[KeyedObjMap]: - return (object_col - | 'objects_key_self Filter ' + label >> - beam.Filter(lambda obj: start_objects_filter(obj, block_sub_type)) - | 'objects_key_self Map ' + label >> - beam.Map(lambda x: (f'{x.location} - {log_uid(x)}', { - label: x - }))) + return (object_col | 'objects_key_self Filter ' + label >> + beam.Filter(lambda obj: start_objects_filter(obj, block_sub_type)) | + 'objects_key_self Map ' + label >> + beam.Map(lambda x: (f'{x.location} - {log_uid(x)}', { + label: x + }))) def block_start_objects_key_parent( @@ -109,15 +112,14 @@ def block_start_objects_key_parent( block_sub_type: sight_pb2.BlockStart.SubType, label: str, ) -> beam.pvalue.PCollection[KeyedObjMap]: - return (object_col - | 'objects_key_parent Filter ' + label >> - beam.Filter(lambda obj: start_objects_filter(obj, block_sub_type)) - | 'objects_key_parent Map ' + label >> beam.Map(lambda x: ( - f'{x.ancestor_start_location[-2]} - {log_uid(x)}', - { - label: x - }, - ))) + return (object_col | 'objects_key_parent Filter ' + label >> + beam.Filter(lambda obj: start_objects_filter(obj, block_sub_type)) | + 'objects_key_parent Map ' + label >> beam.Map(lambda x: ( + f'{x.ancestor_start_location[-2]} - {log_uid(x)}', + { + label: x + }, + ))) def block_start_objects_key_log_uid( @@ -125,108 +127,103 @@ def block_start_objects_key_log_uid( block_sub_type: sight_pb2.BlockStart.SubType, label: str, ) -> beam.pvalue.PCollection[KeyedObjMap]: - return ( - object_col - | 'objects_key_log_uid Filter ' + label >> - beam.Filter(lambda obj: start_objects_filter(obj, block_sub_type)) - | - 'objects_key_log_uid Map ' + label >> beam.Map(lambda x: (log_uid(x), { - label: x - }))) + return ( + object_col | 'objects_key_log_uid Filter ' + label >> + beam.Filter(lambda obj: start_objects_filter(obj, block_sub_type)) | + 'objects_key_log_uid Map ' + label >> beam.Map(lambda x: (log_uid(x), { + label: x + }))) def create_constant_key( pcol_label: str, pcol: beam.pvalue.PCollection[ObjMap] ) -> beam.pvalue.PCollection[KeyedObjMap]: - return pcol | pcol_label + ' create_constant_key' >> beam.Map(lambda x: - ('', x)) + return pcol | pcol_label + ' create_constant_key' >> beam.Map(lambda x: + ('', x)) def create_log_uid_key( pcol_label: str, new_key_label: str, pcol: beam.pvalue.PCollection[ObjMap] ) -> beam.pvalue.PCollection[KeyedObjMap]: - return (pcol - | pcol_label + ' ' + new_key_label + ' create_log_uid_key' >> - beam.Map(lambda x: (log_uid(x[new_key_label]), x))) + return (pcol | pcol_label + ' ' + new_key_label + ' create_log_uid_key' >> + beam.Map(lambda x: (log_uid(x[new_key_label]), x))) def create_loc_log_uid_key( pcol_label: str, 
new_key_label: str, pcol: beam.pvalue.PCollection[ObjMap] ) -> beam.pvalue.PCollection[KeyedObjMap]: - return (pcol - | pcol_label + ' ' + new_key_label + ' create_loc_log_uid_key' >> - beam.Map(lambda x: ( - f'{x[new_key_label].location} - {log_uid(x[new_key_label])}', - x, - ))) + return (pcol | pcol_label + ' ' + new_key_label + ' create_loc_log_uid_key' >> + beam.Map(lambda x: ( + f'{x[new_key_label].location} - {log_uid(x[new_key_label])}', + x, + ))) def create_named_value_label_log_uid_key( pcol_label: str, pcol: beam.pvalue.PCollection[ObjMap] ) -> beam.pvalue.PCollection[KeyedObjMap]: - return (pcol - | pcol_label + ' create_named_value_label_log_uid_key' >> - beam.Map(lambda x: ( - (f'{x["named_value"].block_start.label} -' - f' {log_uid(x["named_value"])}'), - x, - ))) + return (pcol | pcol_label + ' create_named_value_label_log_uid_key' >> + beam.Map(lambda x: ( + (f'{x["named_value"].block_start.label} -' + f' {log_uid(x["named_value"])}'), + x, + ))) def create_var_key( pcol_label: str, pcol: beam.pvalue.PCollection[ObjMap] ) -> beam.pvalue.PCollection[KeyedObjMap]: - return pcol | pcol_label + ' create_var_key' >> beam.Map( - lambda x: (x['variable'], x)) + return pcol | pcol_label + ' create_var_key' >> beam.Map(lambda x: + (x['variable'], x)) def create_sim_ts_index_key( pcol_label: str, pcol: beam.pvalue.PCollection[ObjMap] ) -> beam.pvalue.PCollection[KeyedObjMap]: - return pcol | pcol_label + ' create_sim_ts_index_key' >> beam.Map(lambda x: ( - (f'{log_uid(x["simulation"])}-{x["simulation"].location} {x["simulation_time_step"].block_start.simulation_time_step_start.time_step_index[0]}' - ), - x, - )) + return pcol | pcol_label + ' create_sim_ts_index_key' >> beam.Map(lambda x: ( + (f'{log_uid(x["simulation"])}-{x["simulation"].location} {x["simulation_time_step"].block_start.simulation_time_step_start.time_step_index[0]}' + ), + x, + )) def adjust_sim_ts_to_next_index_key( pcol_label: str, pcol: beam.pvalue.PCollection[Dict[str, sight_pb2.Object]] ) -> beam.pvalue.PCollection[KeyedObjMap]: - return pcol | pcol_label + ' adjust_sim_ts_to_next_index_key' >> beam.Map( - lambda x: (f'{x[0].split()[0]} {int(x[0].split()[1]) + 1}', x[1])) + return pcol | pcol_label + ' adjust_sim_ts_to_next_index_key' >> beam.Map( + lambda x: (f'{x[0].split()[0]} {int(x[0].split()[1]) + 1}', x[1])) def remove_key( pcol_label: str, pcol: beam.pvalue.PCollection[KeyedObjMap] ) -> beam.pvalue.PCollection[ObjMap]: - return pcol | pcol_label + ' remove_key' >> beam.Map(lambda x: x[1]) + return pcol | pcol_label + ' remove_key' >> beam.Map(lambda x: x[1]) def change_key_to_self( pcol_label: str, obj_label: str, pcol: beam.pvalue.PCollection[KeyedObjMap] ) -> beam.pvalue.PCollection[ObjMap]: - return pcol | pcol_label + ' change_key_to_self' >> beam.Map(lambda x: ( - f'{x[1][obj_label].location} - {log_uid(x[1][obj_label])}', - x[1], - )) + return pcol | pcol_label + ' change_key_to_self' >> beam.Map(lambda x: ( + f'{x[1][obj_label].location} - {log_uid(x[1][obj_label])}', + x[1], + )) def change_key_to_parent( pcol_label: str, obj_label: str, pcol: beam.pvalue.PCollection[KeyedObjMap] ) -> beam.pvalue.PCollection[ObjMap]: - return pcol | pcol_label + ' change_key_to_parent' >> beam.Map(lambda x: ( - (f'{x[1][obj_label].ancestor_start_location[-2]} -' - f' {log_uid(x[1][obj_label])}'), - x[1], - )) + return pcol | pcol_label + ' change_key_to_parent' >> beam.Map(lambda x: ( + (f'{x[1][obj_label].ancestor_start_location[-2]} -' + f' {log_uid(x[1][obj_label])}'), + x[1], + )) class 
ExtractAncestorBlockStartLocations(beam.DoFn): - """Beam stage that extracts each object's ancestor context locations.""" + """Beam stage that extracts each object's ancestor context locations.""" - def process(self, obj: sight_pb2.Object) -> Iterator[KeyedObjMap]: - """Extracts each object's ancestor context locations. + def process(self, obj: sight_pb2.Object) -> Iterator[KeyedObjMap]: + """Extracts each object's ancestor context locations. Includes the starting point of the block's containing blocks and of the object ends a block, the starting point of that block. @@ -238,28 +235,26 @@ def process(self, obj: sight_pb2.Object) -> Iterator[KeyedObjMap]: Pairs with the starting point of each object's ancestral context block and the object itself. """ - for ancestor_start_location in obj.ancestor_start_location: - yield (f'{ancestor_start_location} - {log_uid(obj)}', { - 'object': obj - }) + for ancestor_start_location in obj.ancestor_start_location: + yield (f'{ancestor_start_location} - {log_uid(obj)}', {'object': obj}) - if obj.sub_type == sight_pb2.Object.ST_BLOCK_END: - yield ( - f'{obj.block_end.location_of_block_start} - {log_uid(obj)}', - { - 'object': obj - }, - ) + if obj.sub_type == sight_pb2.Object.ST_BLOCK_END: + yield ( + f'{obj.block_end.location_of_block_start} - {log_uid(obj)}', + { + 'object': obj + }, + ) class AddAncestorKeysToObjs(beam.DoFn): - """Beam stage that extracts each object's ancestor context locations.""" + """Beam stage that extracts each object's ancestor context locations.""" - def __init__(self, anchor_obj_label: str): - self.anchor_obj_label = anchor_obj_label + def __init__(self, anchor_obj_label: str): + self.anchor_obj_label = anchor_obj_label - def process(self, task: ObjMap) -> Iterator[KeyedObjMap]: - """Attaches the ancestor locations of each object under .anchor_obj_label. + def process(self, task: ObjMap) -> Iterator[KeyedObjMap]: + """Attaches the ancestor locations of each object under .anchor_obj_label. Includes the starting point of the block's containing blocks and of the object ends a block, the starting point of that block. @@ -271,40 +266,38 @@ def process(self, task: ObjMap) -> Iterator[KeyedObjMap]: Pairs with the starting point of each object's ancestral context block and the map itself. 
""" - obj = task[self.anchor_obj_label] - for ancestor_start_location in obj.ancestor_start_location: - yield (f'{ancestor_start_location} - {log_uid(obj)}', task) + obj = task[self.anchor_obj_label] + for ancestor_start_location in obj.ancestor_start_location: + yield (f'{ancestor_start_location} - {log_uid(obj)}', task) - if obj.sub_type == sight_pb2.Object.ST_BLOCK_END: - yield (f'{obj.block_end.location_of_block_start} - {log_uid(obj)}', - task) + if obj.sub_type == sight_pb2.Object.ST_BLOCK_END: + yield (f'{obj.block_end.location_of_block_start} - {log_uid(obj)}', task) def objs_with_ancestor_keys( - objects_map: KeyedObjMap, - anchor_obj_label: str) -> beam.pvalue.PCollection[KeyedObjMap]: - return remove_key( - 'objs_with_ancestor_keys ' + anchor_obj_label, objects_map - ) | 'objs_with_ancestor_keys ' + anchor_obj_label >> beam.ParDo( - AddAncestorKeysToObjs(anchor_obj_label)) + objects_map: KeyedObjMap, + anchor_obj_label: str) -> beam.pvalue.PCollection[KeyedObjMap]: + return remove_key( + 'objs_with_ancestor_keys ' + anchor_obj_label, objects_map + ) | 'objs_with_ancestor_keys ' + anchor_obj_label >> beam.ParDo( + AddAncestorKeysToObjs(anchor_obj_label)) class CombineRecords(beam.DoFn): - """Combines CoGroupByKey-joined dicts from two sources.""" - - def __init__( - self, - source1_label: str, - source2_label: str, - ): - self.source1_label = source1_label - self.source2_label = source2_label - - def process( - self, task: Tuple[Any, Dict[str, Sequence[Dict[str, - sight_pb2.Object]]]] - ) -> Iterator[ObjMap]: - """Combines CoGroupByKey-joined dicts from two sources. + """Combines CoGroupByKey-joined dicts from two sources.""" + + def __init__( + self, + source1_label: str, + source2_label: str, + ): + self.source1_label = source1_label + self.source2_label = source2_label + + def process( + self, task: Tuple[Any, Dict[str, Sequence[Dict[str, sight_pb2.Object]]]] + ) -> Iterator[ObjMap]: + """Combines CoGroupByKey-joined dicts from two sources. Args: task: Length <=1 sequences of dicts from two sources, indexed at labels @@ -316,36 +309,36 @@ def process( If the length of a given source is 0, its key-value pairs are not included in the output dict. """ - x: Dict[str, Sequence[ObjMap]] = task[1] - source1: Sequence[ObjMap] = x[self.source1_label] - if len(source1) > 1: - logging.error( - 'Source 1 (%s) has %d entries, which is >1.', - self.source1_label, - len(source1), - ) - return - source2: List[ObjMap] = list(task[1][self.source2_label]) - if len(source2) > 1: - logging.error( - 'Source 2 (%s) has %d entries, which is >1.', - self.source2_label, - len(source2), - ) - return - - result: ObjMap = {} - if source1: - for key, val in source1[0].items(): - result[key] = val - if source2: - for key, val in source2[0].items(): - result[key] = val - yield result + x: Dict[str, Sequence[ObjMap]] = task[1] + source1: Sequence[ObjMap] = x[self.source1_label] + if len(source1) > 1: + logging.error( + 'Source 1 (%s) has %d entries, which is >1.', + self.source1_label, + len(source1), + ) + return + source2: List[ObjMap] = list(task[1][self.source2_label]) + if len(source2) > 1: + logging.error( + 'Source 2 (%s) has %d entries, which is >1.', + self.source2_label, + len(source2), + ) + return + + result: ObjMap = {} + if source1: + for key, val in source1[0].items(): + result[key] = val + if source2: + for key, val in source2[0].items(): + result[key] = val + yield result class ParentChildPairs(beam.DoFn): - """Given a parent and a list of children, emits parent-child pairs. 
+ """Given a parent and a list of children, emits parent-child pairs. The key of these pairs is the location of the child object. @@ -356,21 +349,20 @@ class ParentChildPairs(beam.DoFn): the location of the parent or the child object. """ - def __init__( - self, - ancestors: str, - child: str, - index_by_parent: bool, - ): - self.ancestors = ancestors - self.child = child - self.index_by_parent = index_by_parent - - def process( - self, task: Tuple[str, Dict[str, Sequence[Dict[str, - sight_pb2.Object]]]] - ) -> Iterator[KeyedObjMap]: - """Combines objects and their ancestors. + def __init__( + self, + ancestors: str, + child: str, + index_by_parent: bool, + ): + self.ancestors = ancestors + self.child = child + self.index_by_parent = index_by_parent + + def process( + self, task: Tuple[str, Dict[str, Sequence[Dict[str, sight_pb2.Object]]]] + ) -> Iterator[KeyedObjMap]: + """Combines objects and their ancestors. Args: task: A pair of a key and - a sequence of ancestor log objects (assumed to @@ -382,55 +374,54 @@ def process( second is a dictionary that contains all the ancestors and the child object. """ - ancestors_objs = task[1][self.ancestors] - child_objs = task[1][self.child] - - # Skip named values that are not directly contained by ancestors. - if not ancestors_objs: - return - - if len(ancestors_objs) != 1: - logging.error( - ('Child objects cannot be contained within multiple ancestors!.' - ' task=%s'), - task, - ) - return - - for child_obj in child_objs: - cur = ancestors_objs[0].copy() - for key in child_obj: - if key not in cur: - cur[key] = child_obj[key] - if self.index_by_parent: - location_idx = task[0] - else: - location_idx = (f'{child_obj[self.child].location} -' - f' {log_uid(child_obj[self.child])}') - yield (location_idx, cur) + ancestors_objs = task[1][self.ancestors] + child_objs = task[1][self.child] + + # Skip named values that are not directly contained by ancestors. + if not ancestors_objs: + return + + if len(ancestors_objs) != 1: + logging.error( + ('Child objects cannot be contained within multiple ancestors!.' + ' task=%s'), + task, + ) + return + + for child_obj in child_objs: + cur = ancestors_objs[0].copy() + for key in child_obj: + if key not in cur: + cur[key] = child_obj[key] + if self.index_by_parent: + location_idx = task[0] + else: + location_idx = (f'{child_obj[self.child].location} -' + f' {log_uid(child_obj[self.child])}') + yield (location_idx, cur) class SimulationStateNamedValuesToObjects(beam.DoFn): - """Converts named value sub-logs within simulation containers into objects. + """Converts named value sub-logs within simulation containers into objects. Attributes: ancestors: Key of the ancestors object within the task dicts. value_objects: Key of the value_objects within the task dicts. """ - def __init__( - self, - ancestors: str, - value_objects: str, - ): - self.ancestors = ancestors - self.value_objects = value_objects + def __init__( + self, + ancestors: str, + value_objects: str, + ): + self.ancestors = ancestors + self.value_objects = value_objects - def process( - self, task: Tuple[str, Dict[str, Sequence[Dict[str, - sight_pb2.Object]]]] - ) -> Iterator[KeyedObjMap]: - """Converts named value sub-logs within simulation containers into values. + def process( + self, task: Tuple[str, Dict[str, Sequence[Dict[str, sight_pb2.Object]]]] + ) -> Iterator[KeyedObjMap]: + """Converts named value sub-logs within simulation containers into values. 
Args: task: A simulation container and the start of a named object, paired with @@ -440,38 +431,38 @@ def process( Tuples where the first element is the location of the container object and the second maps the container and the value object. """ - # Skip named values that are not directly contained by a simulation - # block (parameters or state). - if not task[1][self.ancestors]: - return - - if len(task[1][self.ancestors]) != 1: - logging.error( - ('Named values sub-logs cannot be contained within multiple named' - ' values or containers!. task=%s'), - task, - ) - return - - if isinstance(task[1][self.ancestors][0], dict): - log_and_obj: ObjMap = task[1][self.ancestors][0].copy() - else: - log_and_obj: ObjMap = {} - log_and_obj['object'] = data_structures.from_log( - [o['object'] for o in task[1][self.value_objects]]) - yield ( - (f'{log_and_obj["named_value"].location} -' - f' {log_uid(log_and_obj["named_value"])}'), - log_and_obj, - ) + # Skip named values that are not directly contained by a simulation + # block (parameters or state). + if not task[1][self.ancestors]: + return + + if len(task[1][self.ancestors]) != 1: + logging.error( + ('Named values sub-logs cannot be contained within multiple named' + ' values or containers!. task=%s'), + task, + ) + return + + if isinstance(task[1][self.ancestors][0], dict): + log_and_obj: ObjMap = task[1][self.ancestors][0].copy() + else: + log_and_obj: ObjMap = {} + log_and_obj['object'] = data_structures.from_log( + [o['object'] for o in task[1][self.value_objects]]) + yield ( + (f'{log_and_obj["named_value"].location} -' + f' {log_uid(log_and_obj["named_value"])}'), + log_and_obj, + ) class NamedObjectsToSequence(beam.DoFn): - """Converts sets of named value objects to time-ordered sequences.""" + """Converts sets of named value objects to time-ordered sequences.""" - def process(self, task: Tuple[Any, - Iterable[AnyObjMap]]) -> Iterator[AnyObjMap]: - """Time-orders the sequence of objects for a given simulation attribute. + def process(self, task: Tuple[Any, + Iterable[AnyObjMap]]) -> Iterator[AnyObjMap]: + """Time-orders the sequence of objects for a given simulation attribute. Args: task: A sequence of objects that describe the state of some simulation @@ -480,45 +471,44 @@ def process(self, task: Tuple[Any, Yields: A time-ordered version of the input sequence. """ - ordered_seq = sorted( - task[1], - key=lambda x: list(x['simulation_time_step'].block_start. - simulation_time_step_start.time_step_index), - ) - ts_indexes = np.array([ - x['simulation_time_step'].block_start.simulation_time_step_start. - time_step_index for x in ordered_seq - ], ) - time_steps = np.array([ - x['simulation_time_step'].block_start.simulation_time_step_start. - time_step for x in ordered_seq - ], ) - values = np.array([x['object'][1] for x in ordered_seq]) - - yield { - 'simulation': ordered_seq[0]['simulation'], - 'cluster_id': ordered_seq[0].get('cluster_id'), - 'variable': ordered_seq[0]['named_value'].block_start.label, - 'values': values, - 'ts_indexes': ts_indexes, - 'time_steps': time_steps, - } + ordered_seq = sorted( + task[1], + key=lambda x: list(x['simulation_time_step'].block_start. + simulation_time_step_start.time_step_index), + ) + ts_indexes = np.array([ + x['simulation_time_step'].block_start.simulation_time_step_start. + time_step_index for x in ordered_seq + ],) + time_steps = np.array([ + x['simulation_time_step'].block_start.simulation_time_step_start. 
+ time_step for x in ordered_seq + ],) + values = np.array([x['object'][1] for x in ordered_seq]) + + yield { + 'simulation': ordered_seq[0]['simulation'], + 'cluster_id': ordered_seq[0].get('cluster_id'), + 'variable': ordered_seq[0]['named_value'].block_start.label, + 'values': values, + 'ts_indexes': ts_indexes, + 'time_steps': time_steps, + } class CombineParametersAndTimeSeries(beam.DoFn): - """Combines the parameters and variable state time series of a simulation.""" + """Combines the parameters and variable state time series of a simulation.""" - def __init__( - self, - params_label: str, - variables_label: str, - ): - self.params_label = params_label - self.variables_label = variables_label + def __init__( + self, + params_label: str, + variables_label: str, + ): + self.params_label = params_label + self.variables_label = variables_label - def process(self, task: Tuple[Any, Dict[str, - List[ObjMap]]]) -> Iterator[Log]: - """Combines the parameters and variable state time series of a simulation. + def process(self, task: Tuple[Any, Dict[str, List[ObjMap]]]) -> Iterator[Log]: + """Combines the parameters and variable state time series of a simulation. Args: task: A sequence of objects that describe the state of some simulation @@ -527,15 +517,15 @@ def process(self, task: Tuple[Any, Dict[str, Yields: A time-ordered version of the input sequence. """ - parameters = list(task[1][self.params_label]) - variables = list(task[1][self.variables_label]) + parameters = list(task[1][self.params_label]) + variables = list(task[1][self.variables_label]) - all_parameters = [p['object'] for p in parameters] + all_parameters = [p['object'] for p in parameters] - for v in variables: - res = v.copy() - res['parameters'] = all_parameters - yield res + for v in variables: + res = v.copy() + res['parameters'] = all_parameters + yield res def combine_parent_and_child( @@ -545,7 +535,7 @@ def combine_parent_and_child( child_pcol: beam.pvalue.PCollection[KeyedObjMap], index_by_parent: bool, ) -> beam.pvalue.PCollection[KeyedObjMap]: - """Joins a parent Objects to child Objects. + """Joins a parent Objects to child Objects. Args: parent_label: identifies the parent PCollection. @@ -562,15 +552,13 @@ def combine_parent_and_child( (the parent Object's location and log_uid). This collection is keyed by the location and log_uid of the child Object. """ - return ( - { - parent_label: parent_pcol, - child_label: child_pcol, - } - | parent_label + ' ' + child_label + ' CoGroupByKey' >> - beam.CoGroupByKey() - | parent_label + ' ' + child_label + ' ParentChildPairs' >> beam.ParDo( - ParentChildPairs(parent_label, child_label, index_by_parent))) + return ({ + parent_label: parent_pcol, + child_label: child_pcol, + } | parent_label + ' ' + child_label + ' CoGroupByKey' >> beam.CoGroupByKey() + | + parent_label + ' ' + child_label + ' ParentChildPairs' >> beam.ParDo( + ParentChildPairs(parent_label, child_label, index_by_parent))) def named_values_to_objects( @@ -579,7 +567,7 @@ def named_values_to_objects( child_label: str, objects_with_ancestors: beam.pvalue.PCollection[KeyedObjMap], ) -> beam.pvalue.PCollection[KeyedObjMap]: - """Converts named value log regions into their corresponding Python objects. + """Converts named value log regions into their corresponding Python objects. 
Args: parent_label: Unique label (among pipeline stages) for the collection of @@ -594,16 +582,13 @@ def named_values_to_objects( Maps that contain the ST_NAMED_VALUES and their corresponding Python value objects and with the key of the ST_NAMED_VALUES Object. """ - return ( - { - parent_label: parent_pcol, - child_label: objects_with_ancestors, - } - | parent_label + ' ' + child_label + ' CoGroupByKey' >> - beam.CoGroupByKey() - | parent_label + ' ' + child_label + - ' SimulationStateNamedValuesToObjects' >> beam.ParDo( - SimulationStateNamedValuesToObjects(parent_label, child_label))) + return ({ + parent_label: parent_pcol, + child_label: objects_with_ancestors, + } | parent_label + ' ' + child_label + ' CoGroupByKey' >> beam.CoGroupByKey() + | parent_label + ' ' + child_label + + ' SimulationStateNamedValuesToObjects' >> beam.ParDo( + SimulationStateNamedValuesToObjects(parent_label, child_label))) def create_simulation_and_parameter_objects( @@ -614,7 +599,7 @@ def create_simulation_and_parameter_objects( named_value: beam.pvalue.PCollection[KeyedObjMap], log_file_path_prefix: Optional[str], ) -> beam.pvalue.PCollection[AnyObjMap]: - """Combines simulations and their parameter values. + """Combines simulations and their parameter values. Args: log: All log objects. @@ -630,32 +615,32 @@ def create_simulation_and_parameter_objects( AnyObjMaps that contain simulation objects, their contained simulation parameter objects, and the named values of those parameters. """ - simulations_and_parameters = combine_parent_and_child( - 'simulation', - simulation, - 'simulation_parameters', - simulation_parameters, - index_by_parent=False, - ) - - simulation_parameters_and_named_values_key_named_value = ( - combine_parent_and_child( - 'simulations_and_parameters', - simulations_and_parameters, - 'named_value', - named_value, - index_by_parent=False, - )) - - return remove_key( - 'simulation_and_parameter_objects', - named_values_to_objects( - 'simulation_parameters_and_named_values_key_named_value_objects', - simulation_parameters_and_named_values_key_named_value, - 'objects', - objects_with_ancestors, - ), - ) + simulations_and_parameters = combine_parent_and_child( + 'simulation', + simulation, + 'simulation_parameters', + simulation_parameters, + index_by_parent=False, + ) + + simulation_parameters_and_named_values_key_named_value = ( + combine_parent_and_child( + 'simulations_and_parameters', + simulations_and_parameters, + 'named_value', + named_value, + index_by_parent=False, + )) + + return remove_key( + 'simulation_and_parameter_objects', + named_values_to_objects( + 'simulation_parameters_and_named_values_key_named_value_objects', + simulation_parameters_and_named_values_key_named_value, + 'objects', + objects_with_ancestors, + ), + ) def create_simulation_states_params_and_named_value_objects( @@ -666,7 +651,7 @@ def create_simulation_states_params_and_named_value_objects( log_file_path_prefix: Optional[str], ) -> Tuple[beam.pvalue.PCollection[AnyObjMap], beam.pvalue.PCollection[AnyObjMap]]: - """Combines simulation states and the named values within them. + """Combines simulation states and the named values within them. Args: objects_with_ancestors: Objects, keyed by the start locations of any blocks @@ -681,37 +666,35 @@ def create_simulation_states_params_and_named_value_objects( AnyObjMaps that contain simulation state objects and their associated named values. 
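
    Concretely, the returned tuple carries two PCollections: the first keyed
    by simulation state, the second keyed by simulation parameters block.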
""" - named_value_objects = named_values_to_objects( - 'named_value', - change_key_to_self('named_value_to_key_self', 'named_value', - named_value), - 'objects', - objects_with_ancestors, - ) - named_value_objects_to_key_parent = change_key_to_parent( - 'named_value_objects_to_key_parent', 'named_value', - named_value_objects) - - sim_state_named_values_key_state = combine_parent_and_child( - 'simulation_state', - change_key_to_self('simulation_state_to_key_self', 'simulation_state', - simulation_state), - 'named_value', - named_value_objects_to_key_parent, - index_by_parent=True, - ) - sim_params_named_values_key_params = combine_parent_and_child( - 'simulation_parameters', - change_key_to_self( - 'simulation_parameters_to_key_self', - 'simulation_parameters', - simulation_parameters, - ), - 'named_value', - named_value_objects_to_key_parent, - index_by_parent=True, - ) - return sim_state_named_values_key_state, sim_params_named_values_key_params + named_value_objects = named_values_to_objects( + 'named_value', + change_key_to_self('named_value_to_key_self', 'named_value', named_value), + 'objects', + objects_with_ancestors, + ) + named_value_objects_to_key_parent = change_key_to_parent( + 'named_value_objects_to_key_parent', 'named_value', named_value_objects) + + sim_state_named_values_key_state = combine_parent_and_child( + 'simulation_state', + change_key_to_self('simulation_state_to_key_self', 'simulation_state', + simulation_state), + 'named_value', + named_value_objects_to_key_parent, + index_by_parent=True, + ) + sim_params_named_values_key_params = combine_parent_and_child( + 'simulation_parameters', + change_key_to_self( + 'simulation_parameters_to_key_self', + 'simulation_parameters', + simulation_parameters, + ), + 'named_value', + named_value_objects_to_key_parent, + index_by_parent=True, + ) + return sim_state_named_values_key_state, sim_params_named_values_key_params def create_simulation_time_step_state_objects( @@ -722,7 +705,7 @@ def create_simulation_time_step_state_objects( named_value: beam.pvalue.PCollection[KeyedObjMap], log_file_path_prefix: Optional[str], ) -> beam.pvalue.PCollection[AnyObjMap]: - """Combines simulations and their time step values. + """Combines simulations and their time step values. Args: objects_with_ancestors: Objects, keyed by the start locations of any blocks @@ -739,59 +722,58 @@ def create_simulation_time_step_state_objects( time step objects, the simulation state objects within those and their associated named values. """ - named_value_objects = named_values_to_objects( - 'named_value', - change_key_to_self('named_value_to_key_self', 'named_value', - named_value), - 'objects', - objects_with_ancestors, - ) - - # Connect simulation states to the named values logged within them. - sim_state_named_values_key_state = combine_parent_and_child( - 'simulation_state', - change_key_to_self('simulation_state_to_key_self', 'simulation_state', - simulation_state), - 'named_value', - change_key_to_parent( - 'named_value_objects_to_key_parent', - 'named_value', - named_value_objects, - ), - index_by_parent=True, - ) - - # Connect simulation time steps to their logged states and their named values. 
- sim_ts_state_named_values_key_state = combine_parent_and_child( - 'simulation_time_step', - change_key_to_self( - 'simulation_time_step_to_key_self', - 'simulation_time_step', - simulation_time_step, - ), - 'sim_state_named_values_key_state', - change_key_to_parent( - 'sim_state_named_values_key_state_to_key_parent', - 'simulation_state', - sim_state_named_values_key_state, - ), - index_by_parent=True, - ) - - # Connect simulations to their timesteps and logged states. - sim_simul_ts_state_named_values_key_state = combine_parent_and_child( - 'simulation', - change_key_to_self('simulation_to_key_self', 'simulation', simulation), - 'simulation_time_step', - change_key_to_parent( - 'sim_ts_state_named_values_key_state_to_key_parent', - 'simulation_time_step', - sim_ts_state_named_values_key_state, - ), - index_by_parent=True, - ) - - return remove_key( - 'sim_simul_ts_state_named_values_key_state', - sim_simul_ts_state_named_values_key_state, - ) + named_value_objects = named_values_to_objects( + 'named_value', + change_key_to_self('named_value_to_key_self', 'named_value', named_value), + 'objects', + objects_with_ancestors, + ) + + # Connect simulation states to the named values logged within them. + sim_state_named_values_key_state = combine_parent_and_child( + 'simulation_state', + change_key_to_self('simulation_state_to_key_self', 'simulation_state', + simulation_state), + 'named_value', + change_key_to_parent( + 'named_value_objects_to_key_parent', + 'named_value', + named_value_objects, + ), + index_by_parent=True, + ) + + # Connect simulation time steps to their logged states and their named values. + sim_ts_state_named_values_key_state = combine_parent_and_child( + 'simulation_time_step', + change_key_to_self( + 'simulation_time_step_to_key_self', + 'simulation_time_step', + simulation_time_step, + ), + 'sim_state_named_values_key_state', + change_key_to_parent( + 'sim_state_named_values_key_state_to_key_parent', + 'simulation_state', + sim_state_named_values_key_state, + ), + index_by_parent=True, + ) + + # Connect simulations to their timesteps and logged states. + sim_simul_ts_state_named_values_key_state = combine_parent_and_child( + 'simulation', + change_key_to_self('simulation_to_key_self', 'simulation', simulation), + 'simulation_time_step', + change_key_to_parent( + 'sim_ts_state_named_values_key_state_to_key_parent', + 'simulation_time_step', + sim_ts_state_named_values_key_state, + ), + index_by_parent=True, + ) + + return remove_key( + 'sim_simul_ts_state_named_values_key_state', + sim_simul_ts_state_named_values_key_state, + ) diff --git a/py/sight/widgets/simulation/apply_lstm_surrogate.py b/py/sight/widgets/simulation/apply_lstm_surrogate.py index ed4c310..4d8487f 100644 --- a/py/sight/widgets/simulation/apply_lstm_surrogate.py +++ b/py/sight/widgets/simulation/apply_lstm_surrogate.py @@ -3,40 +3,33 @@ TODO(bronevet): DO NOT SUBMIT without a detailed description of train_lstm_surrogate. 
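
In brief (inferred from the code below): loads the time series of a logged
simulation from BigQuery, applies the trained LSTM surrogate saved at
--model_in_path to it, and reports the mean L2 prediction error.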
""" +import os from typing import Sequence from absl import app from absl import flags -import numpy as np -import pandas as pd +from google.cloud import bigquery import keras -from keras.models import Sequential from keras.layers import Dense from keras.layers import LSTM -import tensorflow as tf +from keras.models import Sequential from keras.preprocessing.sequence import TimeseriesGenerator -import os - -from google.cloud import bigquery +import numpy as np +import pandas as pd +import tensorflow as tf -_PROJECT_ID = flags.DEFINE_string( - 'project_id', os.environ['PROJECT_ID'], "ID of the current GCP project." -) -_LOG_ID = flags.DEFINE_string( - 'log_id', '', "ID of the log being analyzed." -) -_MODEL_IN_PATH = flags.DEFINE_string( - 'model_in_path', '', 'Path where the trained model is stored.' -) +_PROJECT_ID = flags.DEFINE_string('project_id', os.environ['PROJECT_ID'], + "ID of the current GCP project.") +_LOG_ID = flags.DEFINE_string('log_id', '', "ID of the log being analyzed.") +_MODEL_IN_PATH = flags.DEFINE_string('model_in_path', '', + 'Path where the trained model is stored.') _NUM_STEPS = flags.DEFINE_integer( - 'num_steps', 50, 'Number of steps of history in the prediction.' -) -_BATCH_SIZE = flags.DEFINE_integer( - 'batch_size', 128, 'Batch size.' -) + 'num_steps', 50, 'Number of steps of history in the prediction.') +_BATCH_SIZE = flags.DEFINE_integer('batch_size', 128, 'Batch size.') + def build_query(raw_query, params: dict = None): - """Format query using given parameters. + """Format query using given parameters. If no parameters are provided the query is returned as is. @@ -47,21 +40,24 @@ def build_query(raw_query, params: dict = None): Returns: query with parameters inserted """ - query = raw_query - if params is not None: - query = query.format(**params) - return query + query = raw_query + if params is not None: + query = query.format(**params) + return query + def main(argv: Sequence[str]) -> None: if len(argv) > 1: raise app.UsageError("Too many command-line arguments.") current_script_directory = os.path.dirname(os.path.abspath(__file__)) - _SCHEMA_FILE_PATH = os.path.join(current_script_directory, 'simulation_time_series.sql') + _SCHEMA_FILE_PATH = os.path.join(current_script_directory, + 'simulation_time_series.sql') - with open(f'x-sight/py/sight/widgets/simulation/simulation_time_series.sql') as file: + with open(f'x-sight/py/sight/widgets/simulation/simulation_time_series.sql' + ) as file: query_template = file.read() - + query = build_query(query_template, {'log_id': _LOG_ID.value}) print('query=%s' % query) bq_client = bigquery.Client(project=_PROJECT_ID.value) @@ -74,43 +70,43 @@ def main(argv: Sequence[str]) -> None: df = pd.DataFrame(sim_data['values'].to_list()) input_data = df[:-_NUM_STEPS.value] targets = df[_NUM_STEPS.value:] - + cur_dataset = keras.utils.timeseries_dataset_from_array( - input_data, - targets, - sequence_length=_NUM_STEPS.value, + input_data, + targets, + sequence_length=_NUM_STEPS.value, batch_size=_BATCH_SIZE.value) - + if sim_dataset is None: sim_dataset = cur_dataset else: sim_dataset = sim_dataset.concatenate(cur_dataset) - + next_state = [] for i, d in enumerate(sim_dataset): next_state.append(d[1]) next_state = tf.concat(next_state, 0) # next_state = np.array(next_state) # print('next_state(#%d)=%s' % (len(next_state), next_state)) - + model = keras.models.load_model(_MODEL_IN_PATH.value) prediction = np.array(model.predict(sim_dataset)) print('prediction=%s=%s' % (prediction.shape, prediction)) - + error_sum = 0 num_pred 
= 0 for i, p in enumerate(prediction): - # print ('%s: next_state=%s prediction=%s' % + # print ('%s: next_state=%s prediction=%s' % # ( # np.linalg.norm(next_state[i] - p, ord=2), # next_state[i], p # )) error_sum += np.linalg.norm(next_state[i] - p, ord=2) num_pred += 1 - print ('error = %s' % (error_sum / num_pred)) - - print ('error = %s' % np.linalg.norm(next_state - prediction, ord=2)) - + print('error = %s' % (error_sum / num_pred)) + + print('error = %s' % np.linalg.norm(next_state - prediction, ord=2)) + # model.save(_MODEL_OUT_PATH.value) diff --git a/py/sight/widgets/simulation/bulk_inference.py b/py/sight/widgets/simulation/bulk_inference.py index b4258cd..0a29566 100644 --- a/py/sight/widgets/simulation/bulk_inference.py +++ b/py/sight/widgets/simulation/bulk_inference.py @@ -1,12 +1,12 @@ +from datetime import datetime +import json +import os +import subprocess from typing import Sequence, Tuple from absl import app from absl import flags from helpers.logs.logs_handler import logger as logging -from datetime import datetime -import json -import os -import subprocess _LOG_ID = flags.DEFINE_string( 'log_id', @@ -61,47 +61,47 @@ def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError('Too many command-line arguments.') + if len(argv) > 1: + raise app.UsageError('Too many command-line arguments.') - with open(_INPUT_META_PATH.value) as f: - meta = json.load(f) - max_input_len = meta['max_input_len'] - max_pred_len = meta['max_pred_len'] + with open(_INPUT_META_PATH.value) as f: + meta = json.load(f) + max_input_len = meta['max_input_len'] + max_pred_len = meta['max_pred_len'] - cmd = [ - '/google/bin/releases/tunelab/public/bulk_inference_jax_on_beam', - f'--input_spec=arrayrecord:/cns/oj-d/home/bronevet/kokua/experiments/bronevet/dataset/simulation_transformer_{_LOG_ID.value}/simulation_transformer_{_LOG_ID.value}/validation/simulation_transformer_{_LOG_ID.value}.array_record-00000-of-00001', - f'--output_spec=arrayrecord:/cns/oj-d/home/bronevet/kokua/experiments/bronevet/dataset/simulation_transformer_{_LOG_ID.value}/simulation_transformer_{_LOG_ID.value}/validation/predictions/model_output.recordio@*', - f'--batch_size={_BATCH_SIZE.value}', - '--extra_inputs=EXTRA_INPUTS:{\'temperature\': 0.0}', - f'--extra_inputs=INPUT_SEQ_LEN:{max_input_len}', - f'--extra_inputs=MAX_DECODE_STEPS:{max_pred_len}', - '--prompt_feature_name=input', - f'--accelerator_priority_range={_PRIORITY.value},{_PRIORITY.value}', - f'--flume_priority={_PRIORITY.value}', - # '--xm_resource_alloc=x/early-pipeline-alloc', - # '--xm_resource_pool=x', - '--run_on_xm', - f'--flume_borg_cells={_CELL.value}', - f'--tpu_borg_cell={_CELL.value}', - # '--charged_alloc=group:x/early-pipeline-alloc', - f'--platform={_PLATFORM_NAME.value}', - f'--topology={_PLATFORM_MESH.value}', - f'--ici_mesh_shape="{_MESH.value}"', - ] - if _CHECKPOINT_PATH.value: - cmd.append(f'--model_checkpoint={_CHECKPOINT_PATH.value}') - elif _TRAINER_XID.value: - cmd.append(f'--trainer_xid={_TRAINER_XID.value}') - print(' '.join(cmd)) - out = subprocess.run( - cmd, - capture_output=True, - check=True, - ) - print(out) + cmd = [ + '/google/bin/releases/tunelab/public/bulk_inference_jax_on_beam', + f'--input_spec=arrayrecord:/cns/oj-d/home/bronevet/kokua/experiments/bronevet/dataset/simulation_transformer_{_LOG_ID.value}/simulation_transformer_{_LOG_ID.value}/validation/simulation_transformer_{_LOG_ID.value}.array_record-00000-of-00001', + 
f'--output_spec=arrayrecord:/cns/oj-d/home/bronevet/kokua/experiments/bronevet/dataset/simulation_transformer_{_LOG_ID.value}/simulation_transformer_{_LOG_ID.value}/validation/predictions/model_output.recordio@*', + f'--batch_size={_BATCH_SIZE.value}', + '--extra_inputs=EXTRA_INPUTS:{\'temperature\': 0.0}', + f'--extra_inputs=INPUT_SEQ_LEN:{max_input_len}', + f'--extra_inputs=MAX_DECODE_STEPS:{max_pred_len}', + '--prompt_feature_name=input', + f'--accelerator_priority_range={_PRIORITY.value},{_PRIORITY.value}', + f'--flume_priority={_PRIORITY.value}', + # '--xm_resource_alloc=x/early-pipeline-alloc', + # '--xm_resource_pool=x', + '--run_on_xm', + f'--flume_borg_cells={_CELL.value}', + f'--tpu_borg_cell={_CELL.value}', + # '--charged_alloc=group:x/early-pipeline-alloc', + f'--platform={_PLATFORM_NAME.value}', + f'--topology={_PLATFORM_MESH.value}', + f'--ici_mesh_shape="{_MESH.value}"', + ] + if _CHECKPOINT_PATH.value: + cmd.append(f'--model_checkpoint={_CHECKPOINT_PATH.value}') + elif _TRAINER_XID.value: + cmd.append(f'--trainer_xid={_TRAINER_XID.value}') + print(' '.join(cmd)) + out = subprocess.run( + cmd, + capture_output=True, + check=True, + ) + print(out) if __name__ == '__main__': - app.run(main) + app.run(main) diff --git a/py/sight/widgets/simulation/delta_across_time_steps.py b/py/sight/widgets/simulation/delta_across_time_steps.py index e3be00d..4cd6b1e 100644 --- a/py/sight/widgets/simulation/delta_across_time_steps.py +++ b/py/sight/widgets/simulation/delta_across_time_steps.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Analyze Sight logs to track changes in simulation states across time steps.""" import ast @@ -22,13 +21,12 @@ from absl import logging import apache_beam as beam from apache_beam.coders import ProtoCoder -import pandas as pd - -from proto import sight_pb2 from google3.pipeline.flume.py import runner from google3.pipeline.flume.py.io import capacitorio from google3.pyglib import gfile from google3.pyglib.contrib.gpathlib import gpath_flag +import pandas as pd +from proto import sight_pb2 from sight.proto import example_pb2 from sight.proto import feature_pb2 from sight.widgets.simulation import analysis_utils @@ -36,10 +34,8 @@ _IN_LOG_FILE = flags.DEFINE_list( 'in_log_file', None, - ( - 'Input file(s) that contain the Sight log that documents the simulation' - ' run.' 
- ), + ('Input file(s) that contain the Sight log that documents the simulation' + ' run.'), required=True, ) @@ -56,9 +52,8 @@ class SimulationTimeSeries(beam.DoFn): """Turns dicts from TimeStepPairDataRow into TensorFlow Example protos.""" - def process( - self, task: Tuple[Any, Dict[str, Any]] - ) -> Iterator[Tuple[int, Any]]: + def process(self, task: Tuple[Any, Dict[str, + Any]]) -> Iterator[Tuple[int, Any]]: if not task[1]['simulation']: return @@ -81,12 +76,10 @@ def process( state_vars = set() for state_named_val in task[1]['simulation_states_and_named_value_objects']: for ancestor_loc in reversed( - state_named_val['simulation_state'].ancestor_start_location - ): + state_named_val['simulation_state'].ancestor_start_location): if ancestor_loc in time_steps: time_steps[ancestor_loc]['objects'][state_named_val['object'][0]] = ( - state_named_val['object'][1] - ) + state_named_val['object'][1]) break state_vars.add(state_named_val['object'][0]) state_vars = list(sorted(state_vars)) @@ -94,21 +87,18 @@ def process( decision_point_vars = None for dp in task[1]['decision_point']: for ancestor_loc in reversed( - dp['decision_point'].ancestor_start_location - ): + dp['decision_point'].ancestor_start_location): if ancestor_loc in time_steps: time_steps[ancestor_loc]['decision_point'] = dp['decision_point'] break if decision_point_vars is None: decision_point_vars = dp[ - 'decision_point' - ].decision_point.choice_params.keys() + 'decision_point'].decision_point.choice_params.keys() decision_point_vars = sorted(decision_point_vars) for do in task[1]['decision_outcome']: for ancestor_loc in reversed( - do['decision_outcome'].ancestor_start_location - ): + do['decision_outcome'].ancestor_start_location): if ancestor_loc in time_steps: time_steps[ancestor_loc]['decision_outcome'] = do['decision_outcome'] break @@ -127,9 +117,8 @@ def process( if last_decision_point_ts: logging.info( 'choice_params=%s', - last_decision_point_ts[ - 'decision_point' - ].decision_point.choice_params, + last_decision_point_ts['decision_point'].decision_point. + choice_params, ) if last_ts: @@ -146,23 +135,15 @@ def process( for k in decision_point_vars: if last_decision_point_ts: inputs_decision[k].append( - float( - last_decision_point_ts[ - 'decision_point' - ].decision_point.choice_params[k] - ) - ) - cur_input[k] = float( - last_decision_point_ts[ - 'decision_point' - ].decision_point.choice_params[k] - ) + float(last_decision_point_ts['decision_point'].decision_point. + choice_params[k])) + cur_input[k] = float(last_decision_point_ts['decision_point']. 
+ decision_point.choice_params[k]) else: inputs_decision[k].append(0) cur_input[k] = 0 outputs_outcome['decision_outcome'].append( - ts['decision_outcome'].decision_outcome.outcome_value - ) + ts['decision_outcome'].decision_outcome.outcome_value) cur_output[k] = ts['decision_outcome'].decision_outcome.outcome_value yield 0, ( @@ -175,19 +156,16 @@ def process( last_ts = ts yield 1, { - 'inputs_params': pd.DataFrame(data=inputs_params, columns=param_vars), - 'inputs_state_vars': pd.DataFrame( - data=inputs_state_vars, columns=state_vars - ), - 'inputs_decision': pd.DataFrame( - data=inputs_decision, columns=decision_point_vars - ), - 'outputs_state_vars': pd.DataFrame( - data=outputs_state_vars, columns=state_vars - ), - 'outputs_outcome': pd.DataFrame( - data=outputs_outcome, columns=['decision_outcome'] - ), + 'inputs_params': + pd.DataFrame(data=inputs_params, columns=param_vars), + 'inputs_state_vars': + pd.DataFrame(data=inputs_state_vars, columns=state_vars), + 'inputs_decision': + pd.DataFrame(data=inputs_decision, columns=decision_point_vars), + 'outputs_state_vars': + pd.DataFrame(data=outputs_state_vars, columns=state_vars), + 'outputs_outcome': + pd.DataFrame(data=outputs_outcome, columns=['decision_outcome']), } @@ -217,8 +195,7 @@ def _dict_to_ex(self, data: Dict[str, float]) -> example_pb2.Example: feature = feature_pb2.Features() for key, value in data.items(): feature.feature[key].CopyFrom( - feature_pb2.Feature(float_list=feature_pb2.FloatList(value=[value])) - ) + feature_pb2.Feature(float_list=feature_pb2.FloatList(value=[value]))) return example_pb2.Example(features=feature) def process( @@ -239,17 +216,12 @@ def main(argv): raise app.UsageError('Too many command-line arguments.') root = beam.Pipeline( - runner=runner.FlumeRunner() - ) # beam.runners.DirectRunner()) + runner=runner.FlumeRunner()) # beam.runners.DirectRunner()) if gfile.Glob(str(_OUT_FILE.value) + '.simulation_time_series*'): - simulation_time_series = ( - root - | beam.io.textio.ReadFromText( - str(_OUT_FILE.value) + '.simulation_time_series*' - ) - | beam.Map(ast.literal_eval) - ) + simulation_time_series = (root | beam.io.textio.ReadFromText( + str(_OUT_FILE.value) + '.simulation_time_series*') | + beam.Map(ast.literal_eval)) else: reads = [] for file_path in _IN_LOG_FILE.value: @@ -265,32 +237,23 @@ def main(argv): ) if gfile.Glob(cur_file_path): reads.append( - root - | f'Read {cur_file_path}' - >> capacitorio.ReadFromCapacitor( - cur_file_path, ['*'], ProtoCoder(sight_pb2.Object) - ) - ) + root | + f'Read {cur_file_path}' >> capacitorio.ReadFromCapacitor( + cur_file_path, ['*'], ProtoCoder(sight_pb2.Object))) else: if not file_path.endswith('.capacitor'): file_path = f'{file_path}.capacitor' logging.info('file_path=%s', file_path) - reads.append( - root - | f'Read {file_path}' - >> capacitorio.ReadFromCapacitor( - file_path, ['*'], ProtoCoder(sight_pb2.Object) - ) - ) + reads.append(root | + f'Read {file_path}' >> capacitorio.ReadFromCapacitor( + file_path, ['*'], ProtoCoder(sight_pb2.Object))) log = reads | beam.Flatten() objects_with_ancestors = log | beam.ParDo( - analysis_utils.ExtractAncestorBlockStartLocations() - ) + analysis_utils.ExtractAncestorBlockStartLocations()) simulation = analysis_utils.block_start_objects_key_self( - log, sight_pb2.BlockStart.ST_SIMULATION, 'simulation' - ) + log, sight_pb2.BlockStart.ST_SIMULATION, 'simulation') simulation_parameters = analysis_utils.block_start_objects_key_parent( log, sight_pb2.BlockStart.ST_SIMULATION_PARAMETERS, @@ -306,30 +269,24 @@ def 
main(argv): ) simulation_state = analysis_utils.block_start_objects_key_parent( - log, sight_pb2.BlockStart.ST_SIMULATION_STATE, 'simulation_state' - ) + log, sight_pb2.BlockStart.ST_SIMULATION_STATE, 'simulation_state') named_value = analysis_utils.block_start_objects_key_parent( - log, sight_pb2.BlockStart.ST_NAMED_VALUE, 'named_value' - ) + log, sight_pb2.BlockStart.ST_NAMED_VALUE, 'named_value') decision_point = analysis_utils.objs_with_ancestor_keys( analysis_utils.single_objects_key_parent( - log, sight_pb2.Object.ST_DECISION_POINT, 'decision_point' - ), + log, sight_pb2.Object.ST_DECISION_POINT, 'decision_point'), 'decision_point', ) _ = decision_point | 'decision_point' >> beam.io.WriteToText( - str(_OUT_FILE.value) + '.decision_point' - ) + str(_OUT_FILE.value) + '.decision_point') decision_outcome = analysis_utils.objs_with_ancestor_keys( analysis_utils.single_objects_key_parent( - log, sight_pb2.Object.ST_DECISION_OUTCOME, 'decision_outcome' - ), + log, sight_pb2.Object.ST_DECISION_OUTCOME, 'decision_outcome'), 'decision_outcome', ) _ = decision_outcome | 'decision_outcome' >> beam.io.WriteToText( - str(_OUT_FILE.value) + '.decision_outcome' - ) + str(_OUT_FILE.value) + '.decision_outcome') ( simulation_states_and_named_value_objects, @@ -342,50 +299,38 @@ def main(argv): str(_OUT_FILE.value), ) - simulation_time_series = ( - { - 'simulation': simulation, - 'simulation_time_step': simulation_time_step, - 'decision_point': decision_point, - 'decision_outcome': decision_outcome, - 'simulation_states_and_named_value_objects': ( - analysis_utils.objs_with_ancestor_keys( - simulation_states_and_named_value_objects, - 'simulation_state', - ) - ), - 'simulation_params_and_named_value_objects': ( - analysis_utils.objs_with_ancestor_keys( - simulation_params_and_named_value_objects, - 'simulation_parameters', - ) - ), - } - | 'simulations_and_contents CoGroupByKey' >> beam.CoGroupByKey() - | beam.ParDo(SimulationTimeSeries()) - | beam.GroupByKey() - ) - - _ = ( - simulation_time_series - | 'simulation_time_series' - >> beam.io.WriteToText(str(_OUT_FILE.value) + '.simulation_time_series') - ) - - _ = ( - simulation_time_series - | beam.Filter(lambda x: x[0] == 0) - | beam.ParDo(TimeSeriesToTfExample()) - | capacitorio.WriteToCapacitor( - str(_OUT_FILE.value) + '.examples', ProtoCoder(sight_pb2.Object) - ) - ) - - _ = ( - simulation_time_series - | beam.Filter(lambda x: x[0] == 1) - | beam.ParDo(GatherAllTimeSeriesToCsv()) - ) + simulation_time_series = ({ + 'simulation': + simulation, + 'simulation_time_step': + simulation_time_step, + 'decision_point': + decision_point, + 'decision_outcome': + decision_outcome, + 'simulation_states_and_named_value_objects': + (analysis_utils.objs_with_ancestor_keys( + simulation_states_and_named_value_objects, + 'simulation_state', + )), + 'simulation_params_and_named_value_objects': + (analysis_utils.objs_with_ancestor_keys( + simulation_params_and_named_value_objects, + 'simulation_parameters', + )), + } | 'simulations_and_contents CoGroupByKey' >> beam.CoGroupByKey() | + beam.ParDo(SimulationTimeSeries()) | + beam.GroupByKey()) + + _ = (simulation_time_series | 'simulation_time_series' >> + beam.io.WriteToText(str(_OUT_FILE.value) + '.simulation_time_series')) + + _ = (simulation_time_series | beam.Filter(lambda x: x[0] == 0) | + beam.ParDo(TimeSeriesToTfExample()) | capacitorio.WriteToCapacitor( + str(_OUT_FILE.value) + '.examples', ProtoCoder(sight_pb2.Object))) + + _ = (simulation_time_series | beam.Filter(lambda x: x[0] == 1) | + 
beam.ParDo(GatherAllTimeSeriesToCsv())) results = root.run() results.wait_until_finish() diff --git a/py/sight/widgets/simulation/fine_tune_gemini.py b/py/sight/widgets/simulation/fine_tune_gemini.py index b55a2d3..3a2d024 100644 --- a/py/sight/widgets/simulation/fine_tune_gemini.py +++ b/py/sight/widgets/simulation/fine_tune_gemini.py @@ -1,12 +1,12 @@ +from datetime import datetime +import json +import os +import subprocess from typing import Sequence, Tuple from absl import app from absl import flags from helpers.logs.logs_handler import logger as logging -from datetime import datetime -import json -import os -import subprocess _LOG_ID = flags.DEFINE_string( 'log_id', @@ -92,20 +92,20 @@ def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError('Too many command-line arguments.') + if len(argv) > 1: + raise app.UsageError('Too many command-line arguments.') - date = datetime.today().strftime('%Y-%m-%d.%H:%M:%S') - dataset_id = f'simulation_transformer_{_LOG_ID.value}' - output_path = f'{_BASE_OUTPUT_PATH.value}/experiments/{os.environ["USER"]}/{dataset_id}/{_VARIANT.value}/{date}' + date = datetime.today().strftime('%Y-%m-%d.%H:%M:%S') + dataset_id = f'simulation_transformer_{_LOG_ID.value}' + output_path = f'{_BASE_OUTPUT_PATH.value}/experiments/{os.environ["USER"]}/{dataset_id}/{_VARIANT.value}/{date}' - with open(_INPUT_META_PATH.value) as f: - meta = json.load(f) - max_input_len = meta['max_input_len'] - max_pred_len = meta['max_pred_len'] + with open(_INPUT_META_PATH.value) as f: + meta = json.load(f) + max_input_len = meta['max_input_len'] + max_pred_len = meta['max_pred_len'] - with open('/tmp/mixtures.textproto', 'w') as f: - f.write(f""" + with open('/tmp/mixtures.textproto', 'w') as f: + f.write(f""" # proto-file: google3/learning/language/tunelab/tunekit/api/common/proto/task.proto # proto-message: Task @@ -125,39 +125,39 @@ def main(argv: Sequence[str]) -> None: label_key: "pred" }} """) - cmd = [ - '/google/bin/releases/tunelab/public/finetune', - f'--family={_FAMILY.value}', - f'--variant={_VARIANT.value}', - f'--task_proto_data_path=/tmp/mixtures.textproto', - f'--train_dataset_name={dataset_id}', - f'--eval_dataset_name={dataset_id}', - f'--output_dir={output_path}', - f'--batch_size={_BATCH_SIZE.value}', - f'--inputs_length={max_input_len}', - f'--targets_length={max_pred_len}', - f'--num_train_steps={_NUM_TRAIN_STEPS.value}', - f'--eval_interval_steps={_NUM_EVAL_STEPS.value}', - f'--save_interval_steps={_SAVE_INTERVAL_STEPS.value}', - f'--learning_rate={_LEARNING_RATE.value}', - f'--xm_resource_alloc=x/early-pipeline-alloc', - f'--xm_resource_pool=x', - f'--priority={_PRIORITY.value}', - f'--learning_rate=0.0005', - f'--cell={_CELL.value}', - f'--platform={_PLATFORM_NAME.value}_{_PLATFORM_MESH.value}', - f'--mesh={_MESH.value}', - ] - if _CHECKPOINT_PATH.value: - cmd.append(f'--checkpoint_path={_CHECKPOINT_PATH.value}') - print(' '.join(cmd)) - out = subprocess.run( - cmd, - capture_output=True, - # check=True, - ) - print(out) + cmd = [ + '/google/bin/releases/tunelab/public/finetune', + f'--family={_FAMILY.value}', + f'--variant={_VARIANT.value}', + f'--task_proto_data_path=/tmp/mixtures.textproto', + f'--train_dataset_name={dataset_id}', + f'--eval_dataset_name={dataset_id}', + f'--output_dir={output_path}', + f'--batch_size={_BATCH_SIZE.value}', + f'--inputs_length={max_input_len}', + f'--targets_length={max_pred_len}', + f'--num_train_steps={_NUM_TRAIN_STEPS.value}', + f'--eval_interval_steps={_NUM_EVAL_STEPS.value}', + 
f'--save_interval_steps={_SAVE_INTERVAL_STEPS.value}', + f'--learning_rate={_LEARNING_RATE.value}', + f'--xm_resource_alloc=x/early-pipeline-alloc', + f'--xm_resource_pool=x', + f'--priority={_PRIORITY.value}', + f'--learning_rate=0.0005', + f'--cell={_CELL.value}', + f'--platform={_PLATFORM_NAME.value}_{_PLATFORM_MESH.value}', + f'--mesh={_MESH.value}', + ] + if _CHECKPOINT_PATH.value: + cmd.append(f'--checkpoint_path={_CHECKPOINT_PATH.value}') + print(' '.join(cmd)) + out = subprocess.run( + cmd, + capture_output=True, + # check=True, + ) + print(out) if __name__ == '__main__': - app.run(main) + app.run(main) diff --git a/py/sight/widgets/simulation/generate_log_trans_dataset.py b/py/sight/widgets/simulation/generate_log_trans_dataset.py index 5eaf9cd..8cfdabc 100644 --- a/py/sight/widgets/simulation/generate_log_trans_dataset.py +++ b/py/sight/widgets/simulation/generate_log_trans_dataset.py @@ -1,14 +1,14 @@ +import csv +from dataclasses import dataclass +import json +import random as rn from typing import Sequence, Tuple from absl import app from absl import flags from helpers.logs.logs_handler import logger as logging -import csv -from dataclasses import dataclass -import json import numpy as np import pandas as pd -import random as rn _INPUT_PATH = flags.DEFINE_string( 'input_path', @@ -54,72 +54,72 @@ @dataclass class Dataset: - train_df: pd.DataFrame - train_data_df: pd.DataFrame - validate_df: pd.DataFrame - validate_data_df: pd.DataFrame - max_input_len: int - max_pred_len: int + train_df: pd.DataFrame + train_data_df: pd.DataFrame + validate_df: pd.DataFrame + validate_data_df: pd.DataFrame + max_input_len: int + max_pred_len: int def generate_prediction(row: np.ndarray, columns: Sequence[str]) -> str: - """Returns the representation of row to use as the string the transformer will predict. + """Returns the representation of row to use as the string the transformer will predict. Arguments: row: The data row, containing data for all the columns. columns: The names of the columns, one for each row element. Their names include the prefix 'autoreg:', 'boundary:' or 'initial:' to indicate their role in the simulation. """ - data = [] - for i, c in enumerate(columns): - if c.startswith('autoreg:'): - data.append(str(row[i])) - return ' '.join(data) + data = [] + for i, c in enumerate(columns): + if c.startswith('autoreg:'): + data.append(str(row[i])) + return ' '.join(data) def generate_input(rows: Sequence[np.ndarray], next_row: np.ndarray, columns: Sequence[str]) -> str: - """Returns the representation of rows to use as the string the transformer will take as input. + """Returns the representation of rows to use as the string the transformer will take as input. Arguments: rows: The data rows, containing data for all columns. columns: The names of the columns, one for each row element. Their names include the prefix 'autoreg:', 'boundary:' or 'initial:' to indicate their role in the simulation. 
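
  A hypothetical example: with columns ['initial:i', 'boundary:b',
  'autoreg:a'], a two-row history [[1, 2, 3], [4, 5, 6]] and next_row
  [7, 8, 9], the body below produces
  'initial: 1, boundary: 2, autoreg: 3| boundary: 5, autoreg: 6| boundary: 8'.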
""" - # print('rows: ', type(rows)) - # print('columns: ', type(columns)) - out = '' - for row_idx, row in enumerate(rows): - if row_idx == 0: - out += 'initial:' - # print('row=', row) - # print('columns=', columns) - for i, c in enumerate(columns): - # print(i,': ', i, ' row[i]=', row[i], ' c=', c) - if c.startswith('initial:'): - out += ' ' + str(row[i]) - out += ', ' - else: - out += '| ' - out += 'boundary:' - for i, c in enumerate(columns): - if c.startswith('boundary:'): - out += ' ' + str(row[i]) - out += ', autoreg:' - for i, c in enumerate(columns): - if c.startswith('autoreg:'): - out += ' ' + str(row[i]) - out += '| boundary:' + # print('rows: ', type(rows)) + # print('columns: ', type(columns)) + out = '' + for row_idx, row in enumerate(rows): + if row_idx == 0: + out += 'initial:' + # print('row=', row) + # print('columns=', columns) + for i, c in enumerate(columns): + # print(i,': ', i, ' row[i]=', row[i], ' c=', c) + if c.startswith('initial:'): + out += ' ' + str(row[i]) + out += ', ' + else: + out += '| ' + out += 'boundary:' for i, c in enumerate(columns): - if c.startswith('boundary:'): - out += ' ' + str(next_row[i]) + if c.startswith('boundary:'): + out += ' ' + str(row[i]) + out += ', autoreg:' + for i, c in enumerate(columns): + if c.startswith('autoreg:'): + out += ' ' + str(row[i]) + out += '| boundary:' + for i, c in enumerate(columns): + if c.startswith('boundary:'): + out += ' ' + str(next_row[i]) - return out + return out def build_dataset(sim_log: pd.DataFrame, hist_len: int, train_frac: float) -> Dataset: - """Loads the simulation log dataset and splits it into a training and a validation set. + """Loads the simulation log dataset and splits it into a training and a validation set. Arguments: sim_log: The full log that contains the time series of all simiulation runs. @@ -130,98 +130,98 @@ def build_dataset(sim_log: pd.DataFrame, hist_len: int, Returns: The training and validation datasets, each of which has columns input and target. 
""" - simulations = sim_log.groupby(['sim_location']) - - train_inputs = [] - train_preds = [] - train_data = [] - validate_inputs = [] - validate_preds = [] - validate_data = [] - max_input_len = 0 - max_pred_len = 0 - - for _, sim_log in simulations: - if rn.random() < train_frac: - inputs = train_inputs - preds = train_preds - data = train_data - else: - inputs = validate_inputs - preds = validate_preds - data = validate_data - - hist = [] - data_columns = list(sim_log.columns[3:]) - for idx in range(sim_log.shape[0]): - cur_row = sim_log.iloc[idx].values.astype(str) - data.append(cur_row) - # logging.info('inputs(#%d)=%s', len(cur_row), cur_row) - if len(hist) == hist_len: - # next_input = ' '.join(hist) - input = generate_input(hist, cur_row[3:], data_columns) - prediction = generate_prediction(cur_row[3:], data_columns) - - max_input_len = max(max_input_len, len(input)) - inputs.append(input) - - max_pred_len = max(max_pred_len, len(prediction)) - preds.append(prediction) - - hist.pop(0) - hist.append(cur_row[3:]) - # logging.info('inputs(#%d)=%s', len(inputs), inputs) - # logging.info('preds(#%d)=%s', len(preds), preds) - - train_df = pd.DataFrame({ - 'input': train_inputs, - 'pred': train_preds, - }) - - validate_df = pd.DataFrame({ - 'input': validate_inputs, - 'pred': validate_preds, - }) - - return Dataset( - train_df, - pd.DataFrame(train_data), - validate_df, - pd.DataFrame(validate_data), - max_input_len, - max_pred_len, - ) + simulations = sim_log.groupby(['sim_location']) + + train_inputs = [] + train_preds = [] + train_data = [] + validate_inputs = [] + validate_preds = [] + validate_data = [] + max_input_len = 0 + max_pred_len = 0 + + for _, sim_log in simulations: + if rn.random() < train_frac: + inputs = train_inputs + preds = train_preds + data = train_data + else: + inputs = validate_inputs + preds = validate_preds + data = validate_data + + hist = [] + data_columns = list(sim_log.columns[3:]) + for idx in range(sim_log.shape[0]): + cur_row = sim_log.iloc[idx].values.astype(str) + data.append(cur_row) + # logging.info('inputs(#%d)=%s', len(cur_row), cur_row) + if len(hist) == hist_len: + # next_input = ' '.join(hist) + input = generate_input(hist, cur_row[3:], data_columns) + prediction = generate_prediction(cur_row[3:], data_columns) + + max_input_len = max(max_input_len, len(input)) + inputs.append(input) + + max_pred_len = max(max_pred_len, len(prediction)) + preds.append(prediction) + + hist.pop(0) + hist.append(cur_row[3:]) + # logging.info('inputs(#%d)=%s', len(inputs), inputs) + # logging.info('preds(#%d)=%s', len(preds), preds) + + train_df = pd.DataFrame({ + 'input': train_inputs, + 'pred': train_preds, + }) + + validate_df = pd.DataFrame({ + 'input': validate_inputs, + 'pred': validate_preds, + }) + + return Dataset( + train_df, + pd.DataFrame(train_data), + validate_df, + pd.DataFrame(validate_data), + max_input_len, + max_pred_len, + ) def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError('Too many command-line arguments.') - - dataset = build_dataset( - sim_log=pd.read_csv(_INPUT_PATH.value), - hist_len=_HIST_LEN.value, - train_frac=_TRAIN_FRAC.value, - ) - dataset.train_df.to_csv(_OUTPUT_PATH_TRAIN.value, - index=False, - quoting=csv.QUOTE_ALL) - dataset.train_data_df.to_csv(_OUTPUT_PATH_TRAIN_DATA.value, - index=False, - quoting=csv.QUOTE_ALL) - dataset.validate_df.to_csv(_OUTPUT_PATH_VAL.value, + if len(argv) > 1: + raise app.UsageError('Too many command-line arguments.') + + dataset = build_dataset( + 
sim_log=pd.read_csv(_INPUT_PATH.value), + hist_len=_HIST_LEN.value, + train_frac=_TRAIN_FRAC.value, + ) + dataset.train_df.to_csv(_OUTPUT_PATH_TRAIN.value, + index=False, + quoting=csv.QUOTE_ALL) + dataset.train_data_df.to_csv(_OUTPUT_PATH_TRAIN_DATA.value, + index=False, + quoting=csv.QUOTE_ALL) + dataset.validate_df.to_csv(_OUTPUT_PATH_VAL.value, + index=False, + quoting=csv.QUOTE_ALL) + dataset.train_data_df.to_csv(_OUTPUT_PATH_VAL_DATA.value, index=False, quoting=csv.QUOTE_ALL) - dataset.train_data_df.to_csv(_OUTPUT_PATH_VAL_DATA.value, - index=False, - quoting=csv.QUOTE_ALL) - with open(_OUTPUT_META_PATH.value, 'w') as f: - json.dump( - { - 'max_input_len': dataset.max_input_len, - 'max_pred_len': dataset.max_pred_len, - }, f) + with open(_OUTPUT_META_PATH.value, 'w') as f: + json.dump( + { + 'max_input_len': dataset.max_input_len, + 'max_pred_len': dataset.max_pred_len, + }, f) if __name__ == '__main__': - app.run(main) + app.run(main) diff --git a/py/sight/widgets/simulation/load_log_time_series.py b/py/sight/widgets/simulation/load_log_time_series.py index 12a4386..284dd76 100644 --- a/py/sight/widgets/simulation/load_log_time_series.py +++ b/py/sight/widgets/simulation/load_log_time_series.py @@ -4,16 +4,16 @@ train_darts_surrogate. """ +import glob import os -from typing import Optional, Dict, Sequence, Tuple +import os.path +import subprocess +from typing import Dict, Optional, Sequence, Tuple from absl import app from absl import flags -import glob from google.cloud import bigquery import pandas as pd -import os.path -import subprocess _LOG_ID = flags.DEFINE_string( 'log_id', @@ -25,9 +25,8 @@ '', 'Path of the file where the run simulation time series will be stored.', ) -_PROJECT_ID = flags.DEFINE_string( - 'project_id', os.environ['PROJECT_ID'], 'ID of the current GCP project.' -) +_PROJECT_ID = flags.DEFINE_string('project_id', os.environ['PROJECT_ID'], + 'ID of the current GCP project.') def build_query(raw_query, params: dict = None): @@ -48,11 +47,13 @@ def build_query(raw_query, params: dict = None): print(query) return query -def run_query(query_file_name: str, bq_client: bigquery.Client, params: Dict=dict()) -> pd.DataFrame: + +def run_query( + query_file_name: str, bq_client: bigquery.Client, + params: Dict = dict()) -> pd.DataFrame: current_script_directory = os.path.dirname(os.path.abspath(__file__)) - with open(os.path.join( - current_script_directory, query_file_name+'.sql' - )) as file: + with open(os.path.join(current_script_directory, + query_file_name + '.sql')) as file: query_template = file.read() return bq_client.query( @@ -61,6 +62,7 @@ def run_query(query_file_name: str, bq_client: bigquery.Client, params: Dict=dic {'log_id': _LOG_ID.value} | params, )).to_dataframe() + def load_table(table_name: str, bq_client: bigquery.Client) -> pd.DataFrame: """Queries a table and returns a DataFrame with its contents. @@ -68,45 +70,54 @@ def load_table(table_name: str, bq_client: bigquery.Client) -> pd.DataFrame: table_name: The fully-qualified name of the table to be read. bq_client: Object via which the BigQuery API will be accessed. 
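
  Returns:
    A DataFrame holding the full contents of table_name.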
""" - return bq_client.query( - build_query( - f'SELECT * FROM `{table_name}`', - )).to_dataframe() + return bq_client.query(build_query( + f'SELECT * FROM `{table_name}`',)).to_dataframe() + - def main(argv: Sequence[str]) -> None: if len(argv) > 1: raise app.UsageError('Too many command-line arguments.') bq_client = bigquery.Client(project=_PROJECT_ID.value) - if os.path.isfile(_OUTFILE.value+'.states'): - states = pd.read_csv(_OUTFILE.value+'.states') - variables = pd.read_csv(_OUTFILE.value+'.variables') + if os.path.isfile(_OUTFILE.value + '.states'): + states = pd.read_csv(_OUTFILE.value + '.states') + variables = pd.read_csv(_OUTFILE.value + '.variables') else: for type in ['autoreg', 'boundary', 'initial']: run_query(f'sim_named_{type}_var', bq_client) run_query('sim_value', bq_client, {'type': type}) run_query('sim_all_vars', bq_client, {'type': type}) - - autoreg_variables = load_table(f'cameltrain.sight_logs.{_LOG_ID.value}_autoreg_all_vars_log', bq_client)['label'].tolist() - boundary_variables = load_table(f'cameltrain.sight_logs.{_LOG_ID.value}_boundary_all_vars_log', bq_client)['label'].tolist() - initial_variables = load_table(f'cameltrain.sight_logs.{_LOG_ID.value}_initial_all_vars_log', bq_client)['label'].tolist() - print('autoreg_variables(#%d)=%s' % (len(autoreg_variables), autoreg_variables)) - print('boundary_variables(#%d)=%s' % (len(boundary_variables), boundary_variables)) - print('initial_variables(#%d)=%s' % (len(initial_variables), initial_variables)) + autoreg_variables = load_table( + f'cameltrain.sight_logs.{_LOG_ID.value}_autoreg_all_vars_log', + bq_client)['label'].tolist() + boundary_variables = load_table( + f'cameltrain.sight_logs.{_LOG_ID.value}_boundary_all_vars_log', + bq_client)['label'].tolist() + initial_variables = load_table( + f'cameltrain.sight_logs.{_LOG_ID.value}_initial_all_vars_log', + bq_client)['label'].tolist() + print('autoreg_variables(#%d)=%s' % + (len(autoreg_variables), autoreg_variables)) + print('boundary_variables(#%d)=%s' % + (len(boundary_variables), boundary_variables)) + print('initial_variables(#%d)=%s' % + (len(initial_variables), initial_variables)) for type in ['autoreg', 'boundary', 'initial']: run_query('sim_unordered_time_series', bq_client, {'type': type}) - run_query('sim_ordered_time_series', bq_client, {'num_autoreg_vars': len(autoreg_variables), - 'num_boundary_vars': len(boundary_variables), - 'num_initial_vars': len(initial_variables), - }) - + run_query( + 'sim_ordered_time_series', bq_client, { + 'num_autoreg_vars': len(autoreg_variables), + 'num_boundary_vars': len(boundary_variables), + 'num_initial_vars': len(initial_variables), + }) + extract_job = bq_client.extract_table( - bigquery.DatasetReference(_PROJECT_ID.value, 'sight_logs').table(f'{_LOG_ID.value}_simulation_ordered_time_series_log'), + bigquery.DatasetReference(_PROJECT_ID.value, 'sight_logs').table( + f'{_LOG_ID.value}_simulation_ordered_time_series_log'), f'gs://{_PROJECT_ID.value}-sight/sight-logs/{_LOG_ID.value}.sim_ordered_time_series.*.csv', # Location must match that of the source table. location="US", @@ -114,39 +125,46 @@ def main(argv: Sequence[str]) -> None: extract_job.result() # Waits for job to complete. 
out = subprocess.run( - ['gsutil', 'cp', - f'gs://{_PROJECT_ID.value}-sight/sight-logs/*{_LOG_ID.value}.sim_ordered_time_series.*.csv', - '/tmp'], + [ + 'gsutil', 'cp', + f'gs://{_PROJECT_ID.value}-sight/sight-logs/*{_LOG_ID.value}.sim_ordered_time_series.*.csv', + '/tmp' + ], capture_output=True, # check=True, ) print(out) - + time_series = [] - for i, ts_file in enumerate(glob.glob(f'/tmp/*{_LOG_ID.value}.sim_ordered_time_series.*.csv')): + for i, ts_file in enumerate( + glob.glob(f'/tmp/*{_LOG_ID.value}.sim_ordered_time_series.*.csv')): print(ts_file) # print(pd.read_csv(ts_file)) cur_ts = pd.read_csv(ts_file) # cur_ts.set_index(['sim_location', 'time_step_index'], inplace=True) # print('cur_ts.dtypes=', cur_ts.dtypes) - cur_ts[[f'autoreg:{v}' for v in autoreg_variables]] = cur_ts['autoreg_values'].str.split(',', expand=True) + cur_ts[[f'autoreg:{v}' for v in autoreg_variables + ]] = cur_ts['autoreg_values'].str.split(',', expand=True) cur_ts.drop(columns='autoreg_values', inplace=True) - cur_ts[[f'boundary:{v}' for v in boundary_variables]] = cur_ts['boundary_values'].str.split(',', expand=True) + cur_ts[[f'boundary:{v}' for v in boundary_variables + ]] = cur_ts['boundary_values'].str.split(',', expand=True) cur_ts.drop(columns='boundary_values', inplace=True) - cur_ts[[f'initial:{v}' for v in initial_variables]] = cur_ts['initial_values'].str.split(',', expand=True) + cur_ts[[f'initial:{v}' for v in initial_variables + ]] = cur_ts['initial_values'].str.split(',', expand=True) cur_ts.drop(columns='initial_values', inplace=True) - + # tree_species_dummies = pd.get_dummies(cur_ts['Tree Species'], columns=['TreeSpecies', ]) # cur_ts.drop(columns='Tree Species', inplace=True) # cur_ts = pd.concat([cur_ts, tree_species_dummies], axis=1) - print('cur_ts.columns=%s=%s' %(len(cur_ts.columns), cur_ts.columns.tolist())) - cur_ts.to_csv(_OUTFILE.value+'.'+str(i), index=False) + print('cur_ts.columns=%s=%s' % + (len(cur_ts.columns), cur_ts.columns.tolist())) + cur_ts.to_csv(_OUTFILE.value + '.' 
+ str(i), index=False) print('cur_ts=', cur_ts) time_series.append(cur_ts) ts_df = pd.concat(time_series, axis=0) # ts_df[variables] = ts_df['values'].str.split(',', expand=True) - + print(ts_df) # return @@ -155,14 +173,14 @@ def main(argv: Sequence[str]) -> None: # 'SELECT * FROM `cameltrain.sight_logs.{log_id}_simulation_ordered_time_series_log`', # {'log_id': _LOG_ID.value}, # )).to_dataframe() - + # pd.set_option('display.max_columns', None) # pd.set_option('display.max_rows', 1) # print('states=%s' % states) if _OUTFILE.value: ts_df.reset_index().to_csv(_OUTFILE.value, index=False) # variables.to_csv(_OUTFILE.value+'.variables', index=False) - + # value_types = pd.DataFrame(states['value_types'].tolist(), columns=variables).reset_index() # double_values = pd.DataFrame(states['double_values'].tolist(), columns=variables).reset_index() # string_values = pd.DataFrame(states['string_values'].tolist(), columns=variables).reset_index() @@ -190,5 +208,6 @@ def main(argv: Sequence[str]) -> None: # if _OUTFILE.value: # states.to_csv(_OUTFILE.value, index=False) + if __name__ == '__main__': app.run(main) diff --git a/py/sight/widgets/simulation/run_trace.py b/py/sight/widgets/simulation/run_trace.py index 8e389b2..4843d81 100644 --- a/py/sight/widgets/simulation/run_trace.py +++ b/py/sight/widgets/simulation/run_trace.py @@ -17,15 +17,16 @@ from absl import app from absl import flags -from helpers.logs.logs_handler import logger as logging import apache_beam as beam -import numpy as np - -from google3.analysis.dremel.core.capacitor.public.python import pywrap_record_reader +from google3.analysis.dremel.core.capacitor.public.python import ( + pywrap_record_reader +) from google3.pipeline.flume.py import runner from google3.pipeline.flume.py.io import capacitorio from google3.pyglib import gfile from google3.pyglib.contrib.gpathlib import gpath_flag +from helpers.logs.logs_handler import logger as logging +import numpy as np from sight import data_structures from sight.attribute import Attribute from sight.proto import sight_pb2 @@ -63,18 +64,18 @@ class LogVarSequence(beam.DoFn): - """Converts sets of named value objects to time-ordered sequences.""" + """Converts sets of named value objects to time-ordered sequences.""" - def __init__( - self, - file_name_prefix: str, - ): - self.file_name_prefix = file_name_prefix + def __init__( + self, + file_name_prefix: str, + ): + self.file_name_prefix = file_name_prefix - def process( - self, task: Tuple[Any, Iterable[Any]] - ) -> Iterator[Tuple[str, Tuple[str, List[numpy_sight.LabeledNpArray]]]]: - """Time-orders the sequence of objects for a given simulation attribute. + def process( + self, task: Tuple[Any, Iterable[Any]] + ) -> Iterator[Tuple[str, Tuple[str, List[numpy_sight.LabeledNpArray]]]]: + """Time-orders the sequence of objects for a given simulation attribute. Args: task: A sequence of objects that describe the state of some simulation @@ -83,128 +84,124 @@ def process( Yields: A time-ordered version of the input sequence. 
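
    As implemented, each yielded element pairs the variable name with one
    numpy_sight.LabeledNpArray per cluster, whose axes are 'params' and
    'time_steps'.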
""" - variables = list(task[1]) - logging.info('LogVarSequence task=%s', task) - logging.info('LogVarSequence variables=%s', list(task[1])) - - # Group variables by their cluster - cluster_to_variables = {} - for v in variables: - cluster_id = data_structures.from_ordered_log([v['cluster_id']]) - if cluster_id not in cluster_to_variables: - cluster_to_variables[cluster_id] = [] - cluster_to_variables[cluster_id].append(v) - - logging.info('LogVarSequence cluster_to_variables=%s', - cluster_to_variables) - cluster_vars = [] - for cluster_id in sorted(cluster_to_variables): - cluster_variables = cluster_to_variables[cluster_id] - all_parameter_values = {} - for v in cluster_variables: - for p in v['parameters']: - if p[0] not in all_parameter_values: - all_parameter_values[p[0]] = set() - all_parameter_values[p[0]].add(p[1]) - logging.info('all_parameter_values=%s', all_parameter_values) - - varied_parameters = set() - for param_name, values in all_parameter_values.items(): - if len(values) > 1: - varied_parameters.add(param_name) - logging.info('varied_parameters=%s', varied_parameters) - ordered_varied_parameters = sorted(varied_parameters) - - all_time_steps = {} - for v in cluster_variables: - logging.info('v["time_steps"]=%s', v['time_steps']) - for ts in v['time_steps']: - if ts not in all_time_steps: - all_time_steps[ts] = 0 - all_time_steps[ts] += 1 - logging.info('all_time_steps=%s', all_time_steps) - - for v in cluster_variables: - logging.info( - '%s: v["values"](%s)=%s', - v['parameters'], - v['values'].shape, - v['values'], - ) - - all_values = np.array([v['values'] for v in cluster_variables]) - logging.info('all_values(%s)=%s', all_values.shape, all_values) - - var_params = [] - for v in cluster_variables: - var_params.append(', '.join([ - f'{p}={dict(v["parameters"])[p]}' - for p in ordered_varied_parameters - ])) - - cluster_vars.append( - numpy_sight.LabeledNpArray( - all_values, - ['params', 'time_steps'], - [var_params, [str(i) for i in all_time_steps.keys()]], - )) - - yield ('', (task[0], cluster_vars)) + variables = list(task[1]) + logging.info('LogVarSequence task=%s', task) + logging.info('LogVarSequence variables=%s', list(task[1])) + + # Group variables by their cluster + cluster_to_variables = {} + for v in variables: + cluster_id = data_structures.from_ordered_log([v['cluster_id']]) + if cluster_id not in cluster_to_variables: + cluster_to_variables[cluster_id] = [] + cluster_to_variables[cluster_id].append(v) + + logging.info('LogVarSequence cluster_to_variables=%s', cluster_to_variables) + cluster_vars = [] + for cluster_id in sorted(cluster_to_variables): + cluster_variables = cluster_to_variables[cluster_id] + all_parameter_values = {} + for v in cluster_variables: + for p in v['parameters']: + if p[0] not in all_parameter_values: + all_parameter_values[p[0]] = set() + all_parameter_values[p[0]].add(p[1]) + logging.info('all_parameter_values=%s', all_parameter_values) + + varied_parameters = set() + for param_name, values in all_parameter_values.items(): + if len(values) > 1: + varied_parameters.add(param_name) + logging.info('varied_parameters=%s', varied_parameters) + ordered_varied_parameters = sorted(varied_parameters) + + all_time_steps = {} + for v in cluster_variables: + logging.info('v["time_steps"]=%s', v['time_steps']) + for ts in v['time_steps']: + if ts not in all_time_steps: + all_time_steps[ts] = 0 + all_time_steps[ts] += 1 + logging.info('all_time_steps=%s', all_time_steps) + + for v in cluster_variables: + logging.info( + '%s: 
v["values"](%s)=%s', + v['parameters'], + v['values'].shape, + v['values'], + ) + + all_values = np.array([v['values'] for v in cluster_variables]) + logging.info('all_values(%s)=%s', all_values.shape, all_values) + + var_params = [] + for v in cluster_variables: + var_params.append(', '.join([ + f'{p}={dict(v["parameters"])[p]}' for p in ordered_varied_parameters + ])) + + cluster_vars.append( + numpy_sight.LabeledNpArray( + all_values, + ['params', 'time_steps'], + [var_params, [str(i) for i in all_time_steps.keys()]], + )) + + yield ('', (task[0], cluster_vars)) class AggregateLogs(beam.DoFn): - """Collects the logs for multiple variables into a single log.""" + """Collects the logs for multiple variables into a single log.""" - def process( - self, - task: Tuple[str, Iterable[Tuple[str, - List[numpy_sight.LabeledNpArray]]]], - ) -> None: - """Time-orders the sequence of objects for a given simulation attribute. + def process( + self, + task: Tuple[str, Iterable[Tuple[str, List[numpy_sight.LabeledNpArray]]]], + ) -> None: + """Time-orders the sequence of objects for a given simulation attribute. Args: task: A sequence of objects that describe the state of some simulation attribute over time. """ - with Sight( - sight_pb2.Params( - label='Simulation', - log_owner='bronevet@google.com', - capacitor_output=True, - log_dir_path='/tmp/', - )) as sight: - for var in task[1]: - logging.info('AggregateLogs: var=%s', var) - with Attribute('variable', var[0], sight): - var_clusters = var[1] - for i in range(len(var_clusters)): - with Attribute('cluster', str(i), sight): - data_structures.log_var(var[0], var_clusters[i], - sight) + with Sight( + sight_pb2.Params( + label='Simulation', + log_owner='bronevet@google.com', + capacitor_output=True, + log_dir_path='/tmp/', + )) as sight: + for var in task[1]: + logging.info('AggregateLogs: var=%s', var) + with Attribute('variable', var[0], sight): + var_clusters = var[1] + for i in range(len(var_clusters)): + with Attribute('cluster', str(i), sight): + data_structures.log_var(var[0], var_clusters[i], sight) def read_capacitor_file(filename: str, - fields: Sequence[str] = ('*', ), + fields: Sequence[str] = ('*',), timeout: float = 60.0) -> Iterator[Any]: - """Yields all records from a capacitor file. + """Yields all records from a capacitor file. Args: filename: May be single file, or pattern. fields: Subset of fields to read. Default is to read all fields. timeout: I/O timeout. """ - filenames = gfile.Glob(filename) - if not filenames: - raise ValueError(f'No such file: {filename}') - for filename in filenames: - reader = pywrap_record_reader.RecordReader.CreateFromPath( - filename, fields, timeout) - for r in reader.IterRecords(): - yield r + filenames = gfile.Glob(filename) + if not filenames: + raise ValueError(f'No such file: {filename}') + for filename in filenames: + reader = pywrap_record_reader.RecordReader.CreateFromPath( + filename, fields, timeout) + for r in reader.IterRecords(): + yield r def sight_encode_value(val: int) -> sight_pb2.Object: - """Encodes a value as a Sight object. + """Encodes a value as a Sight object. This is done to ensure type consistency among the many data structures being used to describe simulation behavior. @@ -215,16 +212,15 @@ def sight_encode_value(val: int) -> sight_pb2.Object: Returns: The single Sight log object that encodes val. 
""" - with Sight(sight_pb2.Params(capacitor_output=True, - in_memory=True)) as sight: - data_structures.log(val, sight) - return sight.get_in_memory_log().obj[0] + with Sight(sight_pb2.Params(capacitor_output=True, in_memory=True)) as sight: + data_structures.log(val, sight) + return sight.get_in_memory_log().obj[0] def load_log_uid_clusters( root: beam.Pipeline, simulation_log_uid: beam.pvalue.PCollection ) -> beam.pvalue.PCollection[Tuple[str, Dict[str, sight_pb2.Object]]]: - """Loads clusters of logs into a PCollection. + """Loads clusters of logs into a PCollection. Args: root: The Beam pipeline. @@ -236,194 +232,175 @@ def load_log_uid_clusters( cluster that log was assigned to. """ - if _IN_CLUSTERS_FILE.value: - cluster_assignment = {} - for clusters_fname in gfile.Glob(_IN_CLUSTERS_FILE.value): - for message in read_capacitor_file( - clusters_fname, - [ - '*', - ], - 60, - ): - cluster_assignment_log = sight_pb2.Log() - cluster_assignment_log.ParseFromString( - message.SerializeToString()) - cluster_assignment = data_structures.from_log( - list(cluster_assignment_log.obj)) - if cluster_assignment['num_clusters'] == _NUM_CLUSTERS.value: - for key, value in data_structures.from_log( - list(cluster_assignment_log.obj)).items(): - cluster_assignment[key] = value - break - if not cluster_assignment: - logging.error('Failed to find a clustering with %d clusters.', - _NUM_CLUSTERS.value) - return - logging.info('cluster_assignment=%s', cluster_assignment) - - log_to_cluster_id = [] - for log_uid, cluster_id in cluster_assignment[ - 'cluster_assignment'].items(): - log_to_cluster_id.append((log_uid, { - 'cluster_id': - sight_encode_value(cluster_id) - })) - return root | beam.Create(log_to_cluster_id) - else: - return simulation_log_uid | beam.Map( - lambda x: (x[0], { - 'cluster_id': sight_encode_value(0) - })) + if _IN_CLUSTERS_FILE.value: + cluster_assignment = {} + for clusters_fname in gfile.Glob(_IN_CLUSTERS_FILE.value): + for message in read_capacitor_file( + clusters_fname, + [ + '*', + ], + 60, + ): + cluster_assignment_log = sight_pb2.Log() + cluster_assignment_log.ParseFromString(message.SerializeToString()) + cluster_assignment = data_structures.from_log( + list(cluster_assignment_log.obj)) + if cluster_assignment['num_clusters'] == _NUM_CLUSTERS.value: + for key, value in data_structures.from_log( + list(cluster_assignment_log.obj)).items(): + cluster_assignment[key] = value + break + if not cluster_assignment: + logging.error('Failed to find a clustering with %d clusters.', + _NUM_CLUSTERS.value) + return + logging.info('cluster_assignment=%s', cluster_assignment) + + log_to_cluster_id = [] + for log_uid, cluster_id in cluster_assignment['cluster_assignment'].items(): + log_to_cluster_id.append((log_uid, { + 'cluster_id': sight_encode_value(cluster_id) + })) + return root | beam.Create(log_to_cluster_id) + else: + return simulation_log_uid | beam.Map(lambda x: (x[0], { + 'cluster_id': sight_encode_value(0) + })) def main(argv: Sequence[str]) -> None: - if len(argv) > 1: - raise app.UsageError('Too many command-line arguments.') - - root = beam.Pipeline( - runner=runner.FlumeRunner()) # beam.runners.DirectRunner()) - reads = [] - for file_path in _IN_LOG_FILE.value: - reads.append( - root - | f'Read {file_path}' >> capacitorio.ReadFromCapacitor( - file_path, ['*'], beam.coders.ProtoCoder(sight_pb2.Object))) - - log: beam.pvalue.PCollection[sight_pb2.Object] = reads | beam.Flatten() - - simulation = analysis_utils.objects(log, - sight_pb2.BlockStart.ST_SIMULATION, - 
'simulation') - - simulation_log_uid: beam.pvalue.PCollection[analysis_utils.KeyedObjMap] = ( - analysis_utils.create_log_uid_key('simulation_and_cluster', - 'simulation', simulation)) - - clusters_key_log_uid = load_log_uid_clusters(root, simulation_log_uid) - _ = clusters_key_log_uid | 'clusters_key_log_uid' >> beam.io.WriteToText( - str(_OUT_FILE.value) + '.clusters_key_log_uid') - - objects_with_ancestors = log | beam.ParDo( - analysis_utils.ExtractAncestorBlockStartLocations()) - - simulation_and_cluster: beam.pvalue.PCollection[analysis_utils.ObjMap] = ( - { - 'simulation': simulation_log_uid, - 'clusters_key_log_uid': clusters_key_log_uid, - } - | 'simulation_and_cluster CoGroupByKey' >> beam.CoGroupByKey() - | beam.ParDo( - analysis_utils.CombineRecords('simulation', - 'clusters_key_log_uid'))) - simulation_and_cluster_sim_loc_uid: beam.pvalue.PCollection[ - analysis_utils.KeyedObjMap] = analysis_utils.create_loc_log_uid_key( - 'simulation_and_cluster', 'simulation', simulation_and_cluster) - - _ = (simulation_and_cluster - | 'write simulation_and_cluster' >> - beam.io.WriteToText(str(_OUT_FILE.value) + '.simulation_and_cluster')) - _ = (simulation_and_cluster_sim_loc_uid - | 'write simulation_and_cluster_sim_loc_uid' >> beam.io.WriteToText( - str(_OUT_FILE.value) + '.simulation_and_cluster_sim_loc_uid')) - - simulation_parameters = analysis_utils.block_start_objects_key_parent( - log, - sight_pb2.BlockStart.ST_SIMULATION_PARAMETERS, - 'simulation_parameters', - ) - _ = (simulation_parameters - | 'write simulation_parameters' >> - beam.io.WriteToText(str(_OUT_FILE.value) + '.simulation_parameters')) - simulation_time_step = analysis_utils.block_start_objects_key_parent( - log, sight_pb2.BlockStart.ST_SIMULATION_TIME_STEP, - 'simulation_time_step') - simulation_state = analysis_utils.block_start_objects_key_parent( - log, sight_pb2.BlockStart.ST_SIMULATION_STATE, 'simulation_state') - named_value = analysis_utils.block_start_objects_key_parent( - log, sight_pb2.BlockStart.ST_NAMED_VALUE, 'named_value') - _ = simulation_time_step | 'simulation_time_step' >> beam.io.WriteToText( - str(_OUT_FILE.value) + '.simulation_time_step') - - simulation_and_parameter_objects = ( - analysis_utils.create_simulation_and_parameter_objects( - log, - objects_with_ancestors, - simulation_and_cluster_sim_loc_uid, - simulation_parameters, - named_value, - str(_OUT_FILE.value), - )) - _ = (simulation_and_parameter_objects - | 'simulation_and_parameter_objects' >> beam.io.WriteToText( - str(_OUT_FILE.value) + '.simulation_and_parameter_objects')) - - simulation_time_step_state_objects = ( - analysis_utils.create_simulation_time_step_state_objects( - objects_with_ancestors, - simulation_and_cluster_sim_loc_uid, - simulation_time_step, - simulation_state, - named_value, - str(_OUT_FILE.value), - )) - _ = (simulation_time_step_state_objects - | 'simulation_time_step_state_objects' >> beam.io.WriteToText( - str(_OUT_FILE.value) + '.simulation_time_step_state_objects')) - - simulation_time_step_state_objects_in_time_order = ( - analysis_utils.create_named_value_label_log_uid_key( - 'simulation_time_step_state_objects', - simulation_time_step_state_objects, - ) - | 'simulation_time_step_state_objects GroupByKey' >> beam.GroupByKey() - | beam.ParDo(analysis_utils.NamedObjectsToSequence())) - - _ = (simulation_time_step_state_objects_in_time_order - | 'simulation_time_step_state_objects_in_time_order' >> - beam.io.WriteToText( - str(_OUT_FILE.value) + - '.simulation_time_step_state_objects_in_time_order')) - - 
simulation_params_and_vars = ( - { - 'simulation_and_parameter_objects': - (analysis_utils.create_loc_log_uid_key( - 'simulation_and_parameter_objects', - 'simulation', - simulation_and_parameter_objects, - )), - 'simulation_time_step_state_objects_in_time_order': - (analysis_utils.create_loc_log_uid_key( - 'simulation_time_step_state_objects_in_time_order', - 'simulation', - simulation_time_step_state_objects_in_time_order, - )), - } - | 'simulation_params_steps_objects_in_time_order CoGroupByKey' >> - beam.CoGroupByKey() - | beam.ParDo( - analysis_utils.CombineParametersAndTimeSeries( - 'simulation_and_parameter_objects', - 'simulation_time_step_state_objects_in_time_order', - ))) - - _ = (simulation_params_and_vars - | 'simulation_params_and_vars' >> beam.io.WriteToText( - str(_OUT_FILE.value) + '.simulation_params_and_vars')) - - _ = ( - analysis_utils.create_var_key('simulation_params_and_vars', - simulation_params_and_vars) - | - 'simulation_params_and_vars GroupByKey - varname' >> beam.GroupByKey() - | beam.ParDo(LogVarSequence(str(_OUT_FILE.value))) - | 'log var sequences gather all' >> beam.GroupByKey() - | beam.ParDo(AggregateLogs())) - - results = root.run() - results.wait_until_finish() + if len(argv) > 1: + raise app.UsageError('Too many command-line arguments.') + + root = beam.Pipeline( + runner=runner.FlumeRunner()) # beam.runners.DirectRunner()) + reads = [] + for file_path in _IN_LOG_FILE.value: + reads.append(root | f'Read {file_path}' >> capacitorio.ReadFromCapacitor( + file_path, ['*'], beam.coders.ProtoCoder(sight_pb2.Object))) + + log: beam.pvalue.PCollection[sight_pb2.Object] = reads | beam.Flatten() + + simulation = analysis_utils.objects(log, sight_pb2.BlockStart.ST_SIMULATION, + 'simulation') + + simulation_log_uid: beam.pvalue.PCollection[analysis_utils.KeyedObjMap] = ( + analysis_utils.create_log_uid_key('simulation_and_cluster', 'simulation', + simulation)) + + clusters_key_log_uid = load_log_uid_clusters(root, simulation_log_uid) + _ = clusters_key_log_uid | 'clusters_key_log_uid' >> beam.io.WriteToText( + str(_OUT_FILE.value) + '.clusters_key_log_uid') + + objects_with_ancestors = log | beam.ParDo( + analysis_utils.ExtractAncestorBlockStartLocations()) + + simulation_and_cluster: beam.pvalue.PCollection[analysis_utils.ObjMap] = ({ + 'simulation': simulation_log_uid, + 'clusters_key_log_uid': clusters_key_log_uid, + } | 'simulation_and_cluster CoGroupByKey' >> beam.CoGroupByKey() | beam.ParDo( + analysis_utils.CombineRecords('simulation', 'clusters_key_log_uid'))) + simulation_and_cluster_sim_loc_uid: beam.pvalue.PCollection[ + analysis_utils.KeyedObjMap] = analysis_utils.create_loc_log_uid_key( + 'simulation_and_cluster', 'simulation', simulation_and_cluster) + + _ = (simulation_and_cluster | 'write simulation_and_cluster' >> + beam.io.WriteToText(str(_OUT_FILE.value) + '.simulation_and_cluster')) + _ = (simulation_and_cluster_sim_loc_uid | + 'write simulation_and_cluster_sim_loc_uid' >> beam.io.WriteToText( + str(_OUT_FILE.value) + '.simulation_and_cluster_sim_loc_uid')) + + simulation_parameters = analysis_utils.block_start_objects_key_parent( + log, + sight_pb2.BlockStart.ST_SIMULATION_PARAMETERS, + 'simulation_parameters', + ) + _ = (simulation_parameters | 'write simulation_parameters' >> + beam.io.WriteToText(str(_OUT_FILE.value) + '.simulation_parameters')) + simulation_time_step = analysis_utils.block_start_objects_key_parent( + log, sight_pb2.BlockStart.ST_SIMULATION_TIME_STEP, 'simulation_time_step') + simulation_state = 
analysis_utils.block_start_objects_key_parent( + log, sight_pb2.BlockStart.ST_SIMULATION_STATE, 'simulation_state') + named_value = analysis_utils.block_start_objects_key_parent( + log, sight_pb2.BlockStart.ST_NAMED_VALUE, 'named_value') + _ = simulation_time_step | 'simulation_time_step' >> beam.io.WriteToText( + str(_OUT_FILE.value) + '.simulation_time_step') + + simulation_and_parameter_objects = ( + analysis_utils.create_simulation_and_parameter_objects( + log, + objects_with_ancestors, + simulation_and_cluster_sim_loc_uid, + simulation_parameters, + named_value, + str(_OUT_FILE.value), + )) + _ = (simulation_and_parameter_objects | + 'simulation_and_parameter_objects' >> beam.io.WriteToText( + str(_OUT_FILE.value) + '.simulation_and_parameter_objects')) + + simulation_time_step_state_objects = ( + analysis_utils.create_simulation_time_step_state_objects( + objects_with_ancestors, + simulation_and_cluster_sim_loc_uid, + simulation_time_step, + simulation_state, + named_value, + str(_OUT_FILE.value), + )) + _ = (simulation_time_step_state_objects | + 'simulation_time_step_state_objects' >> beam.io.WriteToText( + str(_OUT_FILE.value) + '.simulation_time_step_state_objects')) + + simulation_time_step_state_objects_in_time_order = ( + analysis_utils.create_named_value_label_log_uid_key( + 'simulation_time_step_state_objects', + simulation_time_step_state_objects, + ) | 'simulation_time_step_state_objects GroupByKey' >> beam.GroupByKey() | + beam.ParDo(analysis_utils.NamedObjectsToSequence())) + + _ = ( + simulation_time_step_state_objects_in_time_order | + 'simulation_time_step_state_objects_in_time_order' >> beam.io.WriteToText( + str(_OUT_FILE.value) + + '.simulation_time_step_state_objects_in_time_order')) + + simulation_params_and_vars = ( + { + 'simulation_and_parameter_objects': + (analysis_utils.create_loc_log_uid_key( + 'simulation_and_parameter_objects', + 'simulation', + simulation_and_parameter_objects, + )), + 'simulation_time_step_state_objects_in_time_order': + (analysis_utils.create_loc_log_uid_key( + 'simulation_time_step_state_objects_in_time_order', + 'simulation', + simulation_time_step_state_objects_in_time_order, + )), + } | 'simulation_params_steps_objects_in_time_order CoGroupByKey' >> + beam.CoGroupByKey() | beam.ParDo( + analysis_utils.CombineParametersAndTimeSeries( + 'simulation_and_parameter_objects', + 'simulation_time_step_state_objects_in_time_order', + ))) + + _ = ( + simulation_params_and_vars | 'simulation_params_and_vars' >> + beam.io.WriteToText(str(_OUT_FILE.value) + '.simulation_params_and_vars')) + + _ = (analysis_utils.create_var_key('simulation_params_and_vars', + simulation_params_and_vars) | + 'simulation_params_and_vars GroupByKey - varname' >> beam.GroupByKey() | + beam.ParDo(LogVarSequence(str(_OUT_FILE.value))) | + 'log var sequences gather all' >> beam.GroupByKey() | + beam.ParDo(AggregateLogs())) + + results = root.run() + results.wait_until_finish() if __name__ == '__main__': - app.run(main) + app.run(main) diff --git a/py/sight/widgets/simulation/simulation.py b/py/sight/widgets/simulation/simulation.py index 73ef6de..8df7c4b 100644 --- a/py/sight/widgets/simulation/simulation.py +++ b/py/sight/widgets/simulation/simulation.py @@ -15,16 +15,17 @@ import inspect from typing import Any, Callable, Dict, Optional, Text, Tuple + from helpers.logs.logs_handler import logger as logging -from sight.proto import sight_pb2 from sight.exception import exception +from sight.proto import sight_pb2 from sight.trace import Trace from 
sight.widgets.decision import decision from sight.widgets.simulation.simulation_parameters import SimulationParameters class Simulation(object): - """Encapsulates start and stop points where a Simulation is active. + """Encapsulates start and stop points where a Simulation is active. Attributes: sight: Reference to the Sight logger via which this simulation is logged. @@ -33,14 +34,14 @@ class Simulation(object): this particular run should be compared. """ - def __init__( - self, - label: str, - sight: Any, - parameters: Optional[Dict[Text, Any]], - reference_trace_file_path: Optional[str] = None, - ): - """Creates and enters a simulation block with a given label and parameters. + def __init__( + self, + label: str, + sight: Any, + parameters: Optional[Dict[Text, Any]], + reference_trace_file_path: Optional[str] = None, + ): + """Creates and enters a simulation block with a given label and parameters. Args: label: The label that identifies this block. @@ -50,85 +51,82 @@ def __init__( reference_trace_file_path: Path of the file that contains the Sight log of a reference simulation run to compare this run to. """ - self.sight = sight - if sight is None: - logging.info('<<>> %s', self.label) - if self.sight is None: - logging.info('>>> %s', self.label) - return - - # Unregister the associated simulation parameters object with Sight. - self.sight.widget_simulation_state.simulation_parameters = None - - # Unregister this simulation object with Sight. - self.sight.widget_simulation_state.simulation = None - self.sight.widget_simulation_state.state = {} - - # pytype: disable=attribute-error - self.sight.exit_block(self.label, sight_pb2.Object(), - inspect.currentframe().f_back) - # pytype: enable=attribute-error - - @classmethod - def run_decision_configuration( - cls, - label: str, - parameters: Optional[Dict[Text, Any]], - driver_fn: Callable[[Any], Any], - state_attrs: Dict[str, Tuple[float, float]], - action_attrs: Dict[str, Tuple[float, float]], - sight: Any, - reference_trace_file_path: Optional[str] = None, - ): - """Runs this simulation, using the Decision API to configure it. + self.sight = sight + if sight is None: + logging.info('<<>> %s', self.label) + if self.sight is None: + logging.info('>>> %s', self.label) + return + + # Unregister the associated simulation parameters object with Sight. + self.sight.widget_simulation_state.simulation_parameters = None + + # Unregister this simulation object with Sight. + self.sight.widget_simulation_state.simulation = None + self.sight.widget_simulation_state.state = {} + + # pytype: disable=attribute-error + self.sight.exit_block(self.label, sight_pb2.Object(), + inspect.currentframe().f_back) + # pytype: enable=attribute-error + + @classmethod + def run_decision_configuration( + cls, + label: str, + parameters: Optional[Dict[Text, Any]], + driver_fn: Callable[[Any], Any], + state_attrs: Dict[str, Tuple[float, float]], + action_attrs: Dict[str, Tuple[float, float]], + sight: Any, + reference_trace_file_path: Optional[str] = None, + ): + """Runs this simulation, using the Decision API to configure it. Args: label: The label that identifies this simulation. @@ -148,14 +146,13 @@ def run_decision_configuration( a reference simulation run to compare this run to. 
""" - def run(sight): - with Simulation(label, sight, parameters, - reference_trace_file_path): - driver_fn(sight) - - decision.run( - driver_fn=run, - state_attrs=state_attrs.copy(), - action_attrs=action_attrs, - sight=sight, - ) + def run(sight): + with Simulation(label, sight, parameters, reference_trace_file_path): + driver_fn(sight) + + decision.run( + driver_fn=run, + state_attrs=state_attrs.copy(), + action_attrs=action_attrs, + sight=sight, + ) diff --git a/py/sight/widgets/simulation/simulation_parameters.py b/py/sight/widgets/simulation/simulation_parameters.py index a9d43aa..b067fa1 100644 --- a/py/sight/widgets/simulation/simulation_parameters.py +++ b/py/sight/widgets/simulation/simulation_parameters.py @@ -15,18 +15,18 @@ import inspect from typing import Any, Dict, Text -from helpers.logs.logs_handler import logger as logging -from sight.proto import sight_pb2 +from helpers.logs.logs_handler import logger as logging from sight import data_structures from sight.exception import exception +from sight.proto import sight_pb2 class SimulationParameters(object): - """Encapsulates log region that documents a simulation's parameters.""" + """Encapsulates log region that documents a simulation's parameters.""" - def __init__(self, parameters: Dict[Text, Any], sight: Any) -> None: - """Creates and enters a block of a simulation's parameters. + def __init__(self, parameters: Dict[Text, Any], sight: Any) -> None: + """Creates and enters a block of a simulation's parameters. Args: parameters: Key-value pairs that identify this block and all of its @@ -36,71 +36,69 @@ def __init__(self, parameters: Dict[Text, Any], sight: Any) -> None: Returns: The starting location of this simulation parameters block. """ - self.parameters = {} - self.sight = sight - if sight is None: - logging.info('<<>>') - return - - # Unregister this simulation parameters object with Sight. - # self.sight.widget_simulation_state.simulation_parameters = None - - # pytype: disable=attribute-error - self.sight.exit_block( - 'SimulationParameters', - sight_pb2.Object(block_end=sight_pb2.BlockEnd( - sub_type=sight_pb2.BlockEnd.ST_SIMULATION_PARAMETERS)), - inspect.currentframe().f_back, - ) - # pytype: enable=attribute-error + self.parameters = {} + self.sight = sight + if sight is None: + logging.info('<<>>') + return + + # Unregister this simulation parameters object with Sight. 
+ # self.sight.widget_simulation_state.simulation_parameters = None + + # pytype: disable=attribute-error + self.sight.exit_block( + 'SimulationParameters', + sight_pb2.Object(block_end=sight_pb2.BlockEnd( + sub_type=sight_pb2.BlockEnd.ST_SIMULATION_PARAMETERS)), + inspect.currentframe().f_back, + ) + # pytype: enable=attribute-error diff --git a/py/sight/widgets/simulation/simulation_state.py b/py/sight/widgets/simulation/simulation_state.py index 66afbb5..c4af98d 100644 --- a/py/sight/widgets/simulation/simulation_state.py +++ b/py/sight/widgets/simulation/simulation_state.py @@ -16,26 +16,27 @@ from enum import Enum import inspect from typing import Any, Dict, Text + from helpers.logs.logs_handler import logger as logging -from sight.proto import sight_pb2 from sight import data_structures from sight.exception import exception +from sight.proto import sight_pb2 from sight.widgets.decision import decision class SimulationState(object): - """Encapsulates log region that documents a simulation's state.""" + """Encapsulates log region that documents a simulation's state.""" - class Type(Enum): - INITIAL = 1 - BOUNDARY = 2 - DYNAMIC = 3 + class Type(Enum): + INITIAL = 1 + BOUNDARY = 2 + DYNAMIC = 3 - def __init__(self, - state: Dict[Text, Any], - sight: Any, - type: Type = Type.DYNAMIC) -> None: - """Creates and enters a block of a simulation's state. + def __init__(self, + state: Dict[Text, Any], + sight: Any, + type: Type = Type.DYNAMIC) -> None: + """Creates and enters a block of a simulation's state. Args: state: Key-value pairs that identify this block and all of its contained @@ -45,126 +46,120 @@ def __init__(self, Returns: The starting location of this simulation state block. """ - self.sight = sight - if sight is None: - logging.info('<<>>') - return - - self.type = type - proto_type = sight_pb2.BlockEnd.ST_SIMULATION_STATE - if type == self.Type.INITIAL: - proto_type = sight_pb2.BlockEnd.ST_SIMULATION_INITIAL_STATE - elif type == self.Type.BOUNDARY: - proto_type = sight_pb2.BlockEnd.ST_SIMULATION_BOUNDARY_STATE - - # pytype: disable=attribute-error - self.sight.exit_block( - 'SimulationState', - sight_pb2.Object(block_end=sight_pb2.BlockEnd( - sub_type=proto_type)), - inspect.currentframe().f_back, - ) - # pytype: enable=attribute-error - - # If there is a reference trace, report the difference between - # this trace and the reference trace via the Decision API. - reference_trace = self.sight.widget_simulation_state.reference_trace - if reference_trace: - reference_state = {} - while True: - cur_named_var = reference_trace.advance_to_within_block([ - sight_pb2.Object.ST_BLOCK_START, - sight_pb2.BlockStart.ST_NAMED_VALUE, - ]) - if not cur_named_var: - break - name, value = data_structures.from_ordered_log( - reference_trace.collect_current_block()) - reference_state[name] = value - - observed_state_vars = reference_state.keys() - sum_relative_errors = 0 - num_vars = 0 - for name in observed_state_vars: - if (max( - abs(self.sight.widget_simulation_state.state[name]), - abs(reference_state[name]), - ) > 0): - sum_relative_errors += abs( - (self.sight.widget_simulation_state.state[name] - - reference_state[name]) / max( - abs(self.sight.widget_simulation_state.state[name] - ), - abs(reference_state[name]), - )) - num_vars += 1 - - error_relative_to_reference_run = (sum_relative_errors / - num_vars if num_vars > 0 else 0) - decision.decision_outcome('distance', - 0 - error_relative_to_reference_run, - self.sight) - - # Unregister this simulation state object with Sight. 
- if self.sight.widget_simulation_state.reference_trace: - self.sight.widget_simulation_state.reference_trace.collect_current_block( - ) - self.sight.widget_simulation_state.simulation_state = None + self.sight = sight + if sight is None: + logging.info('<<>>') + return + + self.type = type + proto_type = sight_pb2.BlockEnd.ST_SIMULATION_STATE + if type == self.Type.INITIAL: + proto_type = sight_pb2.BlockEnd.ST_SIMULATION_INITIAL_STATE + elif type == self.Type.BOUNDARY: + proto_type = sight_pb2.BlockEnd.ST_SIMULATION_BOUNDARY_STATE + + # pytype: disable=attribute-error + self.sight.exit_block( + 'SimulationState', + sight_pb2.Object(block_end=sight_pb2.BlockEnd(sub_type=proto_type)), + inspect.currentframe().f_back, + ) + # pytype: enable=attribute-error + + # If there is a reference trace, report the difference between + # this trace and the reference trace via the Decision API. + reference_trace = self.sight.widget_simulation_state.reference_trace + if reference_trace: + reference_state = {} + while True: + cur_named_var = reference_trace.advance_to_within_block([ + sight_pb2.Object.ST_BLOCK_START, + sight_pb2.BlockStart.ST_NAMED_VALUE, + ]) + if not cur_named_var: + break + name, value = data_structures.from_ordered_log( + reference_trace.collect_current_block()) + reference_state[name] = value + + observed_state_vars = reference_state.keys() + sum_relative_errors = 0 + num_vars = 0 + for name in observed_state_vars: + if (max( + abs(self.sight.widget_simulation_state.state[name]), + abs(reference_state[name]), + ) > 0): + sum_relative_errors += abs( + (self.sight.widget_simulation_state.state[name] - + reference_state[name]) / max( + abs(self.sight.widget_simulation_state.state[name]), + abs(reference_state[name]), + )) + num_vars += 1 + + error_relative_to_reference_run = (sum_relative_errors / + num_vars if num_vars > 0 else 0) + decision.decision_outcome('distance', 0 - error_relative_to_reference_run, + self.sight) + + # Unregister this simulation state object with Sight. + if self.sight.widget_simulation_state.reference_trace: + self.sight.widget_simulation_state.reference_trace.collect_current_block() + self.sight.widget_simulation_state.simulation_state = None def state_updated( @@ -172,13 +167,13 @@ def state_updated( obj_to_log: Any, sight: Any, ) -> None: - """Informs the Simulation API that the current state has been updated. + """Informs the Simulation API that the current state has been updated. Args: name: The name of the updated state variable. obj_to_log: The value of the state variable. sight: Instance of a Sight logger. 
""" - if (sight.widget_simulation_state - and sight.widget_simulation_state.simulation_state): - sight.widget_simulation_state.state[name] = obj_to_log + if (sight.widget_simulation_state and + sight.widget_simulation_state.simulation_state): + sight.widget_simulation_state.state[name] = obj_to_log diff --git a/py/sight/widgets/simulation/simulation_time_step.py b/py/sight/widgets/simulation/simulation_time_step.py index 13ddf8d..abdea7a 100644 --- a/py/sight/widgets/simulation/simulation_time_step.py +++ b/py/sight/widgets/simulation/simulation_time_step.py @@ -15,23 +15,24 @@ import inspect from typing import Any, Sequence + from helpers.logs.logs_handler import logger as logging -from sight.proto import sight_pb2 from sight.exception import exception +from sight.proto import sight_pb2 class SimulationTimeStep(object): - """Encapsulates a single simulation time step un the Sight log.""" - - def __init__( - self, - time_step_index: Sequence[int], - time_step: float, - time_step_size: float, - time_step_units: sight_pb2.SimulationTimeStepStart.TimeStepUnits, - sight: Any, - ): - """Creates and enters a simulation time step block. + """Encapsulates a single simulation time step un the Sight log.""" + + def __init__( + self, + time_step_index: Sequence[int], + time_step: float, + time_step_size: float, + time_step_units: sight_pb2.SimulationTimeStepStart.TimeStepUnits, + sight: Any, + ): + """Creates and enters a simulation time step block. Args: time_step_index: Integral index of the time step within the overall @@ -46,77 +47,75 @@ def __init__( Returns: The starting location of this time step block. """ - self.sight = sight - if sight is None: - logging.info( - '<<>>') - return - - # Unregister this simulation time step object with Sight. - if self.sight.widget_simulation_state.reference_trace: - self.sight.widget_simulation_state.reference_trace.collect_current_block( - ) - self.sight.widget_simulation_state.simulation_time_step = None - - # pytype: disable=attribute-error - self.sight.exit_block( - 'SimulationTimeStep', - sight_pb2.Object(block_end=sight_pb2.BlockEnd( - sub_type=sight_pb2.BlockEnd.ST_SIMULATION_TIME_STEP)), - inspect.currentframe().f_back, - ) - # pytype: enable=attribute-error - self.sight.unset_attribute('SimulationTimeStep') + self.sight = sight + if sight is None: + logging.info( + '<<>>') + return + + # Unregister this simulation time step object with Sight. + if self.sight.widget_simulation_state.reference_trace: + self.sight.widget_simulation_state.reference_trace.collect_current_block() + self.sight.widget_simulation_state.simulation_time_step = None + + # pytype: disable=attribute-error + self.sight.exit_block( + 'SimulationTimeStep', + sight_pb2.Object(block_end=sight_pb2.BlockEnd( + sub_type=sight_pb2.BlockEnd.ST_SIMULATION_TIME_STEP)), + inspect.currentframe().f_back, + ) + # pytype: enable=attribute-error + self.sight.unset_attribute('SimulationTimeStep') diff --git a/py/sight/widgets/simulation/simulation_widget_state.py b/py/sight/widgets/simulation/simulation_widget_state.py index b599a9f..dd964cc 100644 --- a/py/sight/widgets/simulation/simulation_widget_state.py +++ b/py/sight/widgets/simulation/simulation_widget_state.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- 
 """Encapsulates the state of the Simulation Sight widget."""
 
 import dataclasses
diff --git a/py/sight/widgets/simulation/split_log_time_series.py b/py/sight/widgets/simulation/split_log_time_series.py
index 630d072..6d419f1 100644
--- a/py/sight/widgets/simulation/split_log_time_series.py
+++ b/py/sight/widgets/simulation/split_log_time_series.py
@@ -4,16 +4,16 @@
 train_darts_surrogate.
 """
 
+import glob
 import os
-from typing import Optional, Dict, Sequence, Tuple
+import os.path
+import subprocess
+from typing import Dict, Optional, Sequence, Tuple
 
 from absl import app
 from absl import flags
-import glob
 from google.cloud import bigquery
 import pandas as pd
-import os.path
-import subprocess
 
 _LOG_ID = flags.DEFINE_string(
     'log_id',
@@ -47,61 +47,59 @@
     'The value for the split column that will be assigned to the validation dataset.',
 )
 
-_PROJECT_ID = flags.DEFINE_string(
-    'project_id', os.environ['PROJECT_ID'], 'ID of the current GCP project.'
-)
+_PROJECT_ID = flags.DEFINE_string('project_id', os.environ['PROJECT_ID'],
+                                  'ID of the current GCP project.')
 
-
 def main(argv: Sequence[str]) -> None:
   if len(argv) > 1:
     raise app.UsageError('Too many command-line arguments.')
-  
+
   data_df = pd.read_csv(_INFILE.value)
 
-  train_df = data_df[data_df[_SPLIT_COL.value]!=_SPLIT_VALIDATE_VAL.value]
+  train_df = data_df[data_df[_SPLIT_COL.value] != _SPLIT_VALIDATE_VAL.value]
   train_fname = f'{_LOG_ID.value}.train.${_SPLIT_COL.value}.${_SPLIT_VALIDATE_VAL.value}.csv'
   train_df.to_csv(train_fname, index=False)
   out = subprocess.run(
-      ['fileutil', 'cp',
-       f'/tmp/{train_fname',
-       # f'gs://{_PROJECT_ID.value}-sight/sight-logs/*{_LOG_ID.value}/{train_fname}'
-       f'/cns/oj-d/home/{os.environ["USER"]}/{_LOG_ID.value}/{train_fname}'
+      [
+          'fileutil',
+          'cp',
+          f'/tmp/{train_fname}',
+          # f'gs://{_PROJECT_ID.value}-sight/sight-logs/*{_LOG_ID.value}/{train_fname}'
+          f'/cns/oj-d/home/{os.environ["USER"]}/{_LOG_ID.value}/{train_fname}'
      ],
      capture_output=True,
      check=True,
  )
 
-  validate_df = data_df[data_df[_SPLIT_COL.value]!=_SPLIT_VALIDATE_VAL.value]
+  # The validation split keeps only the rows whose split-column value equals
+  # _SPLIT_VALIDATE_VAL; the train split above excludes exactly those rows.
+  validate_df = data_df[data_df[_SPLIT_COL.value] == _SPLIT_VALIDATE_VAL.value]
   validate_fname = f'{_LOG_ID.value}.validate.${_SPLIT_COL.value}.${_SPLIT_VALIDATE_VAL.value}.csv'
   validate_df.to_csv(validate_fname, index=False)
   out = subprocess.run(
-      ['gsutil', 'cp',
-       f'/tmp/{validate_fname',
-       # f'gs://{_PROJECT_ID.value}-sight/sight-logs/*{_LOG_ID.value}/{validate_fname}'
-       f'/cns/oj-d/home/{os.environ["USER"]}/{_LOG_ID.value}/{validate_fname}'
+      [
+          'gsutil',
+          'cp',
+          f'/tmp/{validate_fname}',
+          # f'gs://{_PROJECT_ID.value}-sight/sight-logs/*{_LOG_ID.value}/{validate_fname}'
+          f'/cns/oj-d/home/{os.environ["USER"]}/{_LOG_ID.value}/{validate_fname}'
      ],
      capture_output=True,
      check=True,
  )
 
   out = subprocess.run(
-      ['/google/bin/releases/tunelab/public/ingest_csv',
-      
----train_csv_file="${FLAGS_log_prediction_train_path}" \
---validation_csv_file="${FLAGS_log_prediction_val_path}" \
---col_names="input,target" \
---dataset_name="predictsubsequent" \
---output_dir="${FLAGS_basepath}"
-
-
-      'gsutil', 'cp',
-      f'/tmp/{validate_fname',
-      f'gs://{_PROJECT_ID.value}-sight/sight-logs/*{_LOG_ID.value}/{validate_fname}'
+      [
+          '/google/bin/releases/tunelab/public/ingest_csv',
+          '--train_csv_file="${FLAGS_log_prediction_train_path}"',
+          '--validation_csv_file="${FLAGS_log_prediction_val_path}"',
+          '--col_names="input,target"', '--dataset_name="predictsubsequent"',
+          '--output_dir="${FLAGS_basepath}"',
+      ],
+      capture_output=True,
+      check=True,
+  )
+  # The gsutil upload is a separate command and cannot share one argv list
+  # with ingest_csv, so it gets its own subprocess.run call.
+  out = subprocess.run(
+      [
+          'gsutil',
+          'cp',
+          f'/tmp/{validate_fname}',
+          
f'gs://{_PROJECT_ID.value}-sight/sight-logs/*{_LOG_ID.value}/{validate_fname}' ], capture_output=True, check=True, ) + if __name__ == '__main__': app.run(main) diff --git a/py/sight/widgets/simulation/train_darts_surrogate.py b/py/sight/widgets/simulation/train_darts_surrogate.py index 3c54497..36ce148 100644 --- a/py/sight/widgets/simulation/train_darts_surrogate.py +++ b/py/sight/widgets/simulation/train_darts_surrogate.py @@ -10,7 +10,11 @@ from absl import flags from darts import TimeSeries from darts.metrics.metrics import mae -from darts.models import LinearRegressionModel, RandomForest, RNNModel, NBEATSModel, LightGBMModel +from darts.models import LightGBMModel +from darts.models import LinearRegressionModel +from darts.models import NBEATSModel +from darts.models import RandomForest +from darts.models import RNNModel from darts.models.forecasting.block_rnn_model import BlockRNNModel import numpy as np import pandas as pd @@ -34,6 +38,7 @@ ) #'GF': '1601933670369823365' + def create_list_of_timeseries(df: pd.DataFrame) -> List[pd.DataFrame]: series = [] for idx, sim_data in df.groupby('sim_location'): @@ -69,16 +74,13 @@ def create_darts_time_series( row.append(s_row[v]) s_data.append(row) - lagged_s = pd.DataFrame( - s_data, columns=boundary_cond_vars + state_vars - ).reset_index() + lagged_s = pd.DataFrame(s_data, columns=boundary_cond_vars + + state_vars).reset_index() # print('lagged_s', lagged_s) time_series.append( - TimeSeries.from_dataframe( - lagged_s, 'index', boundary_cond_vars + state_vars - ) - ) - + TimeSeries.from_dataframe(lagged_s, 'index', + boundary_cond_vars + state_vars)) + return time_series @@ -91,7 +93,7 @@ def train_model(time_series: List[TimeSeries]): output_chunk_length=10, multi_models=True, ) - + # model = LightGBMModel(lags=[-10, -9, -8, -7, -6, -5, -4, -3, -2, -1], # output_chunk_length=10, # multi_models=True) @@ -119,9 +121,9 @@ def eval_model(model, time_series, split_point) -> float: total_steps = len(time_series[pred_idx]) # print(time_series[pred_idx]) train, val = time_series[pred_idx].split_before(split_point) - prediction = model.predict( - total_steps - split_point, series=train, num_samples=1 - ) + prediction = model.predict(total_steps - split_point, + series=train, + num_samples=1) # print (prediction) avg_err += mae(prediction, val) / len(time_series) @@ -144,24 +146,23 @@ def main(argv: Sequence[str]) -> None: raise app.UsageError('Too many command-line arguments.') dataset_df = pd.read_csv(_INFILE.value) - + control_vars = ['sim_location', 'time_step_index', 'next_time_step_index'] state_vars = [ - v - for v in dataset_df.columns + v for v in dataset_df.columns if v not in _BOUNDARY_COND_VARS.value and v not in control_vars ] # pd.set_option('display.max_columns', None) # print(dataset_df) # return - - time_series = create_darts_time_series( - create_list_of_timeseries(dataset_df), state_vars, _BOUNDARY_COND_VARS.value) + time_series = create_darts_time_series(create_list_of_timeseries(dataset_df), + state_vars, _BOUNDARY_COND_VARS.value) model = train_model(time_series) error = eval_model(model, time_series, 10) - + + if __name__ == '__main__': app.run(main) diff --git a/py/sight/widgets/simulation/train_lstm_surrogate.py b/py/sight/widgets/simulation/train_lstm_surrogate.py index 96028e9..106caba 100644 --- a/py/sight/widgets/simulation/train_lstm_surrogate.py +++ b/py/sight/widgets/simulation/train_lstm_surrogate.py @@ -3,40 +3,32 @@ TODO(bronevet): DO NOT SUBMIT without a detailed description of train_lstm_surrogate. 
""" +import os from typing import Sequence from absl import app from absl import flags -import pandas as pd +from google.cloud import bigquery import keras -from keras.models import Sequential from keras.layers import Dense from keras.layers import LSTM -import tensorflow as tf +from keras.models import Sequential from keras.preprocessing.sequence import TimeseriesGenerator -import os - -from google.cloud import bigquery +import pandas as pd +import tensorflow as tf -_PROJECT_ID = flags.DEFINE_string( - 'project_id', os.environ['PROJECT_ID'], "ID of the current GCP project." -) -_LOG_ID = flags.DEFINE_string( - 'log_id', '', "ID of the log being analyzed." -) +_PROJECT_ID = flags.DEFINE_string('project_id', os.environ['PROJECT_ID'], + "ID of the current GCP project.") +_LOG_ID = flags.DEFINE_string('log_id', '', "ID of the log being analyzed.") _MODEL_OUT_PATH = flags.DEFINE_string( - 'model_out_path', '', 'Path where the trained model should be stored.' -) + 'model_out_path', '', 'Path where the trained model should be stored.') _NUM_STEPS = flags.DEFINE_integer( - 'num_steps', 50, 'Number of steps of history in the prediction.' -) -_BATCH_SIZE = flags.DEFINE_integer( - 'batch_size', 128, 'Batch size.' -) + 'num_steps', 50, 'Number of steps of history in the prediction.') +_BATCH_SIZE = flags.DEFINE_integer('batch_size', 128, 'Batch size.') def build_query(raw_query, params: dict = None): - """Format query using given parameters. + """Format query using given parameters. If no parameters are provided the query is returned as is. @@ -47,26 +39,29 @@ def build_query(raw_query, params: dict = None): Returns: query with parameters inserted """ - query = raw_query - if params is not None: - query = query.format(**params) - return query + query = raw_query + if params is not None: + query = query.format(**params) + return query + def main(argv: Sequence[str]) -> None: if len(argv) > 1: raise app.UsageError("Too many command-line arguments.") current_script_directory = os.path.dirname(os.path.abspath(__file__)) - _SCHEMA_FILE_PATH = os.path.join(current_script_directory, 'simulation_time_series.sql') + _SCHEMA_FILE_PATH = os.path.join(current_script_directory, + 'simulation_time_series.sql') - with open(f'x-sight/py/sight/widgets/simulation/simulation_time_series.sql') as file: + with open(f'x-sight/py/sight/widgets/simulation/simulation_time_series.sql' + ) as file: query_template = file.read() - + query = build_query(query_template, {'log_id': _LOG_ID.value}) print('query=%s' % query) bq_client = bigquery.Client(project=_PROJECT_ID.value) df = bq_client.query(query).to_dataframe() - + sim_dataset = None simulations = df.groupby(by=['sim_location']) for sim_location, sim_data in simulations: @@ -74,21 +69,21 @@ def main(argv: Sequence[str]) -> None: df = pd.DataFrame(sim_data['values'].to_list()) input_data = df[:-_NUM_STEPS.value] targets = df[_NUM_STEPS.value:] - + cur_dataset = keras.utils.timeseries_dataset_from_array( - input_data, - targets, - sequence_length=_NUM_STEPS.value, + input_data, + targets, + sequence_length=_NUM_STEPS.value, batch_size=_BATCH_SIZE.value) - + if sim_dataset is None: sim_dataset = cur_dataset else: sim_dataset = sim_dataset.concatenate(cur_dataset) - + # sim_dataset = tf.concat(sim_datasets, axis=0) print('sim_dataset=%s' % sim_dataset) - # return + # return # data = pd.DataFrame(df['values'].to_list()) # l = data.values.tolist() # for i in range(16): @@ -99,13 +94,13 @@ def main(argv: Sequence[str]) -> None: # model.add(LSTM(100, activation='relu', 
input_shape=(None, 100))) model.add(Dense(20)) model.compile(optimizer='adam', loss='mse') - + h = model.fit(sim_dataset, steps_per_epoch=1, epochs=600, verbose=0) print('hist=%s' % h.history) - + # prediction = model.predict(dataset) # print('prediction=%s' % prediction) - + model.save(_MODEL_OUT_PATH.value) diff --git a/py/sight/widgets/simulation/train_surrogate.py b/py/sight/widgets/simulation/train_surrogate.py index 5693a26..60e9a2b 100644 --- a/py/sight/widgets/simulation/train_surrogate.py +++ b/py/sight/widgets/simulation/train_surrogate.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Trains a surrogate model to capture the observed dynamics of simulations.""" import math @@ -20,25 +19,23 @@ from absl import app from absl import flags -from helpers.logs.logs_handler import logger as loggingimport apache_beam as beam -import numpy as np -from sklearn import metrics -from sklearn.ensemble import GradientBoostingRegressor - +import apache_beam as beam from google3.pipeline.flume.py import runner from google3.pipeline.flume.py.io import capacitorio from google3.pyglib import gfile from google3.pyglib.contrib.gpathlib import gpath_flag +from helpers.logs.logs_handler import logger as logging +import numpy as np from sight.proto import example_pb2 from sight.proto import sight_pb2 +from sklearn import metrics +from sklearn.ensemble import GradientBoostingRegressor _IN_LOG_FILE = flags.DEFINE_list( 'in_log_file', None, - ( - 'Input file(s) that contain the Sight log that documents the simulation' - ' run.' - ), + ('Input file(s) that contain the Sight log that documents the simulation' + ' run.'), required=True, ) @@ -75,11 +72,8 @@ def process( if task.tensor_flow_example.input_example: for feat_name in task.tensor_flow_example.input_example.features.feature: # if feat_name in {'tai', 'fiald', 'dcph'}: - input_row.append( - task.tensor_flow_example.input_example.features.feature[ - feat_name - ].float_list.value[0] - ) + input_row.append(task.tensor_flow_example.input_example.features. + feature[feat_name].float_list.value[0]) # Emit a single output for each output feature. if task.tensor_flow_example.output_example: @@ -88,9 +82,8 @@ def process( feat_key, ( input_row, - task.tensor_flow_example.output_example.features.feature[ - feat_key - ].float_list.value[0], + task.tensor_flow_example.output_example.features. 
+ feature[feat_key].float_list.value[0], ), ) @@ -118,10 +111,10 @@ def process( logging.info( '%s: mae=%s, rmse=%s', task[0], - metrics.mean_absolute_error(output_array, predicted_array) - / np.mean(output_array), - math.sqrt(metrics.mean_squared_error(output_array, predicted_array)) - / np.mean(output_array), + metrics.mean_absolute_error(output_array, predicted_array) / + np.mean(output_array), + math.sqrt(metrics.mean_squared_error(output_array, predicted_array)) / + np.mean(output_array), ) @@ -130,8 +123,7 @@ def main(argv): raise app.UsageError('Too many command-line arguments.') root = beam.Pipeline( - runner=runner.FlumeRunner() - ) # beam.runners.DirectRunner()) + runner=runner.FlumeRunner()) # beam.runners.DirectRunner()) reads = [] for file_path in _IN_LOG_FILE.value: @@ -139,30 +131,18 @@ def main(argv): with gfile.GFile(file_path, 'r') as inputs_f: for cur_file_path in inputs_f: logging.info('cur_file_path=%s', cur_file_path) - reads.append( - root - | f'Read {cur_file_path}' - >> capacitorio.ReadFromCapacitor( - cur_file_path, ['*'], beam.coders.ProtoCoder(sight_pb2.Object) - ) - ) + reads.append(root | + f'Read {cur_file_path}' >> capacitorio.ReadFromCapacitor( + cur_file_path, ['*'], + beam.coders.ProtoCoder(sight_pb2.Object))) else: logging.info('file_path=%s', file_path) - reads.append( - root - | f'Read {file_path}' - >> capacitorio.ReadFromCapacitor( - file_path, ['*'], beam.coders.ProtoCoder(sight_pb2.Object) - ) - ) + reads.append(root | f'Read {file_path}' >> capacitorio.ReadFromCapacitor( + file_path, ['*'], beam.coders.ProtoCoder(sight_pb2.Object))) log = reads | beam.Flatten() - _ = ( - log - | beam.ParDo(BigExamplesToSingleOutputRows()) - | beam.GroupByKey() - | beam.ParDo(TrainModel()) - ) + _ = (log | beam.ParDo(BigExamplesToSingleOutputRows()) | beam.GroupByKey() | + beam.ParDo(TrainModel())) results = root.run() results.wait_until_finish() diff --git a/py/sight/widgets/tensorflow_sight/demo.py b/py/sight/widgets/tensorflow_sight/demo.py index 0a722e2..4244ded 100644 --- a/py/sight/widgets/tensorflow_sight/demo.py +++ b/py/sight/widgets/tensorflow_sight/demo.py @@ -11,17 +11,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- """Demo for the tensorflow bindings to the Sight logging library.""" from absl import app from absl import flags import numpy as np -import tensorflow as tf - from proto import sight_pb2 from py.sight import Sight from py.widgets.tensorflow_sight import tensorflow_sight +import tensorflow as tf FLAGS = flags.FLAGS @@ -42,9 +40,10 @@ def main(argv): with Sight(params) as sight: with tensorflow_sight.TfModelTraining(label="Model Training", sight=sight): for epoch in range(0, 3): - with tensorflow_sight.TfModelTrainingEpoch( - label="Model Epoch", epoch_num=epoch, batch_size=10, sight=sight - ): + with tensorflow_sight.TfModelTrainingEpoch(label="Model Epoch", + epoch_num=epoch, + batch_size=10, + sight=sight): sight.text("hello") with tensorflow_sight.TfModelApplication("Model Application", sight): a = np.array( @@ -55,9 +54,8 @@ def main(argv): dtype=np.float32, ) for i in range(0, 5): - tensorflow_sight.log( - "tensor %d" % i, tf.convert_to_tensor(a), sight - ) + tensorflow_sight.log("tensor %d" % i, tf.convert_to_tensor(a), + sight) a = a * 2 diff --git a/py/sight/widgets/tensorflow_sight/tensorflow_sight.py b/py/sight/widgets/tensorflow_sight/tensorflow_sight.py index 8470ab3..04d5153 100644 --- a/py/sight/widgets/tensorflow_sight/tensorflow_sight.py +++ b/py/sight/widgets/tensorflow_sight/tensorflow_sight.py @@ -11,18 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Documentation of numpy events and data in the Sight log.""" import inspect from typing import Any, Optional -from helpers.logs.logs_handler import logger as loggingimport tensorflow as tf - +from helpers.logs.logs_handler import logger as logging from proto import sight_pb2 from py.exception import exception from py.location import Location from py.widgets.numpy_sight import numpy_sight +import tensorflow as tf class TfModelApplication(object): @@ -40,11 +39,8 @@ def __init__(self, label: str, sight: Any): # pytype: disable=attribute-error self.sight.enter_block( self.label, - sight_pb2.Object( - block_start=sight_pb2.BlockStart( - sub_type=sight_pb2.BlockStart.ST_TENSORFLOW_MODEL_APPLICATION - ) - ), + sight_pb2.Object(block_start=sight_pb2.BlockStart( + sub_type=sight_pb2.BlockStart.ST_TENSORFLOW_MODEL_APPLICATION)), inspect.currentframe().f_back, ) # pytype: enable=attribute-error @@ -58,9 +54,8 @@ def __exit__(self, exc_type: Any, value: Any, traceback: Any): if exc_type is not None: # pytype: disable=attribute-error - exception( - exc_type, value, traceback, self.sight, inspect.currentframe().f_back - ) + exception(exc_type, value, traceback, self.sight, + inspect.currentframe().f_back) # pytype: enable=attribute-error if self.sight is None: @@ -70,11 +65,8 @@ def __exit__(self, exc_type: Any, value: Any, traceback: Any): # pytype: disable=attribute-error self.sight.exit_block( self.label, - sight_pb2.Object( - block_end=sight_pb2.BlockEnd( - sub_type=sight_pb2.BlockEnd.ST_TENSORFLOW_MODEL_APPLICATION - ) - ), + sight_pb2.Object(block_end=sight_pb2.BlockEnd( + sub_type=sight_pb2.BlockEnd.ST_TENSORFLOW_MODEL_APPLICATION)), inspect.currentframe().f_back, ) # pytype: enable=attribute-error @@ -95,11 +87,8 @@ def __init__(self, label: str, sight: Any): # pytype: disable=attribute-error self.sight.enter_block( self.label, - sight_pb2.Object( - block_start=sight_pb2.BlockStart( - sub_type=sight_pb2.BlockStart.ST_TENSORFLOW_MODEL_TRAINING - ) - ), + 
sight_pb2.Object(block_start=sight_pb2.BlockStart( + sub_type=sight_pb2.BlockStart.ST_TENSORFLOW_MODEL_TRAINING)), inspect.currentframe().f_back, ) @@ -114,9 +103,8 @@ def __exit__(self, exc_type: Any, value: Any, traceback: Any): if exc_type is not None: # pytype: disable=attribute-error - exception( - exc_type, value, traceback, self.sight, inspect.currentframe().f_back - ) + exception(exc_type, value, traceback, self.sight, + inspect.currentframe().f_back) # pytype: enable=attribute-error if self.sight is None: @@ -126,11 +114,8 @@ def __exit__(self, exc_type: Any, value: Any, traceback: Any): # pytype: disable=attribute-error self.sight.exit_block( self.label, - sight_pb2.Object( - block_end=sight_pb2.BlockEnd( - sub_type=sight_pb2.BlockEnd.ST_TENSORFLOW_MODEL_TRAINING - ) - ), + sight_pb2.Object(block_end=sight_pb2.BlockEnd( + sub_type=sight_pb2.BlockEnd.ST_TENSORFLOW_MODEL_TRAINING)), inspect.currentframe().f_back, ) # pytype: enable=attribute-error @@ -152,14 +137,12 @@ def __init__(self, label: str, epoch_num: int, batch_size: int, sight: Any): # pytype: disable=attribute-error self.sight.enter_block( self.label, - sight_pb2.Object( - block_start=sight_pb2.BlockStart( - sub_type=sight_pb2.BlockStart.ST_TENSORFLOW_MODEL_TRAINING_EPOCH, - tensor_flow_model_training_epoch=sight_pb2.TensorFlowModelTrainingEpochStart( - epoch_num=epoch_num, batch_size=batch_size - ), - ) - ), + sight_pb2.Object(block_start=sight_pb2.BlockStart( + sub_type=sight_pb2.BlockStart.ST_TENSORFLOW_MODEL_TRAINING_EPOCH, + tensor_flow_model_training_epoch=sight_pb2. + TensorFlowModelTrainingEpochStart(epoch_num=epoch_num, + batch_size=batch_size), + )), inspect.currentframe().f_back, ) # pytype: enable=attribute-error @@ -177,9 +160,8 @@ def __exit__(self, exc_type: Any, value: Any, traceback: Any): if exc_type is not None: # pytype: disable=attribute-error - exception( - exc_type, value, traceback, self.sight, inspect.currentframe().f_back - ) + exception(exc_type, value, traceback, self.sight, + inspect.currentframe().f_back) # pytype: enable=attribute-error if self.sight is None: @@ -190,19 +172,17 @@ def __exit__(self, exc_type: Any, value: Any, traceback: Any): # pytype: disable=attribute-error self.sight.exit_block( self.label, - sight_pb2.Object( - block_end=sight_pb2.BlockEnd( - sub_type=sight_pb2.BlockEnd.ST_TENSORFLOW_MODEL_TRAINING_EPOCH - ) - ), + sight_pb2.Object(block_end=sight_pb2.BlockEnd( + sub_type=sight_pb2.BlockEnd.ST_TENSORFLOW_MODEL_TRAINING_EPOCH)), inspect.currentframe().f_back, ) # pytype: enable=attribute-error -def log( - label: str, tensor: tf.Tensor, sight: Any, frame: Optional[Any] = None -) -> Optional[Location]: +def log(label: str, + tensor: tf.Tensor, + sight: Any, + frame: Optional[Any] = None) -> Optional[Location]: """Documents a TensorFlow tensor in the Sight log if Sight is being used. Args: diff --git a/py/sight/widgets/tensorflow_sight/tensorflow_sight_test.py b/py/sight/widgets/tensorflow_sight/tensorflow_sight_test.py index 64b5d0f..15bc478 100644 --- a/py/sight/widgets/tensorflow_sight/tensorflow_sight_test.py +++ b/py/sight/widgets/tensorflow_sight/tensorflow_sight_test.py @@ -11,21 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- """Tests for tensorflow_sight.""" import inspect from typing import Any, Optional, Sequence +from absl.testing import absltest import numpy as np -import tensorflow as tf - # from google3.analysis.dremel.core.capacitor.public.python import pywrap_record_reader from proto import sight_pb2 from py.sight import Sight from py.widgets.tensorflow_sight import tensorflow_sight +import tensorflow as tf from tensorflow.python.util.protobuf import compare -from absl.testing import absltest def _read_text_file(file_path: str) -> str: @@ -39,8 +37,7 @@ def _read_text_file(file_path: str) -> str: def _read_capacitor_file(file_path: str) -> Optional[Any]: protos = [] record_reader = pywrap_record_reader.RecordReader.CreateFromPath( - file_path, ['*'], 60.0 - ) + file_path, ['*'], 60.0) protos.extend(record_reader.IterRecords()) return sorted(protos, key=lambda x: x.index) @@ -49,20 +46,17 @@ def _create_attributes(sight: Sight) -> Sequence[sight_pb2.Attribute]: attribute = [] if hasattr(sight, 'change_list_number'): attribute.append( - sight_pb2.Attribute( - key='change_list_number', value=str(sight.change_list_number) - ) - ) + sight_pb2.Attribute(key='change_list_number', + value=str(sight.change_list_number))) if hasattr(sight, 'citc_snapshot'): attribute.append( - sight_pb2.Attribute(key='citc_snapshot', value=str(sight.citc_snapshot)) - ) + sight_pb2.Attribute(key='citc_snapshot', + value=str(sight.citc_snapshot))) return attribute -def _create_attributes_text( - base_attributes: Sequence[sight_pb2.Attribute], sight: Sight -) -> str: +def _create_attributes_text(base_attributes: Sequence[sight_pb2.Attribute], + sight: Sight) -> str: attribute = [] if hasattr(sight, 'change_list_number'): attribute.append(f'change_list_number={sight.change_list_number}') @@ -94,8 +88,7 @@ def testLogFloatArrayToText(self): tensorflow_sight.log( 'tensor', tf.convert_to_tensor( - np.array([[1, 2.2, 3.333], [4.1, 5, 6.2]], dtype=np.float32) - ), + np.array([[1, 2.2, 3.333], [4.1, 5, 6.2]], dtype=np.float32)), sight, ) @@ -104,9 +97,8 @@ def testLogFloatArrayToText(self): expected_log = """Model Application<<<%s Model Application>>>%s """ % (block_attrs, block_attrs) - actual_log = _read_text_file( - params.log_dir_path + '/testLogFloatArrayToText.txt' - ) + actual_log = _read_text_file(params.log_dir_path + + '/testLogFloatArrayToText.txt') self.assertEqual( expected_log, actual_log, @@ -130,8 +122,7 @@ def testLogFloatArrayToCapacitorFile(self): tensorflow_sight.log( 'tensor', tf.convert_to_tensor( - np.array([[1, 2.5, 3], [4, 5.5, 6]], dtype=np.float32) - ), + np.array([[1, 2.5, 3], [4, 5.5, 6]], dtype=np.float32)), sight, ) @@ -165,8 +156,7 @@ def testLogFloatArrayToCapacitorFile(self): label='tensor', shape=[2, 3], double_values=sight_pb2.Tensor.DoubleValues( - value=[1, 2.5, 3, 4, 5.5, 6] - ), + value=[1, 2.5, 3, 4, 5.5, 6]), ), ), sight_pb2.Object( @@ -189,8 +179,7 @@ def testLogFloatArrayToCapacitorFile(self): ] actual_log = _read_capacitor_file( - params.log_dir_path + '/testLogFloatArrayToCapacitorFile.capacitor' - ) + params.log_dir_path + '/testLogFloatArrayToCapacitorFile.capacitor') self.assertEqual(len(expected_log), len(actual_log)) for i in range(0, len(expected_log)): @@ -199,8 +188,8 @@ def testLogFloatArrayToCapacitorFile(self): expected_log[i], actual_log[i], 'Target code and generated logs are different. 
Expected' - ' log[%d]:\n%s\nActual log[%d]:\n%s\n' - % (i, expected_log[i], i, actual_log[i]), + ' log[%d]:\n%s\nActual log[%d]:\n%s\n' % + (i, expected_log[i], i, actual_log[i]), ignored_fields=['line'], ) @@ -220,8 +209,7 @@ def testLogIntArrayToCapacitorFile(self): tensorflow_sight.log( 'tensor', tf.convert_to_tensor( - np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int64) - ), + np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int64)), sight, ) @@ -255,8 +243,7 @@ def testLogIntArrayToCapacitorFile(self): label='tensor', shape=[2, 3], int64_values=sight_pb2.Tensor.Int64Values( - value=[1, 2, 3, 4, 5, 6] - ), + value=[1, 2, 3, 4, 5, 6]), ), ), sight_pb2.Object( @@ -279,8 +266,7 @@ def testLogIntArrayToCapacitorFile(self): ] actual_log = _read_capacitor_file( - params.log_dir_path + '/testLogIntArrayToCapacitorFile.capacitor' - ) + params.log_dir_path + '/testLogIntArrayToCapacitorFile.capacitor') self.assertEqual(len(expected_log), len(actual_log)) for i in range(0, len(expected_log)): @@ -289,8 +275,8 @@ def testLogIntArrayToCapacitorFile(self): expected_log[i], actual_log[i], 'Target code and generated logs are different. Expected' - ' log[%d]:\n%s\nActual log[%d]:\n%s\n' - % (i, expected_log[i], i, actual_log[i]), + ' log[%d]:\n%s\nActual log[%d]:\n%s\n' % + (i, expected_log[i], i, actual_log[i]), ignored_fields=['line'], ) diff --git a/py/tests/colorful_tests.py b/py/tests/colorful_tests.py index 6061bba..4870e47 100644 --- a/py/tests/colorful_tests.py +++ b/py/tests/colorful_tests.py @@ -10,19 +10,18 @@ class ColorfulTestResult(unittest.TextTestResult): - def addSuccess(self, test): - super().addSuccess(test) - self.stream.write('\n' + Fore.GREEN + 'PASS' + Style.RESET_ALL + '\n') + def addSuccess(self, test): + super().addSuccess(test) + self.stream.write('\n' + Fore.GREEN + 'PASS' + Style.RESET_ALL + '\n') - def addFailure(self, test, err): - super().addFailure(test, err) - self.stream.write('\n' + Fore.RED + 'FAIL' + Style.RESET_ALL + '\n') + def addFailure(self, test, err): + super().addFailure(test, err) + self.stream.write('\n' + Fore.RED + 'FAIL' + Style.RESET_ALL + '\n') - def addError(self, test, err): - super().addError(test, err) - self.stream.write('\n' + Fore.YELLOW + 'ERROR' + Style.RESET_ALL + - '\n') + def addError(self, test, err): + super().addError(test, err) + self.stream.write('\n' + Fore.YELLOW + 'ERROR' + Style.RESET_ALL + '\n') class ColorfulTestRunner(unittest.TextTestRunner): - resultclass = ColorfulTestResult + resultclass = ColorfulTestResult diff --git a/sight_service/acme_optimizer.py b/sight_service/acme_optimizer.py index 9813c25..b6e6b53 100644 --- a/sight_service/acme_optimizer.py +++ b/sight_service/acme_optimizer.py @@ -14,25 +14,25 @@ """Acme reinforcement learning for driving Sight applications.""" import concurrent.futures -from helpers.logs.logs_handler import logger as logging -import time import json import pickle +import time + from acme import specs from acme.adders import reverb as adders_reverb from acme.jax import utils from acme.jax.experiments import config import dm_env +from helpers.logs.logs_handler import logger as logging import jax import numpy as np from overrides import overrides from readerwriterlock import rwlock import reverb from sight.proto import sight_pb2 -from sight_service.proto import service_pb2 +from sight_service.build_d4pg_learner import build_d4pg_config # from service import server_utils from sight_service.build_dqn_learner import build_dqn_config -from sight_service.build_d4pg_learner import build_d4pg_config # from 
sight_service.build_impala_learner import build_impala_config from sight_service.build_mdqn_learner import build_mdqn_config from sight_service.build_qrdqn_learner import build_qrdqn_config @@ -40,16 +40,17 @@ # from sight_service.build_mpo_learner import build_mpo_config # from sight_service.build_sac_learner import build_sac_config from sight_service.build_td3_learner import build_td3_config -from sight_service.proto.numproto.numproto import ndarray_to_proto -from sight_service.proto.numproto.numproto import proto_to_ndarray from sight_service.optimizer_instance import OptimizerInstance from sight_service.optimizer_instance import param_dict_to_proto +from sight_service.proto import service_pb2 +from sight_service.proto.numproto.numproto import ndarray_to_proto +from sight_service.proto.numproto.numproto import proto_to_ndarray _file_name = "acme_optimizer.py" class Acme(OptimizerInstance): - """Acme optimizer class to work with training methods. + """Acme optimizer class to work with training methods. Attributes: agents: Maps each worker_id to the Agent object that learns from the @@ -67,449 +68,444 @@ class Acme(OptimizerInstance): _learner_checkpointer: """ - def __init__(self): - super().__init__() - self._experiment = None - self._learner = None - self._learner_weights_lock = rwlock.RWLockFair() - self._learner_weights = {} - self._replay_server = None - self._replay_client = None - self._dataset = None - self._learner_checkpointer = None - self._learner_keys = ["policy", "critic"] - # self._last_updated_at = 0 - self._avg_insertion_time = [] - self._avg_updation_time = [] - - def print_insertion_time(self): - logging.info("all insertion times : %s", self._avg_insertion_time) - - def calculate_time(self, start_time, operation): - method_name = "calculate_time" - logging.debug(">>>> In %s of %s", method_name, _file_name) - - current_time = time.time() - start_time - if (operation == 'insert'): - self._avg_insertion_time.append(round(current_time, 2)) - avg_time = sum(self._avg_insertion_time) / len( - self._avg_insertion_time) - elif (operation == 'update'): - self._avg_updation_time.append(current_time) - avg_time = sum(self._avg_updation_time) / len( - self._avg_updation_time) - logging.info( - "%s Time: - latest time : %s and Average time : %s seconds", - operation, round(current_time, 3), round(avg_time, 3)) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - - def fetch_replay_table_size(self): - method_name = "fetch_replay_table_size" - logging.debug(">>>> In %s of %s", method_name, _file_name) - table_info = self._replay_client.server_info() - table_size = table_info[ - adders_reverb. 
- DEFAULT_PRIORITY_TABLE].rate_limiter_info.insert_stats.completed - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return table_size - - def update_learner(self): - method_name = "update_learner" - logging.info(">>>> In %s of %s", method_name, _file_name) - try: - while True: - # If dataset iterator has enough data to sample - if self._dataset.ready(): - logging.info("updating learner................") - start_time = time.time() - self._learner.step() - # self.calculate_time(start_time, 'update') - - # transfering updated learner weights to _learner_weights - # variable with write lock - with self._learner_weights_lock.gen_wlock(): - all_weights = self._learner.get_variables( - self._learner_keys) - for i in range(len(all_weights)): - self._learner_weights[ - self._learner_keys[i]] = all_weights[i] - logging.info("<<<< Out %s of %s", method_name, _file_name) - except Exception as e: - logging.exception("Exception in learner thread : %s", e) - - def insert_to_replay(self, request): - method_name = "insert_to_replay" - logging.info(">>>> In %s of %s", method_name, _file_name) - try: - if request.acme_config.episode_observations: - # logging.info( - # "adding this many data into buffer via thread : %d", - # len(request.acme_config.episode_observations), - # ) - - start_time = time.time() - adder = self._experiment.builder.make_adder( - self._replay_client, self._environment_spec, self._policy) - - episode_observations = request.acme_config.episode_observations - for episode_obs in episode_observations: - if episode_obs.HasField("action"): - action = proto_to_ndarray(episode_obs.action) - else: - action = np.array(0, dtype=np.int64) - # todo : meetashah - changed dtyep from int64 to float64 for d4pg agent - # action = np.array(0, dtype=np.float32) - if episode_obs.HasField("reward"): - reward = proto_to_ndarray(episode_obs.reward) - if episode_obs.HasField("discount"): - discount = proto_to_ndarray(episode_obs.discount) - else: - discount = np.array(0, dtype=np.float64) - observation = proto_to_ndarray(episode_obs.observation) - steptype = episode_obs.steptype - - if steptype == dm_env.StepType.FIRST: - action = None - timestep = dm_env.TimeStep( - step_type=steptype, - reward=None, - discount=None, - observation=observation, - ) - # print("first timestep : ", timestep) - # raise SystemExit - adder.add_first(timestep) - else: - timestep = dm_env.TimeStep( - step_type=steptype, - reward=reward, - discount=discount, - observation=observation, - ) - # print("mid timestep : ", timestep) - # print("action : ", action, type(action), action.shape) - # raise SystemExit - adder.add(action, timestep) - # self.calculate_time(start_time, 'insert') - logging.info("table_size: %s", self.fetch_replay_table_size()) - else: - logging.info("no data to insert.....") - logging.debug("<<<< Out %s of %s", method_name, _file_name) - except Exception as e: - logging.exception("Exception in thread : %s", e) - - # @overrides - # def get_weights( - # self, request: service_pb2.GetWeightsRequest - # ) -> service_pb2.GetWeightsResponse: - # method_name = "get_weights" - # logging.debug(">>>> In %s of %s", method_name, _file_name) - - # executor = concurrent.futures.ThreadPoolExecutor(max_workers=1) - # executor.submit(self.insert_to_replay, request) - # # Manually shutdown the executor after submitting tasks - # executor.shutdown(wait=False) - - # # logging.info("sending latest weights back to client") - # with self._learner_weights_lock.gen_rlock(): - # latest_weights = self._learner_weights - - # 
weights_msg = service_pb2.GetWeightsResponse() - # for layer_data in latest_weights: - # for layer_name, layer_info in layer_data.items(): - # layer_msg = weights_msg.layers.add() - # layer_msg.name = layer_name - - # weights_data_msg = layer_msg.weights - # if "offset" in layer_info: - # weights_data_msg.offset.extend(layer_info["offset"]) - # if "scale" in layer_info: - # weights_data_msg.scale.extend(layer_info["scale"]) - # if "b" in layer_info: - # weights_data_msg.b.extend(layer_info["b"]) - # if "w" in layer_info: - # weights_data_msg.w.CopyFrom(ndarray_to_proto(layer_info["w"])) - # # print(f"<<<< Out {method_name} of {_file_name}") - # logging.debug("<<<< Out %s of %s", method_name, _file_name) - # return weights_msg - - def generate_env_spec(self, state_attrs, action_attrs): - method_name = "generate_env_spec" - logging.debug(">>>> In %s of %s", method_name, _file_name) - - default_dtype = np.float32 - - state_min = [] - state_max = [] - for key, attr_props in state_attrs.items(): - state_min.append(attr_props.min_value) - state_max.append(attr_props.max_value) - observations = specs.BoundedArray(shape=(len(state_max), ), - dtype=default_dtype, - name='observation', - minimum=state_min, - maximum=state_max) - action_min = [] - action_max = [] - for key, attr_props in action_attrs.items(): - action_min.append(attr_props.min_value) - action_max.append(attr_props.max_value) - - if (attr_props.valid_int_values): - actions = specs.DiscreteArray(num_values=len( - attr_props.valid_int_values), - dtype=np.int64, - name="action") - else: - if (attr_props.step_size): - default_dtype = np.int64 - actions = specs.BoundedArray(shape=(len(action_max), ), - dtype=default_dtype, - name='action', - minimum=action_min, - maximum=action_max) - - # print(state_min, state_max, len(state_max), state_dtype) - # print(action_min, action_max, len(action_max), action_dtype) - # print('actions : ', actions) - - new_env_spec = specs.EnvironmentSpec( - observations=observations, - actions=actions, - rewards=specs.Array(shape=(), dtype=np.float64, name='reward'), - discounts=specs.BoundedArray(shape=(), - dtype=np.float64, - name='discount', - minimum=0.0, - maximum=1.0)) - # print('new_env_spec : ', new_env_spec) - # raise SystemError - - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return new_env_spec - - def create_learner(self, client_id, acme_config, state_attrs, - action_attrs): - method_name = "create_learner" - logging.info(">>>> In %s of %s", method_name, _file_name) - - environment_spec = self.generate_env_spec(state_attrs, action_attrs) - - if (acme_config.acme_agent == - sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_DQN): - self._experiment = build_dqn_config() - elif (acme_config.acme_agent == - sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_D4PG): - self._experiment = build_d4pg_config() - # elif (acme_config.acme_agent == - # sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_IMPALA): - # self._experiment = build_impala_config() - elif (acme_config.acme_agent == - sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_MDQN): - self._experiment = build_mdqn_config() - elif (acme_config.acme_agent == - sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_QRDQN): - self._experiment = build_qrdqn_config() - # elif (acme_config.acme_agent == - # sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_PPO): - # self._experiment = build_ppo_config() - # elif (acme_config.acme_agent == - # sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_MPO): - # self._experiment = build_mpo_config() 
- # elif (acme_config.acme_agent == - # sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_SAC): - # self._experiment = build_sac_config(environment_spec) - elif (acme_config.acme_agent == - sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_TD3): - self._experiment = build_td3_config() - - # print('Else environment_spec : ', environment_spec) - - networks = self._experiment.network_factory(environment_spec) - policy = config.make_policy( - experiment=self._experiment, - networks=networks, - environment_spec=environment_spec, - evaluation=False, - ) - replay_tables = self._experiment.builder.make_replay_tables( - environment_spec, policy) - - replay_server = reverb.Server(replay_tables, port=None) - replay_client = reverb.Client(f"localhost:{replay_server.port}") - - dataset = self._experiment.builder.make_dataset_iterator(replay_client) - dataset = utils.prefetch(dataset, buffer_size=1) - - key = jax.random.PRNGKey(0) - learner_key, key = jax.random.split(key) - learner = self._experiment.builder.make_learner( - random_key=learner_key, - networks=networks, - dataset=dataset, - logger_fn=self._experiment.logger_factory, - environment_spec=environment_spec, - replay_client=replay_client, - ) - print("learner : ", learner) - - self._learner = learner - self._replay_server = replay_server - self._replay_client = replay_client - self._dataset = dataset - self._environment_spec = environment_spec - self._policy = policy - - # keeping weights in learner_weights dict so, future calls can directly get the 'at time updated weights' - with self._learner_weights_lock.gen_wlock(): + def __init__(self): + super().__init__() + self._experiment = None + self._learner = None + self._learner_weights_lock = rwlock.RWLockFair() + self._learner_weights = {} + self._replay_server = None + self._replay_client = None + self._dataset = None + self._learner_checkpointer = None + self._learner_keys = ["policy", "critic"] + # self._last_updated_at = 0 + self._avg_insertion_time = [] + self._avg_updation_time = [] + + def print_insertion_time(self): + logging.info("all insertion times : %s", self._avg_insertion_time) + + def calculate_time(self, start_time, operation): + method_name = "calculate_time" + logging.debug(">>>> In %s of %s", method_name, _file_name) + + current_time = time.time() - start_time + if (operation == 'insert'): + self._avg_insertion_time.append(round(current_time, 2)) + avg_time = sum(self._avg_insertion_time) / len(self._avg_insertion_time) + elif (operation == 'update'): + self._avg_updation_time.append(current_time) + avg_time = sum(self._avg_updation_time) / len(self._avg_updation_time) + logging.info("%s Time: - latest time : %s and Average time : %s seconds", + operation, round(current_time, 3), round(avg_time, 3)) + logging.debug("<<<< Out %s of %s", method_name, _file_name) + + def fetch_replay_table_size(self): + method_name = "fetch_replay_table_size" + logging.debug(">>>> In %s of %s", method_name, _file_name) + table_info = self._replay_client.server_info() + table_size = table_info[ + adders_reverb. 
+ DEFAULT_PRIORITY_TABLE].rate_limiter_info.insert_stats.completed + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return table_size + + def update_learner(self): + method_name = "update_learner" + logging.info(">>>> In %s of %s", method_name, _file_name) + try: + while True: + # If dataset iterator has enough data to sample + if self._dataset.ready(): + logging.info("updating learner................") + start_time = time.time() + self._learner.step() + # self.calculate_time(start_time, 'update') + + # transfering updated learner weights to _learner_weights + # variable with write lock + with self._learner_weights_lock.gen_wlock(): all_weights = self._learner.get_variables(self._learner_keys) for i in range(len(all_weights)): - self._learner_weights[self._learner_keys[i]] = all_weights[i] - - # spinning a thread which update the learner when dataset iterator is ready - executor = concurrent.futures.ThreadPoolExecutor(max_workers=1) - executor.submit(self.update_learner) - # Manually shutdown the executor after submitting tasks - executor.shutdown(wait=False) - logging.info("<<<< Out %s of %s", method_name, _file_name) - # print(f"<<<< Out {method_name} of {_file_name}") - - @overrides - def launch( - self, - request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: - method_name = "launch" - logging.debug(">>>> In %s of %s", method_name, _file_name) - super(Acme, self).launch(request) - - print('launch request : ', request) - - # self.create_learner(request.client_id, request.acme_config.env_name) - # self.create_learner(request.client_id, request.acme_config) - - self.create_learner( - request.client_id, - acme_config=request.decision_config_params.choice_config[ - request.label].acme_config, - state_attrs=request.decision_config_params.state_attrs, - action_attrs=request.decision_config_params.action_attrs) - - # TODO : meetashah : this is old version, might have to modify for now. - # storing client details in case server crashed - mid run - # client_details = {} - # client_details["sight_id"] = int(request.client_id) - # client_details["env"] = "CartPole-v1" - # client_details["network_path"] = ( - # f"gs://{FLAGS.project_id}-sight/learner/" + request.client_id + "/" - # ) - # client_details["learner_path"] = ( - # f"gs://{FLAGS.project_id}-sight/learner/" + request.client_id + "/" - # ) - # client_details["replay_address"] = "127.0.0.1" - # server_utils.Insert_In_ClientData_Table( - # client_details, "sight-data", "sight_db", "ClientData" + self._learner_weights[self._learner_keys[i]] = all_weights[i] + logging.info("<<<< Out %s of %s", method_name, _file_name) + except Exception as e: + logging.exception("Exception in learner thread : %s", e) + + def insert_to_replay(self, request): + method_name = "insert_to_replay" + logging.info(">>>> In %s of %s", method_name, _file_name) + try: + if request.acme_config.episode_observations: + # logging.info( + # "adding this many data into buffer via thread : %d", + # len(request.acme_config.episode_observations), # ) - response = service_pb2.LaunchResponse() - response.display_string = "ACME SUCCESS!" - logging.info("<<<< Out %s of %s", method_name, _file_name) - return response - - @overrides - def decision_point( - self, request: service_pb2.DecisionPointRequest - ) -> service_pb2.DecisionPointResponse: - method_name = "decision_point" - logging.info(">>>> In %s of %s", method_name, _file_name) - - #? 
start separate thread for the insertion to replay - executor = concurrent.futures.ThreadPoolExecutor(max_workers=1) - executor.submit(self.insert_to_replay, request) - # Manually shutdown the executor after submitting tasks - executor.shutdown(wait=False) - - # logging.info("sending latest weights back to client") - latest_weights = [] - with self._learner_weights_lock.gen_rlock(): - if (len(request.acme_config.learner_keys) > 0): - for key in request.acme_config.learner_keys: - latest_weights.append(self._learner_weights[key]) - else: - latest_weights.append(self._learner_weights["policy"]) - - response = service_pb2.DecisionPointResponse() - - # Convert NumPy arrays to lists before serialization - def convert_np_to_list(obj): - if isinstance(obj, np.ndarray): - return {'data': obj.tolist(), 'shape': obj.shape} - return obj - - # directly serializing the weights structure - # serialized_weights = json.dumps( - # latest_weights, default=convert_np_to_list).encode('utf-8') - serialized_weights = pickle.dumps(latest_weights) - - response.weights = serialized_weights - - logging.info("<<<< Out %s of %s", method_name, _file_name) - return response - - @overrides - def finalize_episode( - self, request: service_pb2.FinalizeEpisodeRequest - ) -> service_pb2.FinalizeEpisodeResponse: - method_name = "finalize_episode" - logging.debug(">>>> In %s of %s", method_name, _file_name) - - executor = concurrent.futures.ThreadPoolExecutor(max_workers=1) - executor.submit(self.insert_to_replay, request) - # Manually shutdown the executor after submitting tasks - executor.shutdown(wait=False) - - # observation = np.array( - # list(param_proto_to_dict(request.decision_point.state_params).values()), - # dtype=np.float32, - # ) - # # logging.info('observation : %s', observation) - # with self.last_action_lock.gen_wlock(): - # if request.worker_id in self.last_action: - # action = self.last_action[request.worker_id] - - # timestep = dm_env.TimeStep( - # step_type=dm_env.StepType.LAST, - # reward=np.array( - # request.decision_outcome.outcome_value, dtype=np.float64 - # ), - # discount=np.array( - # request.decision_outcome.discount, dtype=np.float64 - # ), - # observation=np.frombuffer(observation, dtype=np.float32), - # ) - - # with self.agents_lock.gen_rlock(): - # self.agents[request.worker_id].observe( - # np.int64(action), next_timestep=timestep - # ) - # - # # self.agents[request.worker_id].observe( - # # np.float32(action), next_timestep=timestep - # # ) - # self.agents[request.worker_id].update() - # self._learner_checkpointer.save(force=True) - - # Resetting last action for agent since it is the end of the episode. 
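The `decision_point`/`update_learner` pair in this diff shares learner weights across threads: the learner thread replaces the snapshot under a write lock, and each RPC thread copies and pickles it under a read lock. A minimal standalone sketch of that pattern, using the same `readerwriterlock` and `pickle` libraries the patch imports (the weight values and helper names here are illustrative, not part of the patch):

```python
import pickle

import numpy as np
from readerwriterlock import rwlock

lock = rwlock.RWLockFair()
weights = {}  # shared snapshot, keyed like _learner_weights ("policy", "critic")

def writer_update(new_weights):
  # Learner thread: swap in fresh weights under an exclusive write lock.
  with lock.gen_wlock():
    weights.update(new_weights)

def reader_serialize(keys):
  # RPC thread: copy the requested entries under a shared read lock,
  # then serialize outside the critical section.
  with lock.gen_rlock():
    latest = [weights[k] for k in keys]
  return pickle.dumps(latest)

writer_update({"policy": np.zeros(4), "critic": np.ones(2)})
payload = reader_serialize(["policy"])
assert np.array_equal(pickle.loads(payload)[0], np.zeros(4))  # round-trips intact
```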
- # del self.last_action[request.worker_id] - - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return service_pb2.FinalizeEpisodeResponse(response_str="Success!") - - @overrides - def current_status( - self, request: service_pb2.CurrentStatusRequest - ) -> service_pb2.CurrentStatusResponse: - method_name = "current_status" - logging.debug(">>>> In %s of %s", method_name, _file_name) - response = "[ACME]\n" - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return service_pb2.CurrentStatusResponse(response_str=response) + start_time = time.time() + adder = self._experiment.builder.make_adder(self._replay_client, + self._environment_spec, + self._policy) + + episode_observations = request.acme_config.episode_observations + for episode_obs in episode_observations: + if episode_obs.HasField("action"): + action = proto_to_ndarray(episode_obs.action) + else: + action = np.array(0, dtype=np.int64) + # todo : meetashah - changed dtyep from int64 to float64 for d4pg agent + # action = np.array(0, dtype=np.float32) + if episode_obs.HasField("reward"): + reward = proto_to_ndarray(episode_obs.reward) + if episode_obs.HasField("discount"): + discount = proto_to_ndarray(episode_obs.discount) + else: + discount = np.array(0, dtype=np.float64) + observation = proto_to_ndarray(episode_obs.observation) + steptype = episode_obs.steptype + + if steptype == dm_env.StepType.FIRST: + action = None + timestep = dm_env.TimeStep( + step_type=steptype, + reward=None, + discount=None, + observation=observation, + ) + # print("first timestep : ", timestep) + # raise SystemExit + adder.add_first(timestep) + else: + timestep = dm_env.TimeStep( + step_type=steptype, + reward=reward, + discount=discount, + observation=observation, + ) + # print("mid timestep : ", timestep) + # print("action : ", action, type(action), action.shape) + # raise SystemExit + adder.add(action, timestep) + # self.calculate_time(start_time, 'insert') + logging.info("table_size: %s", self.fetch_replay_table_size()) + else: + logging.info("no data to insert.....") + logging.debug("<<<< Out %s of %s", method_name, _file_name) + except Exception as e: + logging.exception("Exception in thread : %s", e) + + # @overrides + # def get_weights( + # self, request: service_pb2.GetWeightsRequest + # ) -> service_pb2.GetWeightsResponse: + # method_name = "get_weights" + # logging.debug(">>>> In %s of %s", method_name, _file_name) + + # executor = concurrent.futures.ThreadPoolExecutor(max_workers=1) + # executor.submit(self.insert_to_replay, request) + # # Manually shutdown the executor after submitting tasks + # executor.shutdown(wait=False) + + # # logging.info("sending latest weights back to client") + # with self._learner_weights_lock.gen_rlock(): + # latest_weights = self._learner_weights + + # weights_msg = service_pb2.GetWeightsResponse() + # for layer_data in latest_weights: + # for layer_name, layer_info in layer_data.items(): + # layer_msg = weights_msg.layers.add() + # layer_msg.name = layer_name + + # weights_data_msg = layer_msg.weights + # if "offset" in layer_info: + # weights_data_msg.offset.extend(layer_info["offset"]) + # if "scale" in layer_info: + # weights_data_msg.scale.extend(layer_info["scale"]) + # if "b" in layer_info: + # weights_data_msg.b.extend(layer_info["b"]) + # if "w" in layer_info: + # weights_data_msg.w.CopyFrom(ndarray_to_proto(layer_info["w"])) + # # print(f"<<<< Out {method_name} of {_file_name}") + # logging.debug("<<<< Out %s of %s", method_name, _file_name) + # return weights_msg + + def 
generate_env_spec(self, state_attrs, action_attrs): + method_name = "generate_env_spec" + logging.debug(">>>> In %s of %s", method_name, _file_name) + + default_dtype = np.float32 + + state_min = [] + state_max = [] + for key, attr_props in state_attrs.items(): + state_min.append(attr_props.min_value) + state_max.append(attr_props.max_value) + observations = specs.BoundedArray(shape=(len(state_max),), + dtype=default_dtype, + name='observation', + minimum=state_min, + maximum=state_max) + action_min = [] + action_max = [] + for key, attr_props in action_attrs.items(): + action_min.append(attr_props.min_value) + action_max.append(attr_props.max_value) + + if (attr_props.valid_int_values): + actions = specs.DiscreteArray(num_values=len(attr_props.valid_int_values), + dtype=np.int64, + name="action") + else: + if (attr_props.step_size): + default_dtype = np.int64 + actions = specs.BoundedArray(shape=(len(action_max),), + dtype=default_dtype, + name='action', + minimum=action_min, + maximum=action_max) + + # print(state_min, state_max, len(state_max), state_dtype) + # print(action_min, action_max, len(action_max), action_dtype) + # print('actions : ', actions) + + new_env_spec = specs.EnvironmentSpec(observations=observations, + actions=actions, + rewards=specs.Array(shape=(), + dtype=np.float64, + name='reward'), + discounts=specs.BoundedArray( + shape=(), + dtype=np.float64, + name='discount', + minimum=0.0, + maximum=1.0)) + # print('new_env_spec : ', new_env_spec) + # raise SystemError + + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return new_env_spec + + def create_learner(self, client_id, acme_config, state_attrs, action_attrs): + method_name = "create_learner" + logging.info(">>>> In %s of %s", method_name, _file_name) + + environment_spec = self.generate_env_spec(state_attrs, action_attrs) + + if (acme_config.acme_agent == + sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_DQN): + self._experiment = build_dqn_config() + elif (acme_config.acme_agent == + sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_D4PG): + self._experiment = build_d4pg_config() + # elif (acme_config.acme_agent == + # sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_IMPALA): + # self._experiment = build_impala_config() + elif (acme_config.acme_agent == + sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_MDQN): + self._experiment = build_mdqn_config() + elif (acme_config.acme_agent == + sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_QRDQN): + self._experiment = build_qrdqn_config() + # elif (acme_config.acme_agent == + # sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_PPO): + # self._experiment = build_ppo_config() + # elif (acme_config.acme_agent == + # sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_MPO): + # self._experiment = build_mpo_config() + # elif (acme_config.acme_agent == + # sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_SAC): + # self._experiment = build_sac_config(environment_spec) + elif (acme_config.acme_agent == + sight_pb2.DecisionConfigurationStart.AcmeConfig.AA_TD3): + self._experiment = build_td3_config() + + # print('Else environment_spec : ', environment_spec) + + networks = self._experiment.network_factory(environment_spec) + policy = config.make_policy( + experiment=self._experiment, + networks=networks, + environment_spec=environment_spec, + evaluation=False, + ) + replay_tables = self._experiment.builder.make_replay_tables( + environment_spec, policy) + + replay_server = reverb.Server(replay_tables, port=None) + replay_client = 
reverb.Client(f"localhost:{replay_server.port}") + + dataset = self._experiment.builder.make_dataset_iterator(replay_client) + dataset = utils.prefetch(dataset, buffer_size=1) + + key = jax.random.PRNGKey(0) + learner_key, key = jax.random.split(key) + learner = self._experiment.builder.make_learner( + random_key=learner_key, + networks=networks, + dataset=dataset, + logger_fn=self._experiment.logger_factory, + environment_spec=environment_spec, + replay_client=replay_client, + ) + print("learner : ", learner) + + self._learner = learner + self._replay_server = replay_server + self._replay_client = replay_client + self._dataset = dataset + self._environment_spec = environment_spec + self._policy = policy + + # keeping weights in learner_weights dict so, future calls can directly get the 'at time updated weights' + with self._learner_weights_lock.gen_wlock(): + all_weights = self._learner.get_variables(self._learner_keys) + for i in range(len(all_weights)): + self._learner_weights[self._learner_keys[i]] = all_weights[i] + + # spinning a thread which update the learner when dataset iterator is ready + executor = concurrent.futures.ThreadPoolExecutor(max_workers=1) + executor.submit(self.update_learner) + # Manually shutdown the executor after submitting tasks + executor.shutdown(wait=False) + logging.info("<<<< Out %s of %s", method_name, _file_name) + # print(f"<<<< Out {method_name} of {_file_name}") + + @overrides + def launch(self, + request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: + method_name = "launch" + logging.debug(">>>> In %s of %s", method_name, _file_name) + super(Acme, self).launch(request) + + print('launch request : ', request) + + # self.create_learner(request.client_id, request.acme_config.env_name) + # self.create_learner(request.client_id, request.acme_config) + + self.create_learner( + request.client_id, + acme_config=request.decision_config_params.choice_config[ + request.label].acme_config, + state_attrs=request.decision_config_params.state_attrs, + action_attrs=request.decision_config_params.action_attrs) + + # TODO : meetashah : this is old version, might have to modify for now. + # storing client details in case server crashed - mid run + # client_details = {} + # client_details["sight_id"] = int(request.client_id) + # client_details["env"] = "CartPole-v1" + # client_details["network_path"] = ( + # f"gs://{FLAGS.project_id}-sight/learner/" + request.client_id + "/" + # ) + # client_details["learner_path"] = ( + # f"gs://{FLAGS.project_id}-sight/learner/" + request.client_id + "/" + # ) + # client_details["replay_address"] = "127.0.0.1" + # server_utils.Insert_In_ClientData_Table( + # client_details, "sight-data", "sight_db", "ClientData" + # ) + + response = service_pb2.LaunchResponse() + response.display_string = "ACME SUCCESS!" + logging.info("<<<< Out %s of %s", method_name, _file_name) + return response + + @overrides + def decision_point( + self, request: service_pb2.DecisionPointRequest + ) -> service_pb2.DecisionPointResponse: + method_name = "decision_point" + logging.info(">>>> In %s of %s", method_name, _file_name) + + #? 
start separate thread for the insertion to replay + executor = concurrent.futures.ThreadPoolExecutor(max_workers=1) + executor.submit(self.insert_to_replay, request) + # Manually shutdown the executor after submitting tasks + executor.shutdown(wait=False) + + # logging.info("sending latest weights back to client") + latest_weights = [] + with self._learner_weights_lock.gen_rlock(): + if (len(request.acme_config.learner_keys) > 0): + for key in request.acme_config.learner_keys: + latest_weights.append(self._learner_weights[key]) + else: + latest_weights.append(self._learner_weights["policy"]) + + response = service_pb2.DecisionPointResponse() + + # Convert NumPy arrays to lists before serialization + def convert_np_to_list(obj): + if isinstance(obj, np.ndarray): + return {'data': obj.tolist(), 'shape': obj.shape} + return obj + + # directly serializing the weights structure + # serialized_weights = json.dumps( + # latest_weights, default=convert_np_to_list).encode('utf-8') + serialized_weights = pickle.dumps(latest_weights) + + response.weights = serialized_weights + + logging.info("<<<< Out %s of %s", method_name, _file_name) + return response + + @overrides + def finalize_episode( + self, request: service_pb2.FinalizeEpisodeRequest + ) -> service_pb2.FinalizeEpisodeResponse: + method_name = "finalize_episode" + logging.debug(">>>> In %s of %s", method_name, _file_name) + + executor = concurrent.futures.ThreadPoolExecutor(max_workers=1) + executor.submit(self.insert_to_replay, request) + # Manually shutdown the executor after submitting tasks + executor.shutdown(wait=False) + + # observation = np.array( + # list(param_proto_to_dict(request.decision_point.state_params).values()), + # dtype=np.float32, + # ) + # # logging.info('observation : %s', observation) + # with self.last_action_lock.gen_wlock(): + # if request.worker_id in self.last_action: + # action = self.last_action[request.worker_id] + + # timestep = dm_env.TimeStep( + # step_type=dm_env.StepType.LAST, + # reward=np.array( + # request.decision_outcome.outcome_value, dtype=np.float64 + # ), + # discount=np.array( + # request.decision_outcome.discount, dtype=np.float64 + # ), + # observation=np.frombuffer(observation, dtype=np.float32), + # ) + + # with self.agents_lock.gen_rlock(): + # self.agents[request.worker_id].observe( + # np.int64(action), next_timestep=timestep + # ) + # + # # self.agents[request.worker_id].observe( + # # np.float32(action), next_timestep=timestep + # # ) + # self.agents[request.worker_id].update() + # self._learner_checkpointer.save(force=True) + + # Resetting last action for agent since it is the end of the episode. + # del self.last_action[request.worker_id] + + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return service_pb2.FinalizeEpisodeResponse(response_str="Success!") + + @overrides + def current_status( + self, request: service_pb2.CurrentStatusRequest + ) -> service_pb2.CurrentStatusResponse: + method_name = "current_status" + logging.debug(">>>> In %s of %s", method_name, _file_name) + response = "[ACME]\n" + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return service_pb2.CurrentStatusResponse(response_str=response) diff --git a/sight_service/bayesian_opt.py b/sight_service/bayesian_opt.py index 90645bc..41bb75f 100644 --- a/sight_service/bayesian_opt.py +++ b/sight_service/bayesian_opt.py @@ -13,139 +13,135 @@ # limitations under the License. 
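`insert_to_replay` above reconstructs a `dm_env` trajectory from the request protos: the FIRST step goes through `adder.add_first()` with no reward or discount, and every later step pairs the action that preceded it with the new timestep via `adder.add()`. A schematic of that calling contract with a stand-in adder (the real adder comes from `experiment.builder.make_adder`; `RecordingAdder` is a hypothetical substitute so the sketch runs on its own):

```python
import dm_env
import numpy as np

class RecordingAdder:
  """Stand-in for a reverb adder; just records the call sequence."""

  def add_first(self, timestep):
    print("episode start:", timestep.step_type)

  def add(self, action, next_timestep):
    print("transition:", action, "reward:", next_timestep.reward)

adder = RecordingAdder()
obs = np.zeros(4, dtype=np.float32)

# First step: add_first(), no action/reward/discount yet.
adder.add_first(dm_env.restart(obs))

# Mid-episode: the action that *led to* this timestep is paired with it.
adder.add(np.int64(1), dm_env.transition(reward=1.0, observation=obs))

# Final step: dm_env.termination() sets discount to 0 to mark the end.
adder.add(np.int64(0), dm_env.termination(reward=0.5, observation=obs))
```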
"""LLM-based optimization for driving Sight applications.""" -from helpers.logs.logs_handler import logger as logging -from overrides import overrides +import json +import os +import random +import threading from typing import Any, Dict, List, Tuple from bayes_opt import BayesianOptimization from bayes_opt import UtilityFunction -from sight_service.optimizer_instance import param_dict_to_proto -from sight_service.optimizer_instance import OptimizerInstance -from sight_service.proto import service_pb2 -from sight.proto import sight_pb2 -import random -import requests import google.auth import google.auth.transport.requests -import json -import os -import threading +from helpers.logs.logs_handler import logger as logging +from overrides import overrides +import requests +from sight.proto import sight_pb2 +from sight_service.optimizer_instance import OptimizerInstance +from sight_service.optimizer_instance import param_dict_to_proto +from sight_service.proto import service_pb2 _file_name = "bayesian_opt.py" class BayesianOpt(OptimizerInstance): - """Uses an LLM to choose the parameters of the code. + """Uses an LLM to choose the parameters of the code. """ - def __init__(self): - super().__init__() - self._lock = threading.RLock() - self._total_count = 0 - self._completed_count = 0 - - @overrides - def launch( - self, - request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: - response = super(BayesianOpt, self).launch(request) - self._total_count = request.decision_config_params.num_trials - self._optimizer = BayesianOptimization( - f=None, - pbounds={ - key: (p.min_value, p.max_value) - for key, p in self.actions.items() - }, - verbose=2, - allow_duplicate_points=True, - # random_state=1, - ) - self._utility = UtilityFunction(kind='ucb', kappa=1.96, xi=0.01) - response.display_string = 'BayesianOpt Start' - return response - - # def _params_to_dict(self, dp: sight_pb2) -> Dict[str, float]: - # """Returns the dict representation of a DecisionParams proto""" - # d = {} - # for a in dp: - # d[a.key] = a.value.double_value - # return d - - @overrides - def decision_point( - self, request: service_pb2.DecisionPointRequest - ) -> service_pb2.DecisionPointResponse: - logging.info('DecisionPoint request=%s', request) - print('DecisionPoint request=%s' % request) - - self._lock.acquire() - selected_actions = self._optimizer.suggest(self._utility) - self._lock.release() - - dp_response = service_pb2.DecisionPointResponse() - for key, value in selected_actions.items(): - a = dp_response.action.add() - a.key = key - a.value.sub_type = sight_pb2.Value.ST_DOUBLE - a.value.double_value = float(value) - - print('DecisionPoint response=%s' % dp_response) - dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT - return dp_response - - @overrides - def finalize_episode( - self, request: service_pb2.FinalizeEpisodeRequest - ) -> service_pb2.FinalizeEpisodeResponse: - logging.info('FinalizeEpisode request=%s', request) - d = {} - for a in request.decision_point.choice_params: - d[a.key] = a.value.double_value - - self._lock.acquire() - logging.info('FinalizeEpisode outcome=%s / %s', - request.decision_outcome.reward, d) - self._optimizer.register(params=d, - target=request.decision_outcome.reward) - # self._completed_count += 1 - self._lock.release() - return service_pb2.FinalizeEpisodeResponse(response_str='Success!') - - @overrides - def current_status( - self, request: service_pb2.CurrentStatusRequest - ) -> service_pb2.CurrentStatusResponse: - output = '[BayesianOpt (#%s 
trials)\n' % len(self._optimizer.res) - for trial in sorted(self._optimizer.res, - key=lambda x: x['target'], - reverse=True): - output += ' ' + str(trial) + '\n' - output += ']\n' - - if (self._completed_count == self._total_count): - status = service_pb2.CurrentStatusResponse.Status.SUCCESS - elif (self._completed_count < self._total_count): - status = service_pb2.CurrentStatusResponse.Status.IN_PROGRESS - else: - status = service_pb2.CurrentStatusResponse.Status.FAILURE - - return service_pb2.CurrentStatusResponse(response_str=output, - status=status) - - @overrides - def WorkerAlive( - self, request: service_pb2.WorkerAliveRequest - ) -> service_pb2.WorkerAliveResponse: - method_name = "WorkerAlive" - logging.debug(">>>> In %s of %s", method_name, _file_name) - if (self._completed_count == self._total_count): - worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_DONE - # elif(not self.pending_samples): - # worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_RETRY - else: - # Increasing count here so that multiple workers can't enter the dp call for same sample at last - self._completed_count += 1 - worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_ACT - logging.info("worker_alive_status is %s", worker_alive_status) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return service_pb2.WorkerAliveResponse(status_type=worker_alive_status) + def __init__(self): + super().__init__() + self._lock = threading.RLock() + self._total_count = 0 + self._completed_count = 0 + + @overrides + def launch(self, + request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: + response = super(BayesianOpt, self).launch(request) + self._total_count = request.decision_config_params.num_trials + self._optimizer = BayesianOptimization( + f=None, + pbounds={ + key: (p.min_value, p.max_value) for key, p in self.actions.items() + }, + verbose=2, + allow_duplicate_points=True, + # random_state=1, + ) + self._utility = UtilityFunction(kind='ucb', kappa=1.96, xi=0.01) + response.display_string = 'BayesianOpt Start' + return response + + # def _params_to_dict(self, dp: sight_pb2) -> Dict[str, float]: + # """Returns the dict representation of a DecisionParams proto""" + # d = {} + # for a in dp: + # d[a.key] = a.value.double_value + # return d + + @overrides + def decision_point( + self, request: service_pb2.DecisionPointRequest + ) -> service_pb2.DecisionPointResponse: + logging.info('DecisionPoint request=%s', request) + print('DecisionPoint request=%s' % request) + + self._lock.acquire() + selected_actions = self._optimizer.suggest(self._utility) + self._lock.release() + + dp_response = service_pb2.DecisionPointResponse() + for key, value in selected_actions.items(): + a = dp_response.action.add() + a.key = key + a.value.sub_type = sight_pb2.Value.ST_DOUBLE + a.value.double_value = float(value) + + print('DecisionPoint response=%s' % dp_response) + dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT + return dp_response + + @overrides + def finalize_episode( + self, request: service_pb2.FinalizeEpisodeRequest + ) -> service_pb2.FinalizeEpisodeResponse: + logging.info('FinalizeEpisode request=%s', request) + d = {} + for a in request.decision_point.choice_params: + d[a.key] = a.value.double_value + + self._lock.acquire() + logging.info('FinalizeEpisode outcome=%s / %s', + request.decision_outcome.reward, d) + self._optimizer.register(params=d, target=request.decision_outcome.reward) + # self._completed_count += 1 + 
self._lock.release() + return service_pb2.FinalizeEpisodeResponse(response_str='Success!') + + @overrides + def current_status( + self, request: service_pb2.CurrentStatusRequest + ) -> service_pb2.CurrentStatusResponse: + output = '[BayesianOpt (#%s trials)\n' % len(self._optimizer.res) + for trial in sorted(self._optimizer.res, + key=lambda x: x['target'], + reverse=True): + output += ' ' + str(trial) + '\n' + output += ']\n' + + if (self._completed_count == self._total_count): + status = service_pb2.CurrentStatusResponse.Status.SUCCESS + elif (self._completed_count < self._total_count): + status = service_pb2.CurrentStatusResponse.Status.IN_PROGRESS + else: + status = service_pb2.CurrentStatusResponse.Status.FAILURE + + return service_pb2.CurrentStatusResponse(response_str=output, status=status) + + @overrides + def WorkerAlive( + self, request: service_pb2.WorkerAliveRequest + ) -> service_pb2.WorkerAliveResponse: + method_name = "WorkerAlive" + logging.debug(">>>> In %s of %s", method_name, _file_name) + if (self._completed_count == self._total_count): + worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_DONE + # elif(not self.pending_samples): + # worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_RETRY + else: + # Increasing count here so that multiple workers can't enter the dp call for same sample at last + self._completed_count += 1 + worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_ACT + logging.info("worker_alive_status is %s", worker_alive_status) + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return service_pb2.WorkerAliveResponse(status_type=worker_alive_status) diff --git a/sight_service/build_d4pg_learner.py b/sight_service/build_d4pg_learner.py index 4797962..c7c510c 100644 --- a/sight_service/build_d4pg_learner.py +++ b/sight_service/build_d4pg_learner.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Setting up configuration for DQN Experiment.""" from absl import flags @@ -25,7 +24,6 @@ import gym import haiku as hk - # SEED = flags.DEFINE_integer('seed', 0, 'Random seed.') # NUM_STEPS = flags.DEFINE_integer( # 'num_steps', 10, 'Number of env steps to run.' # 1_000_000 @@ -37,7 +35,7 @@ def build_d4pg_config(env_name: str = '', possible_action_values: int = 1): def env_factory(): if env_name: - env = wrappers.GymWrapper(gym.make(env_name)) + env = wrappers.GymWrapper(gym.make(env_name)) # env = wrappers.CanonicalSpecWrapper(env, clip=True) # env = wrappers.SinglePrecisionWrapper(env) return env diff --git a/sight_service/build_dqn_learner.py b/sight_service/build_dqn_learner.py index d48dc9b..6bf4fe4 100644 --- a/sight_service/build_dqn_learner.py +++ b/sight_service/build_dqn_learner.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
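The `BayesianOpt` class above drives `bayes_opt` in "ask/tell" mode: `decision_point` asks the optimizer for a suggestion under the lock, a worker evaluates it, and `finalize_episode` registers the observed reward. A self-contained sketch of that loop on a toy objective (the bounds and objective are illustrative; the `suggest`/`register` calls mirror the ones in the diff):

```python
from bayes_opt import BayesianOptimization, UtilityFunction

optimizer = BayesianOptimization(
    f=None,  # no objective callable: we drive the suggest/register cycle ourselves
    pbounds={'x': (-2.0, 2.0), 'y': (0.0, 5.0)},
    verbose=0,
    allow_duplicate_points=True,
)
utility = UtilityFunction(kind='ucb', kappa=1.96, xi=0.01)

for _ in range(10):
  params = optimizer.suggest(utility)                    # "ask": decision_point
  reward = -params['x'] ** 2 - (params['y'] - 1.0) ** 2  # worker evaluates action
  optimizer.register(params=params, target=reward)       # "tell": finalize_episode

best = max(optimizer.res, key=lambda r: r['target'])
print(best['params'], best['target'])
```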
- """Setting up configuration for DQN Experiment.""" from absl import flags from acme import specs @@ -32,7 +31,7 @@ def env_factory(): # if(env_name): # return wrappers.GymWrapper(gym.make(env_name)) # else: - return None + return None def net_factory(environment_spec: specs.EnvironmentSpec) -> dqn.DQNNetworks: """Creates networks for training DQN.""" @@ -51,8 +50,7 @@ def network(inputs): network_hk = hk.without_apply_rng(hk.transform(network)) obs = utils.add_batch_dim(utils.zeros_like(environment_spec.observations)) network = networks_lib.FeedForwardNetwork( - init=lambda rng: network_hk.init(rng, obs), apply=network_hk.apply - ) + init=lambda rng: network_hk.init(rng, obs), apply=network_hk.apply) typed_network = networks_lib.non_stochastic_network_to_typed(network) return dqn.DQNNetworks(policy_network=typed_network) diff --git a/sight_service/build_mdqn_learner.py b/sight_service/build_mdqn_learner.py index 4431e51..96fad65 100644 --- a/sight_service/build_mdqn_learner.py +++ b/sight_service/build_mdqn_learner.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Setting up configuration for DQN Experiment.""" from absl import flags @@ -47,21 +46,18 @@ def network(inputs): network_hk = hk.without_apply_rng(hk.transform(network)) obs = utils.add_batch_dim(utils.zeros_like(environment_spec.observations)) network = networks_lib.FeedForwardNetwork( - init=lambda rng: network_hk.init(rng, obs), apply=network_hk.apply - ) + init=lambda rng: network_hk.init(rng, obs), apply=network_hk.apply) typed_network = networks_lib.non_stochastic_network_to_typed(network) return dqn.DQNNetworks(policy_network=typed_network) - # Construct the agent. - config = dqn.DQNConfig( - discount=0.99, - n_step=1, - epsilon=0.1 - ) + # Construct the agent. 
+ config = dqn.DQNConfig(discount=0.99, n_step=1, epsilon=0.1) - loss_fn = losses.MunchausenQLearning( - discount=config.discount, max_abs_reward=1., huber_loss_parameter=1., - entropy_temperature=0.03, munchausen_coefficient=0.9) + loss_fn = losses.MunchausenQLearning(discount=config.discount, + max_abs_reward=1., + huber_loss_parameter=1., + entropy_temperature=0.03, + munchausen_coefficient=0.9) dqn_builder = dqn.DQNBuilder(config, loss_fn=loss_fn) diff --git a/sight_service/build_qrdqn_learner.py b/sight_service/build_qrdqn_learner.py index 52203d8..5d7478b 100644 --- a/sight_service/build_qrdqn_learner.py +++ b/sight_service/build_qrdqn_learner.py @@ -21,51 +21,51 @@ from acme.jax import experiments from acme.jax import networks as networks_lib from acme.jax import utils -import jax.numpy as jnp import haiku as hk +import jax.numpy as jnp NUM_QUANTILES = flags.DEFINE_integer('num_quantiles', 20, 'Number of bins to use.') def build_qrdqn_config(): - """Builds QR-DQN experiment config which can be executed in different ways.""" + """Builds QR-DQN experiment config which can be executed in different ways.""" - def env_factory(seed): - # del seed - # return helpers.make_atari_environment( - # level=env_name, sticky_actions=True, zero_discount_on_life_loss=False) - return None + def env_factory(seed): + # del seed + # return helpers.make_atari_environment( + # level=env_name, sticky_actions=True, zero_discount_on_life_loss=False) + return None - def net_factory( - environment_spec: specs.EnvironmentSpec) -> dqn.DQNNetworks: - """Creates networks for training DQN on Gym Env.""" - num_quantiles = 20 + def net_factory(environment_spec: specs.EnvironmentSpec) -> dqn.DQNNetworks: + """Creates networks for training DQN on Gym Env.""" + num_quantiles = 20 - def network(inputs): - model = hk.Sequential([ - hk.nets.MLP([512, 128, environment_spec.actions.num_values * num_quantiles]), - ]) - q_dist = model(inputs).reshape(-1, environment_spec.actions.num_values, - num_quantiles) - q_values = jnp.mean(q_dist, axis=-1) - return q_values, q_dist + def network(inputs): + model = hk.Sequential([ + hk.nets.MLP( + [512, 128, environment_spec.actions.num_values * num_quantiles]), + ]) + q_dist = model(inputs).reshape(-1, environment_spec.actions.num_values, + num_quantiles) + q_values = jnp.mean(q_dist, axis=-1) + return q_values, q_dist - network_hk = hk.without_apply_rng(hk.transform(network)) - obs = utils.add_batch_dim(utils.zeros_like(environment_spec.observations)) - network = networks_lib.FeedForwardNetwork( - init=lambda rng: network_hk.init(rng, obs), apply=network_hk.apply) - typed_network = networks_lib.non_stochastic_network_to_typed(network) - return dqn.DQNNetworks(policy_network=typed_network) + network_hk = hk.without_apply_rng(hk.transform(network)) + obs = utils.add_batch_dim(utils.zeros_like(environment_spec.observations)) + network = networks_lib.FeedForwardNetwork( + init=lambda rng: network_hk.init(rng, obs), apply=network_hk.apply) + typed_network = networks_lib.non_stochastic_network_to_typed(network) + return dqn.DQNNetworks(policy_network=typed_network) - # Construct the agent. - config = dqn.DQNConfig(discount=0.99, n_step=1, epsilon=0.1) + # Construct the agent. + config = dqn.DQNConfig(discount=0.99, n_step=1, epsilon=0.1) - loss_fn = losses.QrDqn(num_atoms=2, huber_param=1.) - dqn_builder = dqn.DistributionalDQNBuilder(config, loss_fn=loss_fn) + loss_fn = losses.QrDqn(num_atoms=2, huber_param=1.) 
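The QR-DQN network factory above emits `num_quantiles` values per action and averages them to obtain Q-values; the reshape-and-mean step is easy to sanity-check in plain numpy (the shapes here mirror the MLP head's flat output of `num_actions * num_quantiles` units):

```python
import numpy as np

batch, num_actions, num_quantiles = 3, 4, 20
flat = np.random.randn(batch, num_actions * num_quantiles)

# Same reshape the haiku head performs: one row of quantiles per action.
q_dist = flat.reshape(-1, num_actions, num_quantiles)

# Q-values are the mean over the quantile axis, as in `network` above.
q_values = q_dist.mean(axis=-1)
assert q_dist.shape == (batch, num_actions, num_quantiles)
assert q_values.shape == (batch, num_actions)
```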
+ dqn_builder = dqn.DistributionalDQNBuilder(config, loss_fn=loss_fn) - return experiments.ExperimentConfig(builder=dqn_builder, - environment_factory=env_factory, - network_factory=net_factory, - seed=0, - max_num_actor_steps=100) + return experiments.ExperimentConfig(builder=dqn_builder, + environment_factory=env_factory, + network_factory=net_factory, + seed=0, + max_num_actor_steps=100) diff --git a/sight_service/build_td3_learner.py b/sight_service/build_td3_learner.py index 6a5cc35..87b9e8c 100644 --- a/sight_service/build_td3_learner.py +++ b/sight_service/build_td3_learner.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Setting up configuration for DQN Experiment.""" from acme.agents.jax import td3 @@ -25,7 +24,7 @@ def env_factory(): # if env_name: # return wrappers.GymWrapper(gym.make(env_name)) # else: - return None + return None network_factory = ( lambda spec: td3.make_networks(spec, hidden_layer_sizes=(256, 256, 256))) @@ -37,10 +36,8 @@ def env_factory(): ) td3_builder = td3.TD3Builder(config) - return experiments.ExperimentConfig( - builder=td3_builder, - environment_factory=env_factory, - network_factory=network_factory, - seed=0, - max_num_actor_steps=10) - + return experiments.ExperimentConfig(builder=td3_builder, + environment_factory=env_factory, + network_factory=network_factory, + seed=0, + max_num_actor_steps=10) diff --git a/sight_service/exhaustive_search.py b/sight_service/exhaustive_search.py index 5b388eb..b8f12ab 100644 --- a/sight_service/exhaustive_search.py +++ b/sight_service/exhaustive_search.py @@ -13,239 +13,231 @@ # limitations under the License. """Exhaustive search for driving Sight applications.""" -from helpers.logs.logs_handler import logger as logging -from overrides import overrides +import threading from typing import Any, Dict, List, Tuple -from sight_service.proto import service_pb2 -from sight_service.optimizer_instance import param_dict_to_proto +from helpers.logs.logs_handler import logger as logging +from overrides import overrides from sight_service.optimizer_instance import OptimizerInstance -import threading +from sight_service.optimizer_instance import param_dict_to_proto +from sight_service.proto import service_pb2 _file_name = "exhaustive_search.py" class ExhaustiveSearch(OptimizerInstance): - """Exhaustively searches over all the possible values of the action attributes. + """Exhaustively searches over all the possible values of the action attributes. Attributes: possible_values: Maps each action attributes to the list of possible values of this attribute. 
""" - def __init__(self): - super().__init__() - self.next_sample_to_issue = [] - self.active_samples = {} - self.complete_samples = {} - self.last_sample = False - self.sweep_issue_done = False - self.possible_values = {} - self.max_reward_sample = {} - self._lock = threading.RLock() - - @overrides - def launch( - self, - request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: - method_name = "launch" - logging.debug(">>>> In %s of %s", method_name, _file_name) - - response = super(ExhaustiveSearch, self).launch(request) - print("self.actions : ", self.actions) - self.next_sample_to_issue = [0] * len(self.actions) - print("self.next_sample_to_issue : ", self.next_sample_to_issue) - - self.possible_values = {} - for i, key in enumerate(sorted(self.actions.keys())): - if self.actions[key].valid_float_values: - self.possible_values[key] = list( - self.actions[key].valid_float_values) - elif self.actions[key].step_size: - self.possible_values[key] = [] - cur = self.actions[key].min_value - while cur <= self.actions[key].max_value: - self.possible_values[key].append(cur) - cur += self.actions[key].step_size - - logging.info('possible_values=%s', self.possible_values) - response.display_string = 'Exhaustive Search SUCCESS!' - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return response - - @overrides - def decision_point( - self, request: service_pb2.DecisionPointRequest - ) -> service_pb2.DecisionPointResponse: - method_name = "decision_point" - logging.debug(">>>> In %s of %s", method_name, _file_name) + def __init__(self): + super().__init__() + self.next_sample_to_issue = [] + self.active_samples = {} + self.complete_samples = {} + self.last_sample = False + self.sweep_issue_done = False + self.possible_values = {} + self.max_reward_sample = {} + self._lock = threading.RLock() + + @overrides + def launch(self, + request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: + method_name = "launch" + logging.debug(">>>> In %s of %s", method_name, _file_name) + + response = super(ExhaustiveSearch, self).launch(request) + print("self.actions : ", self.actions) + self.next_sample_to_issue = [0] * len(self.actions) + print("self.next_sample_to_issue : ", self.next_sample_to_issue) + + self.possible_values = {} + for i, key in enumerate(sorted(self.actions.keys())): + if self.actions[key].valid_float_values: + self.possible_values[key] = list(self.actions[key].valid_float_values) + elif self.actions[key].step_size: + self.possible_values[key] = [] + cur = self.actions[key].min_value + while cur <= self.actions[key].max_value: + self.possible_values[key].append(cur) + cur += self.actions[key].step_size + + logging.info('possible_values=%s', self.possible_values) + response.display_string = 'Exhaustive Search SUCCESS!' 
+ logging.debug("<<<< Out %s of %s", method_name, _file_name) + return response + + @overrides + def decision_point( + self, request: service_pb2.DecisionPointRequest + ) -> service_pb2.DecisionPointResponse: + method_name = "decision_point" + logging.debug(">>>> In %s of %s", method_name, _file_name) + logging.info( + ('Running for exhaustive search...., last_sample=%s,' + ' sweep_issue_done=%s'), + self.last_sample, + self.sweep_issue_done, + ) + logging.info('self.next_sample_to_issue=%s', self.next_sample_to_issue) + # logging.info('self.possible_values=%s', self.possible_values) + + if self.sweep_issue_done: + return service_pb2.DecisionPointResponse(action={}) + + next_action = {} + for i, key in enumerate(self.actions): + next_action[key] = self.possible_values[key][self.next_sample_to_issue[i]] + + self._lock.acquire() + self.active_samples[request.worker_id] = { + 'action': next_action, + 'sample': tuple(self.next_sample_to_issue), + } + if self.last_sample: + self.sweep_issue_done = True + else: + # Advance next_sample_to_issue + num_dims_advanced = 0 + keys = sorted(self.actions.keys()) + for i, key in reversed(list(enumerate(keys))): logging.info( - ('Running for exhaustive search...., last_sample=%s,' - ' sweep_issue_done=%s'), - self.last_sample, - self.sweep_issue_done, + 'Advancing i=%s, key=%s, next_sample=%s, possible_values=%s', + i, + key, + self.next_sample_to_issue[i], + self.possible_values[keys[i]], ) - logging.info('self.next_sample_to_issue=%s', self.next_sample_to_issue) - # logging.info('self.possible_values=%s', self.possible_values) - - if self.sweep_issue_done: - return service_pb2.DecisionPointResponse(action={}) - - next_action = {} - for i, key in enumerate(self.actions): - next_action[key] = self.possible_values[key][ - self.next_sample_to_issue[i]] - - self._lock.acquire() - self.active_samples[request.worker_id] = { - 'action': next_action, - 'sample': tuple(self.next_sample_to_issue), - } - if self.last_sample: - self.sweep_issue_done = True + if self.next_sample_to_issue[i] < len(self.possible_values[key]) - 1: + self.next_sample_to_issue[i] += 1 + break else: - # Advance next_sample_to_issue - num_dims_advanced = 0 - keys = sorted(self.actions.keys()) - for i, key in reversed(list(enumerate(keys))): - logging.info( - 'Advancing i=%s, key=%s, next_sample=%s, possible_values=%s', - i, - key, - self.next_sample_to_issue[i], - self.possible_values[keys[i]], - ) - if self.next_sample_to_issue[i] < len( - self.possible_values[key]) - 1: - self.next_sample_to_issue[i] += 1 - break - else: - self.next_sample_to_issue[i] = 0 - num_dims_advanced += 1 - - self.last_sample = num_dims_advanced == len(self.actions) - self._lock.release() - - logging.info('next_action=%s', next_action) - dp_response = service_pb2.DecisionPointResponse() - dp_response.action.extend(param_dict_to_proto(next_action)) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT - return dp_response - - @overrides - def finalize_episode( - self, request: service_pb2.FinalizeEpisodeRequest - ) -> service_pb2.FinalizeEpisodeResponse: - method_name = "finalize_episode" - logging.debug(">>>> In %s of %s", method_name, _file_name) - # logging.info('Running for exhaustive search....') - # logging.info("req in finalize episode of exhaustive_search.py : %s", request) - - # logging.info('FinalizeEpisode complete_samples=%s' % self.complete_samples) - self._lock.acquire() - self.complete_samples[tuple( - 
self.active_samples[request.worker_id]['sample'])] = { - 'reward': request.decision_outcome.reward, - 'action': self.active_samples[request.worker_id]['action'], - 'outcome': request.decision_outcome.outcome_params - } - logging.info('FinalizeEpisode complete_samples=%s' % - self.complete_samples) - - # if(self.max_reward_sample == {} or self.max_reward_sample['outcome'] < request.decision_outcome.outcome_value): - if (self.max_reward_sample == {} or self.max_reward_sample['reward'] - < request.decision_outcome.reward): - self.max_reward_sample = { - # 'outcome': request.decision_outcome.outcome_value, - 'reward': request.decision_outcome.reward, - 'action': self.active_samples[request.worker_id]['action'], - } - self._lock.release() - - del self.active_samples[request.worker_id] - # logging.info('FinalizeEpisode active_samples=%s' % self.active_samples) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return service_pb2.FinalizeEpisodeResponse(response_str='Success!') - - @overrides - def current_status( - self, request: service_pb2.CurrentStatusRequest - ) -> service_pb2.CurrentStatusResponse: - method_name = "current_status" - logging.debug(">>>> In %s of %s", method_name, _file_name) - response = ( - '[ExhaustiveSearch: {"Done" if self.sweep_issue_done else "In' - ' Progress"}\n') - self._lock.acquire() - response += f' #active_samples={len(self.active_samples)}\n' - response += ' completed_samples=\n' - response += ', '.join(list(self.actions)) + ', outcome\n' - - cur = [0] * len(self.actions) - # action_keys = list(self.actions.keys()) - keys = sorted(self.actions.keys()) - logging.info('self.complete_samples=%s', self.complete_samples) - - reached_last = False - while not reached_last: - logging.info('cur(#%d)=%s', len(cur), cur) - response += ', '.join([ - str(self.possible_values[key][cur[i]]) - for i, key in enumerate(keys) - ]) - if tuple(cur) in self.complete_samples: - response += ', ' + str( - self.complete_samples[tuple(cur)]['outcome']) - else: - response += ', ?' - response += '\n' - - # Advance cur, starting from the last dimension and going to the first. 
- for i, key in reversed(list(enumerate(keys))): - logging.info( - 'i=%d, key=%s, cur=%s, self.possible_values[key]=%s', - i, - key, - cur[i], - self.possible_values[key], - ) - if cur[i] < len(self.possible_values[key]) - 1: - cur[i] += 1 - break - else: - cur[i] = 0 - if i == 0: - reached_last = True - self._lock.release() - - response += ']' - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return service_pb2.CurrentStatusResponse(response_str=response) - - @overrides - def fetch_optimal_action( - self, request: service_pb2.FetchOptimalActionRequest - ) -> service_pb2.FetchOptimalActionResponse: - method_name = "fetch_optimal_action" - logging.debug(">>>> In %s of %s", method_name, _file_name) - best_action = self.max_reward_sample - print(" : ", best_action) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return service_pb2.CurrentStatusResponse(response_str=str(best_action)) - - @overrides - def WorkerAlive( - self, request: service_pb2.WorkerAliveRequest - ) -> service_pb2.WorkerAliveResponse: - method_name = "WorkerAlive" - logging.debug(">>>> In %s of %s", method_name, _file_name) - if (self.sweep_issue_done): - worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_DONE - # elif(not self.pending_samples): - # worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_RETRY + self.next_sample_to_issue[i] = 0 + num_dims_advanced += 1 + + self.last_sample = num_dims_advanced == len(self.actions) + self._lock.release() + + logging.info('next_action=%s', next_action) + dp_response = service_pb2.DecisionPointResponse() + dp_response.action.extend(param_dict_to_proto(next_action)) + logging.debug("<<<< Out %s of %s", method_name, _file_name) + dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT + return dp_response + + @overrides + def finalize_episode( + self, request: service_pb2.FinalizeEpisodeRequest + ) -> service_pb2.FinalizeEpisodeResponse: + method_name = "finalize_episode" + logging.debug(">>>> In %s of %s", method_name, _file_name) + # logging.info('Running for exhaustive search....') + # logging.info("req in finalize episode of exhaustive_search.py : %s", request) + + # logging.info('FinalizeEpisode complete_samples=%s' % self.complete_samples) + self._lock.acquire() + self.complete_samples[tuple( + self.active_samples[request.worker_id]['sample'])] = { + 'reward': request.decision_outcome.reward, + 'action': self.active_samples[request.worker_id]['action'], + 'outcome': request.decision_outcome.outcome_params + } + logging.info('FinalizeEpisode complete_samples=%s' % self.complete_samples) + + # if(self.max_reward_sample == {} or self.max_reward_sample['outcome'] < request.decision_outcome.outcome_value): + if (self.max_reward_sample == {} or + self.max_reward_sample['reward'] < request.decision_outcome.reward): + self.max_reward_sample = { + # 'outcome': request.decision_outcome.outcome_value, + 'reward': request.decision_outcome.reward, + 'action': self.active_samples[request.worker_id]['action'], + } + self._lock.release() + + del self.active_samples[request.worker_id] + # logging.info('FinalizeEpisode active_samples=%s' % self.active_samples) + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return service_pb2.FinalizeEpisodeResponse(response_str='Success!') + + @overrides + def current_status( + self, request: service_pb2.CurrentStatusRequest + ) -> service_pb2.CurrentStatusResponse: + method_name = "current_status" + logging.debug(">>>> In %s of %s", method_name, _file_name) + 
response = ('[ExhaustiveSearch: '
+                f'{"Done" if self.sweep_issue_done else "In Progress"}\n')
+    self._lock.acquire()
+    response += f' #active_samples={len(self.active_samples)}\n'
+    response += ' completed_samples=\n'
+    response += ', '.join(list(self.actions)) + ', outcome\n'
+
+    cur = [0] * len(self.actions)
+    # action_keys = list(self.actions.keys())
+    keys = sorted(self.actions.keys())
+    logging.info('self.complete_samples=%s', self.complete_samples)
+
+    reached_last = False
+    while not reached_last:
+      logging.info('cur(#%d)=%s', len(cur), cur)
+      response += ', '.join([
+          str(self.possible_values[key][cur[i]]) for i, key in enumerate(keys)
+      ])
+      if tuple(cur) in self.complete_samples:
+        response += ', ' + str(self.complete_samples[tuple(cur)]['outcome'])
+      else:
+        response += ', ?'
+      response += '\n'
+
+      # Advance cur, starting from the last dimension and going to the first.
+      for i, key in reversed(list(enumerate(keys))):
+        logging.info(
+            'i=%d, key=%s, cur=%s, self.possible_values[key]=%s',
+            i,
+            key,
+            cur[i],
+            self.possible_values[key],
+        )
+        if cur[i] < len(self.possible_values[key]) - 1:
+          cur[i] += 1
+          break
         else:
-            worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_ACT
-        logging.info("worker_alive_status is %s", worker_alive_status)
-        logging.debug("<<<< Out %s of %s", method_name, _file_name)
-        return service_pb2.WorkerAliveResponse(status_type=worker_alive_status)
+          cur[i] = 0
+          if i == 0:
+            reached_last = True
+    self._lock.release()
+
+    response += ']'
+    logging.debug("<<<< Out %s of %s", method_name, _file_name)
+    return service_pb2.CurrentStatusResponse(response_str=response)
+
+  @overrides
+  def fetch_optimal_action(
+      self, request: service_pb2.FetchOptimalActionRequest
+  ) -> service_pb2.FetchOptimalActionResponse:
+    method_name = "fetch_optimal_action"
+    logging.debug(">>>> In %s of %s", method_name, _file_name)
+    best_action = self.max_reward_sample
+    print("best_action : ", best_action)
+    logging.debug("<<<< Out %s of %s", method_name, _file_name)
+    return service_pb2.CurrentStatusResponse(response_str=str(best_action))
+
+  @overrides
+  def WorkerAlive(
+      self, request: service_pb2.WorkerAliveRequest
+  ) -> service_pb2.WorkerAliveResponse:
+    method_name = "WorkerAlive"
+    logging.debug(">>>> In %s of %s", method_name, _file_name)
+    if (self.sweep_issue_done):
+      worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_DONE
+    # elif(not self.pending_samples):
+    #   worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_RETRY
+    else:
+      worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_ACT
+    logging.info("worker_alive_status is %s", worker_alive_status)
+    logging.debug("<<<< Out %s of %s", method_name, _file_name)
+    return service_pb2.WorkerAliveResponse(status_type=worker_alive_status)
diff --git a/sight_service/genetic_algorithm.py b/sight_service/genetic_algorithm.py
index 25e6698..f66ad88 100644
--- a/sight_service/genetic_algorithm.py
+++ b/sight_service/genetic_algorithm.py
@@ -14,390 +14,366 @@
 """Genetic Algorithms for driving Sight applications."""
 
 from concurrent import futures
-from helpers.logs.logs_handler import logger as logging
-from overrides import overrides
-from typing import Any, Dict, List, Tuple
-
 import math
 import random
-from sight_service.proto import service_pb2
-from sight_service.optimizer_instance import param_dict_to_proto
+from typing import Any, Dict, List, Tuple
+
+from helpers.logs.logs_handler import logger as logging
+from overrides import overrides
 from
sight_service.optimizer_instance import OptimizerInstance +from sight_service.optimizer_instance import param_dict_to_proto +from sight_service.proto import service_pb2 class GeneticAlgorithm(OptimizerInstance): - def __init__(self): - super().__init__() - self.ga_population = [] - self.ga_active_samples = {} - self.proposals = [] - self.max_population_size = 40 - self.num_decisions = 0 - self.algorithms_tried = {} - self.algorithms_succeeded_above_min = {} - self.algorithms_succeeded_best = {} - self.history = [] - - @overrides - def launch( - self, - request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: - response = super(GeneticAlgorithm, self).launch(request) - response.display_string = 'Genetic Algorithm Launch SUCCESS!' - logging.info('request.genetic_algorithm_config=%s', - request.genetic_algorithm_config) - # if request.genetic_algorithm_config.max_population_size: - # self.max_population_size = max( - # 3, request.genetic_algorithm_config.max_population_size - # ) - ga_config = request.decision_config_params.choice_config[ - request.label].genetic_algorithm_config - self.max_population_size = ga_config.max_population_size - return response - - def find_best_worst( - self, options: List[Dict[str, - Any]]) -> Tuple[float, int, float, int]: - largest_outcome = -math.inf - largest_idx = -1 - smallest_outcome = math.inf - smallest_idx = -1 - sum_outcomes = 0 - for i, unit in enumerate(options): - if unit['outcome'] > largest_outcome: - largest_outcome = unit['outcome'] - largest_idx = i - if unit['outcome'] < smallest_outcome: - smallest_outcome = unit['outcome'] - smallest_idx = i - sum_outcomes += unit['outcome'] - - return ( - largest_outcome, - largest_idx, - smallest_outcome, - smallest_idx, - sum_outcomes, - ) - - def find_best_worst_probweighted( - self, options: List[Dict[str, - Any]]) -> Tuple[float, int, float, int]: + def __init__(self): + super().__init__() + self.ga_population = [] + self.ga_active_samples = {} + self.proposals = [] + self.max_population_size = 40 + self.num_decisions = 0 + self.algorithms_tried = {} + self.algorithms_succeeded_above_min = {} + self.algorithms_succeeded_best = {} + self.history = [] + + @overrides + def launch(self, + request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: + response = super(GeneticAlgorithm, self).launch(request) + response.display_string = 'Genetic Algorithm Launch SUCCESS!' 
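+    # The population cap comes from the per-label launch config read below,
+    # e.g. a client would set decision_config_params.choice_config[label]
+    # .genetic_algorithm_config.max_population_size (label is whatever name
+    # the client registered). Until the population reaches this cap,
+    # decision_point() keeps sampling actions uniformly at random.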
+ logging.info('request.genetic_algorithm_config=%s', + request.genetic_algorithm_config) + # if request.genetic_algorithm_config.max_population_size: + # self.max_population_size = max( + # 3, request.genetic_algorithm_config.max_population_size + # ) + ga_config = request.decision_config_params.choice_config[ + request.label].genetic_algorithm_config + self.max_population_size = ga_config.max_population_size + return response + + def find_best_worst( + self, options: List[Dict[str, Any]]) -> Tuple[float, int, float, int]: + largest_outcome = -math.inf + largest_idx = -1 + smallest_outcome = math.inf + smallest_idx = -1 + sum_outcomes = 0 + for i, unit in enumerate(options): + if unit['outcome'] > largest_outcome: + largest_outcome = unit['outcome'] + largest_idx = i + if unit['outcome'] < smallest_outcome: + smallest_outcome = unit['outcome'] + smallest_idx = i + sum_outcomes += unit['outcome'] + + return ( + largest_outcome, + largest_idx, + smallest_outcome, + smallest_idx, + sum_outcomes, + ) + + def find_best_worst_probweighted( + self, options: List[Dict[str, Any]]) -> Tuple[float, int, float, int]: + ( + largest_outcome, + largest_idx, + smallest_outcome, + smallest_idx, + sum_outcomes, + ) = self.find_best_worst(options) + # logging.info('largest_outcome=%s, largest_idx=%s, smallest_outcome=%s, smallest_idx=%s, sum_outcomes=%s', largest_outcome, largest_idx, smallest_outcome, smallest_idx, sum_outcomes) + + sum_of_max_adjusted_outcomes = largest_outcome * len(options) - sum_outcomes + smallest_outcome_choice = random.uniform(0, sum_of_max_adjusted_outcomes) + logging.info( + 'sum_of_max_adjusted_outcomes=%s, smallest_outcome_choice=%s', + sum_of_max_adjusted_outcomes, + smallest_outcome_choice, + ) + + cumulative_outcomes_sum = 0 + smallest_outcome = math.inf + smallest_idx = -1 + for i, unit in enumerate(options): + cumulative_outcomes_sum += largest_outcome - unit['outcome'] + # logging.info('unit[outcome]=%s, cumulative_outcomes_sum=%s, found=%s', unit['outcome'], cumulative_outcomes_sum, smallest_outcome_choice < cumulative_outcomes_sum) + if smallest_outcome_choice <= cumulative_outcomes_sum: + return largest_outcome, largest_idx, unit['outcome'], i + + logging.error( + ('WARNING: smallest_outcome_choice=%s,' + ' sum_of_max_adjusted_outcomes=%s but we failed to find the index' + ' of this unit'), + smallest_outcome_choice, + sum_of_max_adjusted_outcomes, + ) + return largest_outcome, largest_idx, smallest_outcome, smallest_idx + + @overrides + def decision_point( + self, request: service_pb2.DecisionPointRequest + ) -> service_pb2.DecisionPointResponse: + logging.info('%s| ga_population(#%d)=', request.worker_id, + len(self.ga_population)) + for member in sorted(self.ga_population, + key=lambda p: p['outcome'], + reverse=True): + logging.info('%s| %s: %s', request.worker_id, member['outcome'], + member['action']) + + self.num_decisions += 1 + if (len(self.ga_population) < self.max_population_size or + random.randint(1, 100) <= 5): + algorithm = 'random_sample' + # Randomly sample an action. 
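+      # Warm-up path: sample each action attribute uniformly from its range.
+      # (Past warm-up there is still a 5% chance, random.randint(1, 100) <= 5,
+      # of injecting a fresh random member.) When the population is already
+      # full, find_best_worst_probweighted() picks the member to evict by a
+      # roulette wheel over (largest_outcome - outcome): e.g. for outcomes
+      # [1, 3, 4] (hypothetical) the eviction weights are [3, 1, 0], so worse
+      # members are replaced more often and the current best is never evicted.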
+ next_action = {} + for key in self.actions.keys(): + next_action[key] = random.uniform(self.actions[key].min_value, + self.actions[key].max_value) + # logging.info(" [%s - %s]: %s", self.actions[key].min_value, + # self.actions[key].max_value, + # next_action[key]) + + if len(self.ga_population) >= self.max_population_size: + largest_outcome, largest_idx, smallest_outcome, smallest_idx = ( + self.find_best_worst_probweighted(self.ga_population)) + # Remove the worst member of the population + del self.ga_population[smallest_idx] + + logging.info( + '%s| Randomly sample: next_action : %s', + request.worker_id, + next_action, + ) + else: + largest_outcome, largest_idx, smallest_outcome, smallest_idx = ( + self.find_best_worst_probweighted(self.ga_population)) + + # logging.info('Retrying largest=%s', self.ga_population[spouse_idx]) + # next_action = dict(self.ga_population[largest_idx]['action']) + # # Remove the chosen member of the population + # logging.info('deleting largest unit=%s', self.ga_population[largest_idx]) + # del self.ga_population[largest_idx] + + if self.proposals and random.randint(0, 10) < 5: ( - largest_outcome, - largest_idx, - smallest_outcome, - smallest_idx, - sum_outcomes, - ) = self.find_best_worst(options) - # logging.info('largest_outcome=%s, largest_idx=%s, smallest_outcome=%s, smallest_idx=%s, sum_outcomes=%s', largest_outcome, largest_idx, smallest_outcome, smallest_idx, sum_outcomes) - - sum_of_max_adjusted_outcomes = largest_outcome * len( - options) - sum_outcomes - smallest_outcome_choice = random.uniform(0, - sum_of_max_adjusted_outcomes) - logging.info( - 'sum_of_max_adjusted_outcomes=%s, smallest_outcome_choice=%s', - sum_of_max_adjusted_outcomes, - smallest_outcome_choice, - ) - - cumulative_outcomes_sum = 0 - smallest_outcome = math.inf - smallest_idx = -1 - for i, unit in enumerate(options): - cumulative_outcomes_sum += largest_outcome - unit['outcome'] - # logging.info('unit[outcome]=%s, cumulative_outcomes_sum=%s, found=%s', unit['outcome'], cumulative_outcomes_sum, smallest_outcome_choice < cumulative_outcomes_sum) - if smallest_outcome_choice <= cumulative_outcomes_sum: - return largest_outcome, largest_idx, unit['outcome'], i - - logging.error( - ('WARNING: smallest_outcome_choice=%s,' - ' sum_of_max_adjusted_outcomes=%s but we failed to find the index' - ' of this unit'), - smallest_outcome_choice, - sum_of_max_adjusted_outcomes, - ) - return largest_outcome, largest_idx, smallest_outcome, smallest_idx - - @overrides - def decision_point( - self, request: service_pb2.DecisionPointRequest - ) -> service_pb2.DecisionPointResponse: - logging.info('%s| ga_population(#%d)=', request.worker_id, - len(self.ga_population)) - for member in sorted(self.ga_population, - key=lambda p: p['outcome'], - reverse=True): - logging.info('%s| %s: %s', request.worker_id, member['outcome'], - member['action']) - - self.num_decisions += 1 - if (len(self.ga_population) < self.max_population_size - or random.randint(1, 100) <= 5): - algorithm = 'random_sample' - # Randomly sample an action. 
- next_action = {} - for key in self.actions.keys(): - next_action[key] = random.uniform(self.actions[key].min_value, - self.actions[key].max_value) - # logging.info(" [%s - %s]: %s", self.actions[key].min_value, - # self.actions[key].max_value, - # next_action[key]) - - if len(self.ga_population) >= self.max_population_size: - largest_outcome, largest_idx, smallest_outcome, smallest_idx = ( - self.find_best_worst_probweighted(self.ga_population)) - # Remove the worst member of the population - del self.ga_population[smallest_idx] - - logging.info( - '%s| Randomly sample: next_action : %s', - request.worker_id, - next_action, - ) - else: - largest_outcome, largest_idx, smallest_outcome, smallest_idx = ( - self.find_best_worst_probweighted(self.ga_population)) - - # logging.info('Retrying largest=%s', self.ga_population[spouse_idx]) - # next_action = dict(self.ga_population[largest_idx]['action']) - # # Remove the chosen member of the population - # logging.info('deleting largest unit=%s', self.ga_population[largest_idx]) - # del self.ga_population[largest_idx] - - if self.proposals and random.randint(0, 10) < 5: - ( - prop_largest_outcome, - prop_largest_idx, - prop_smallest_outcome, - prop_smallest_idx, - ) = self.find_best_worst_probweighted(self.proposals) - logging.info( - '%s| Best proposal: %s: %s', - request.worker_id, - self.proposals[prop_largest_idx]['outcome'], - self.proposals[prop_largest_idx]['action'], - ) - next_action = self.proposals[prop_largest_idx]['action'] - algorithm = 'best_proposal' - del self.proposals[prop_largest_idx] - else: - spouse_idx = random.randint(0, len(self.ga_population) - 1) - # logging.info('smallest_idx=%s, largest_idx=%s, spouse_idx=%s', - # smallest_idx, largest_idx, spouse_idx) - while spouse_idx == smallest_idx or spouse_idx == largest_idx: - spouse_idx = (spouse_idx + 1) % len(self.ga_population) - logging.info( - '%s| smallest_idx=%s, largest_idx=%s, spouse_idx=%s', - request.worker_id, - smallest_idx, - largest_idx, - spouse_idx, - ) - - if random.randint(0, 9) > 4: - # Mate largest_idx and spouse_idx - logging.info( - '%s| Mating largest unit=%s : %s', - request.worker_id, - self.ga_population[largest_idx]['outcome'], - self.ga_population[largest_idx]['action'], - ) - logging.info( - '%s| and spouse=%s : %s', - request.worker_id, - self.ga_population[spouse_idx]['outcome'], - self.ga_population[spouse_idx]['action'], - ) - next_action = {} - keys = sorted(self.actions.keys()) - cross_idx = random.randint(0, len(keys) - 1) - logging.info('%s| at cross_idx=%d', request.worker_id, - cross_idx) - for i, key in enumerate(keys): - if i < cross_idx: - next_action[key] = self.ga_population[spouse_idx][ - 'action'][key] - else: - next_action[key] = self.ga_population[largest_idx][ - 'action'][key] - algorithm = 'mating' - else: - mutation_prob = random.randint(0, 100) - logging.info( - '%s| mutating mutation_prob=%s, spouse=%s: %s', - request.worker_id, - mutation_prob, - self.ga_population[spouse_idx]['outcome'], - self.ga_population[spouse_idx]['action'], - ) - next_action = {} - for key in self.actions.keys(): - if random.randint(0, 999) <= mutation_prob: - next_action[key] = random.uniform( - self.actions[key].min_value, - self.actions[key].max_value) - # next_action[key] = self.ga_population[spouse_idx]['action'][key] * random.uniform(.9, 1.1) - # if next_action[key] < self.actions[key].min_value: - # next_action[key] = self.actions[key].min_value - # elif next_action[key] > self.actions[key].max_value: - # next_action[key] = 
self.actions[key].max_value - else: - next_action[key] = self.ga_population[spouse_idx][ - 'action'][key] - # logging.info('received_action[%s]=%s original=%s', key, next_action[key], claim_year_sold_delay) - algorithm = f'mutating_{mutation_prob}' - logging.info('%s| new next_action=%s', request.worker_id, - next_action) - - # Remove the worst member of the population - # logging.info('deleting smallest unit=%s', self.ga_population[smallest_idx]) - del self.ga_population[smallest_idx] - - self.ga_active_samples[request.worker_id] = { - 'action': next_action, - 'algorithm': algorithm, - } - - dp_response = service_pb2.DecisionPointResponse() - dp_response.action.extend(param_dict_to_proto(next_action)) - dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT - return dp_response - - @overrides - def finalize_episode( - self, request: service_pb2.FinalizeEpisodeRequest - ) -> service_pb2.FinalizeEpisodeResponse: - algorithm = self.ga_active_samples[request.worker_id]['algorithm'] - if algorithm not in self.algorithms_tried: - self.algorithms_tried[algorithm] = 0 - self.algorithms_succeeded_above_min[algorithm] = 0 - self.algorithms_succeeded_best[algorithm] = 0 - self.algorithms_tried[algorithm] += 1 - - if self.ga_population: - largest_outcome, largest_idx, smallest_outcome, smallest_idx = ( - self.find_best_worst_probweighted(self.ga_population)) - if request.decision_outcome.reward >= smallest_outcome: - self.algorithms_succeeded_above_min[algorithm] += 1 - if request.decision_outcome.reward >= largest_outcome: - self.algorithms_succeeded_best[algorithm] += 1 - - self.ga_population.append({ - 'outcome': - request.decision_outcome.reward, - 'action': - self.ga_active_samples[request.worker_id]['action'], - }) - self.history.append({ - 'algorithm': - algorithm, - 'outcome': - request.decision_outcome.reward, - 'action': - self.ga_active_samples[request.worker_id]['action'], - 'worker_id': - request.worker_id, - }) + prop_largest_outcome, + prop_largest_idx, + prop_smallest_outcome, + prop_smallest_idx, + ) = self.find_best_worst_probweighted(self.proposals) logging.info( - '%s| FinalizeEpisode member=%s: %s / %s', + '%s| Best proposal: %s: %s', request.worker_id, - request.decision_outcome.reward, - self.ga_active_samples[request.worker_id]['algorithm'], - self.ga_active_samples[request.worker_id]['action'], + self.proposals[prop_largest_idx]['outcome'], + self.proposals[prop_largest_idx]['action'], ) - del self.ga_active_samples[request.worker_id] + next_action = self.proposals[prop_largest_idx]['action'] + algorithm = 'best_proposal' + del self.proposals[prop_largest_idx] + else: + spouse_idx = random.randint(0, len(self.ga_population) - 1) + # logging.info('smallest_idx=%s, largest_idx=%s, spouse_idx=%s', + # smallest_idx, largest_idx, spouse_idx) + while spouse_idx == smallest_idx or spouse_idx == largest_idx: + spouse_idx = (spouse_idx + 1) % len(self.ga_population) logging.info( - '%s| FinalizeEpisode #ga_active_samples=%s', + '%s| smallest_idx=%s, largest_idx=%s, spouse_idx=%s', request.worker_id, - len(self.ga_active_samples), + smallest_idx, + largest_idx, + spouse_idx, ) - return service_pb2.FinalizeEpisodeResponse(response_str='Success!') - - @overrides - def current_status( - self, request: service_pb2.CurrentStatusRequest - ) -> service_pb2.CurrentStatusResponse: - response = ( - f'[GeneticAlgorithm (max_population_size={self.max_population_size},' - f' num_decisions={self.num_decisions}):\n') - response += f' 
ga_population(#{len(self.ga_population)}):\n' - keys = sorted(self.actions.keys()) - response += ' idx,outcome,' + ','.join(keys) + '\n' - for i, unit in enumerate( - sorted(self.ga_population, - key=lambda p: p['outcome'], - reverse=True)): - response += (f' {i},{unit["outcome"]:.5F},' + - ','.join([str(unit['action'][key]) - for key in keys]) + '\n') - - response += f' ga_active_samples(#{len(self.ga_active_samples)}):\n' - response += ' worker_id,algorithm,' + ','.join(keys) + '\n' - for worker_id, sample in self.ga_active_samples.items(): - response += (f' {worker_id},{sample["algorithm"]},' + - ','.join([str(sample['action'][key]) - for key in keys]) + '\n') - response += ']' - - response += f' proposals(#{len(self.proposals)}):\n' - response += ' idx,outcome,' + ','.join(keys) + '\n' - for i, unit in enumerate( - sorted(self.proposals, - key=lambda p: p['outcome'], - reverse=True)): - response += (f' {i},{unit["outcome"]:.5F},' + - ','.join([str(unit['action'][key]) - for key in keys]) + '\n') - if i > 50: - break - - response += f' algorithms:\n' - for algorithm in sorted(self.algorithms_tried.keys()): - response += ( - ' %s: tried=%s, algorithms_succeeded_above_min=%.4E,' - ' algorithms_succeeded_best=%.4E\n' % ( - algorithm, - self.algorithms_tried[algorithm], - self.algorithms_succeeded_above_min[algorithm] / - self.algorithms_tried[algorithm], - self.algorithms_succeeded_best[algorithm] / - self.algorithms_tried[algorithm], - )) - - response += ' history:\n' - for i, h in enumerate(self.history): - response += ' %d: %s\n' % (i, h) - - return service_pb2.CurrentStatusResponse(response_str=response) - - def propose_action( - self, request: service_pb2.ProposeActionRequest - ) -> service_pb2.ProposeActionResponse: - action = {} - for key, value in request.action.items(): - action[key] = value - largest_outcome, largest_idx, smallest_outcome, smallest_idx = ( - self.find_best_worst_probweighted(self.ga_population)) - if request.outcome.reward >= smallest_outcome: - self.proposals.append({ - 'action': action, - 'outcome': request.outcome.reward, - }) - logging.info( - '%s| Accepted Proposal %s: %s', - request.worker_id, - request.outcome.reward, - action, - ) + if random.randint(0, 9) > 4: + # Mate largest_idx and spouse_idx + logging.info( + '%s| Mating largest unit=%s : %s', + request.worker_id, + self.ga_population[largest_idx]['outcome'], + self.ga_population[largest_idx]['action'], + ) + logging.info( + '%s| and spouse=%s : %s', + request.worker_id, + self.ga_population[spouse_idx]['outcome'], + self.ga_population[spouse_idx]['action'], + ) + next_action = {} + keys = sorted(self.actions.keys()) + cross_idx = random.randint(0, len(keys) - 1) + logging.info('%s| at cross_idx=%d', request.worker_id, cross_idx) + for i, key in enumerate(keys): + if i < cross_idx: + next_action[key] = self.ga_population[spouse_idx]['action'][key] + else: + next_action[key] = self.ga_population[largest_idx]['action'][key] + algorithm = 'mating' else: - logging.info( - '%s| Rejected Proposal %s: %s', - request.worker_id, - request.outcome.reward, - action, - ) - return service_pb2.ProposeActionResponse() + mutation_prob = random.randint(0, 100) + logging.info( + '%s| mutating mutation_prob=%s, spouse=%s: %s', + request.worker_id, + mutation_prob, + self.ga_population[spouse_idx]['outcome'], + self.ga_population[spouse_idx]['action'], + ) + next_action = {} + for key in self.actions.keys(): + if random.randint(0, 999) <= mutation_prob: + next_action[key] = random.uniform(self.actions[key].min_value, + 
self.actions[key].max_value) + # next_action[key] = self.ga_population[spouse_idx]['action'][key] * random.uniform(.9, 1.1) + # if next_action[key] < self.actions[key].min_value: + # next_action[key] = self.actions[key].min_value + # elif next_action[key] > self.actions[key].max_value: + # next_action[key] = self.actions[key].max_value + else: + next_action[key] = self.ga_population[spouse_idx]['action'][key] + # logging.info('received_action[%s]=%s original=%s', key, next_action[key], claim_year_sold_delay) + algorithm = f'mutating_{mutation_prob}' + logging.info('%s| new next_action=%s', request.worker_id, next_action) + + # Remove the worst member of the population + # logging.info('deleting smallest unit=%s', self.ga_population[smallest_idx]) + del self.ga_population[smallest_idx] + + self.ga_active_samples[request.worker_id] = { + 'action': next_action, + 'algorithm': algorithm, + } + + dp_response = service_pb2.DecisionPointResponse() + dp_response.action.extend(param_dict_to_proto(next_action)) + dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT + return dp_response + + @overrides + def finalize_episode( + self, request: service_pb2.FinalizeEpisodeRequest + ) -> service_pb2.FinalizeEpisodeResponse: + algorithm = self.ga_active_samples[request.worker_id]['algorithm'] + if algorithm not in self.algorithms_tried: + self.algorithms_tried[algorithm] = 0 + self.algorithms_succeeded_above_min[algorithm] = 0 + self.algorithms_succeeded_best[algorithm] = 0 + self.algorithms_tried[algorithm] += 1 + + if self.ga_population: + largest_outcome, largest_idx, smallest_outcome, smallest_idx = ( + self.find_best_worst_probweighted(self.ga_population)) + if request.decision_outcome.reward >= smallest_outcome: + self.algorithms_succeeded_above_min[algorithm] += 1 + if request.decision_outcome.reward >= largest_outcome: + self.algorithms_succeeded_best[algorithm] += 1 + + self.ga_population.append({ + 'outcome': request.decision_outcome.reward, + 'action': self.ga_active_samples[request.worker_id]['action'], + }) + self.history.append({ + 'algorithm': algorithm, + 'outcome': request.decision_outcome.reward, + 'action': self.ga_active_samples[request.worker_id]['action'], + 'worker_id': request.worker_id, + }) + logging.info( + '%s| FinalizeEpisode member=%s: %s / %s', + request.worker_id, + request.decision_outcome.reward, + self.ga_active_samples[request.worker_id]['algorithm'], + self.ga_active_samples[request.worker_id]['action'], + ) + del self.ga_active_samples[request.worker_id] + logging.info( + '%s| FinalizeEpisode #ga_active_samples=%s', + request.worker_id, + len(self.ga_active_samples), + ) + return service_pb2.FinalizeEpisodeResponse(response_str='Success!') + + @overrides + def current_status( + self, request: service_pb2.CurrentStatusRequest + ) -> service_pb2.CurrentStatusResponse: + response = ( + f'[GeneticAlgorithm (max_population_size={self.max_population_size},' + f' num_decisions={self.num_decisions}):\n') + response += f' ga_population(#{len(self.ga_population)}):\n' + keys = sorted(self.actions.keys()) + response += ' idx,outcome,' + ','.join(keys) + '\n' + for i, unit in enumerate( + sorted(self.ga_population, key=lambda p: p['outcome'], reverse=True)): + response += (f' {i},{unit["outcome"]:.5F},' + + ','.join([str(unit['action'][key]) for key in keys]) + '\n') + + response += f' ga_active_samples(#{len(self.ga_active_samples)}):\n' + response += ' worker_id,algorithm,' + ','.join(keys) + '\n' + for worker_id, sample in 
self.ga_active_samples.items(): + response += (f' {worker_id},{sample["algorithm"]},' + + ','.join([str(sample['action'][key]) for key in keys]) + + '\n') + response += ']' + + response += f' proposals(#{len(self.proposals)}):\n' + response += ' idx,outcome,' + ','.join(keys) + '\n' + for i, unit in enumerate( + sorted(self.proposals, key=lambda p: p['outcome'], reverse=True)): + response += (f' {i},{unit["outcome"]:.5F},' + + ','.join([str(unit['action'][key]) for key in keys]) + '\n') + if i > 50: + break + + response += f' algorithms:\n' + for algorithm in sorted(self.algorithms_tried.keys()): + response += (' %s: tried=%s, algorithms_succeeded_above_min=%.4E,' + ' algorithms_succeeded_best=%.4E\n' % ( + algorithm, + self.algorithms_tried[algorithm], + self.algorithms_succeeded_above_min[algorithm] / + self.algorithms_tried[algorithm], + self.algorithms_succeeded_best[algorithm] / + self.algorithms_tried[algorithm], + )) + + response += ' history:\n' + for i, h in enumerate(self.history): + response += ' %d: %s\n' % (i, h) + + return service_pb2.CurrentStatusResponse(response_str=response) + + def propose_action( + self, request: service_pb2.ProposeActionRequest + ) -> service_pb2.ProposeActionResponse: + action = {} + for key, value in request.action.items(): + action[key] = value + + largest_outcome, largest_idx, smallest_outcome, smallest_idx = ( + self.find_best_worst_probweighted(self.ga_population)) + if request.outcome.reward >= smallest_outcome: + self.proposals.append({ + 'action': action, + 'outcome': request.outcome.reward, + }) + logging.info( + '%s| Accepted Proposal %s: %s', + request.worker_id, + request.outcome.reward, + action, + ) + else: + logging.info( + '%s| Rejected Proposal %s: %s', + request.worker_id, + request.outcome.reward, + action, + ) + return service_pb2.ProposeActionResponse() diff --git a/sight_service/llm.py b/sight_service/llm.py index 0d2f7f8..e2eed1b 100644 --- a/sight_service/llm.py +++ b/sight_service/llm.py @@ -15,7 +15,6 @@ from concurrent import futures import json -from helpers.logs.logs_handler import logger as logging import random import threading from typing import Any, Dict, List, Optional, Tuple @@ -23,6 +22,7 @@ import google.auth import google.auth.transport.requests import google.generativeai as genai +from helpers.logs.logs_handler import logger as logging from overrides import overrides import requests from sight.proto import sight_pb2 @@ -35,695 +35,666 @@ class LLM(OptimizerInstance): - """Uses an LLM to choose the parameters of the code. + """Uses an LLM to choose the parameters of the code. Attributes: script: The script of the conversation accrued so far. 
""" - def __init__(self): - super().__init__() - # genai.configure(api_key=_GENAI_API_KEY) - genai.configure(api_key="_GENAI_API_KEY") - self._intro = '' - self._history = [] - self._actions_to_do = [] - self._history_len_for_prompt = 20 - self._num_decision_points = 0 - # self.last_outcome = None - self._lock = threading.RLock() - self._waiting_on_tell = False - self._response_ready = False - self._response_for_listen = '' - self._waiting_on_llm_response = False - - def _attr_summary( - self, key: str, - attr: sight_pb2.DecisionConfigurationStart.AttrProps) -> str: - """Returns a summary of an attribute for the LLM.""" - if attr.min_value < attr.max_value: - return ( - f'"{key}": {{ "description": {attr.description}, "min_value":' - f' {attr.min_value}, "max_value": {attr.max_value} }},') - return f'"{key}": {{ "description": {attr.description} }},' - - @overrides - def launch( - self, - request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: - response = super(LLM, self).launch(request) - logging.info('LLM request=%s', request) - self._llm_config = request.decision_config_params.choice_config[ - request.label].llm_config - logging.info('LLM config=%s', self._llm_config) - self._bayesian_opt = BayesianOpt() - self._bayesian_opt.launch(request) - - self._intro += '' - if (self._llm_config.goal == sight_pb2.DecisionConfigurationStart. - LLMConfig.LLMGoal.LM_OPTIMIZE): - self._intro = ( - 'You are controlling an agent that is trying to reach a goal. The' - ' agent is described as follows.\n') - self._intro += f'"{self._llm_config.description}"\n' - if (self._llm_config.goal == sight_pb2.DecisionConfigurationStart. - LLMConfig.LLMGoal.LM_OPTIMIZE): - self._intro += ( - 'The simulation will periodically report its state and then ask you ' - + - 'to select an action for it to perform. After it has performed' - ' this ' + - 'action it will report back the numeric outcome of the this' - ' action. ' + - 'Higher outcome values are better than low outcome values. Your' - ' job ' + - 'is to choose actions that maximize the outcome values.\n') - if len(self.state) > 0: - self._intro += ( - 'The state of the simulation consists of the following attributes: \n' - ) - self._intro += (' {\n ' + '\n '.join( - [self._attr_summary(key, p) - for key, p in self.state.items()]) + '}\n') - self._intro += 'The possible actions you need to select are: \n' - self._intro += (' {\n ' + '\n '.join( - [self._attr_summary(key, p) - for key, p in self.actions.items()]) + '}\n') - self._intro += 'The possible outcomes you will observe are: \n' - self._intro += (' {\n ' + '\n '.join( - [self._attr_summary(key, p) - for key, p in self.outcomes.items()]) + '}\n') - self._intro += '========================\n' - - logging.info( - 'INTERACTIVE=%s', - self._llm_config.goal == sight_pb2.DecisionConfigurationStart. - LLMConfig.LLMGoal.LM_INTERACTIVE, - ) - if (self._llm_config.goal == sight_pb2.DecisionConfigurationStart. 
- LLMConfig.LLMGoal.LM_INTERACTIVE): - self._waiting_on_tell = True - self._response_ready = False - self._response_for_listen = '' - self._waiting_on_llm_response = False - else: - detail_prompt = ( - 'Please summarize everything you know about these parameters for the' - ' above application area, detail the steps that need to be taken to' - ' create a good estimate these parameters.\n') - self._intro += (detail_prompt + - self._ask(self._intro + detail_prompt) + '\n') - - detail_prompt = ( - 'Based on this plan describe the most reasonable estimate of these' - ' parameters\n') - self._intro += (detail_prompt + - self._ask(self._intro + detail_prompt) + '\n') - - response.display_string = 'LLM SUCCESS! ' + self._intro - logging.info('self._intro=%s', self._intro) - return response - - def _random_state(self) -> Dict[str, float]: - """Returns a random state.""" - s = {} - for key, p in self.state.items(): - s[key] = (p.max_value - - p.min_value) * random.random() + p.min_value - return s - - def _random_action(self) -> Dict[str, float]: - """Returns a random action.""" - a = {} - for key, p in self.actions.items(): - a[key] = (p.max_value - - p.min_value) * random.random() + p.min_value - return a - - def _random_outcome(self) -> Dict[str, float]: - """Returns a random outcome.""" - o = {} - for key, p in self.outcomes.items(): - o[key] = (p.max_value - - p.min_value) * random.random() + p.min_value - return o - - def _random_event(self) -> Dict[str, Any]: - return { - 'state': self._random_state(), - 'action': self._random_action(), - 'outcome': self._random_outcome(), - # random.random(), - } - - def _filtered_history(self, include_example_action: bool) -> List[Any]: - ordered_history = self._history[:-1].copy() - # logging.info( - # '#hist=%d ordered_history[#%d]=%s', - # len(self._history), - # len(ordered_history), - # ordered_history, - # ) - ordered_history = sorted( - ordered_history, - key=lambda h: -h['outcome'] - if 'outcome' in h and isinstance(h['outcome'], float) else 0, - ) - if len(ordered_history) > self._history_len_for_prompt: - ordered_history = ordered_history[0:self._history_len_for_prompt - - 1] - random.shuffle(ordered_history) - - # If this is the first question, add a random event to serve as an example - # of the format. - # if include_example_action and len(ordered_history) == 0: - # ordered_history.append(self._random_event()) - - logging.info( - 'ordered_history[#%d]=%s', - len(ordered_history), - ordered_history, - ) - # if worker_id is None: - if len(self._history) == 0: - return ordered_history - return ordered_history + [self._history[-1]] - - def _hist_event_to_text(self, event: Dict, last_outcome: float, - is_last_event: bool) -> Tuple[str, Any]: - t = '' - if len(event['state']) > 0: - t += 'Decision State:\n' - t += (' {' + - ', '.join([f'"{k}": {v}' - for k, v in event['state'].items()]) + '}\n') - # t += 'Decision Action (json format): ' - if event['action'] is not None or is_last_event: - t += 'Simulation parameters (json format): ' - if event['action'] is not None: - t += (' {' + ', '.join([ - f'"{key}": {value}' for key, value in event['action'].items() - ]) + '}\n') - if event['outcome'] is not None: - # t += 'Decision Outcome: ' + str(event['outcome']) + '\n' - t += 'Simulation Outcome (json format): ' + str( - event['outcome']) + '\n' - if (self._llm_config.goal != sight_pb2.DecisionConfigurationStart. 
- LLMConfig.LLMGoal.LM_INTERACTIVE): - if last_outcome is not None: - if last_outcome < event['outcome'] - 0.1: - t += ' This is a better outcome than the last time.\n' - elif last_outcome > event['outcome'] + 0.1: - t += ' This is a worse outcome than the last time.\n' - else: - t += ' This is a similar outcome to the last time.\n' - t += '========================\n' - last_outcome = event['outcome'] - return t, last_outcome - - def _history_to_text(self, include_example_action: bool = True) -> str: - t = '' - last_outcome = None - hist = self._filtered_history(include_example_action) - logging.info( - '_history_to_text() include_example_action=%s hist=%s', - include_example_action, - hist, - ) - # if include_example_action and ( - # len(hist) == 0 or (len(hist) == 1 and hist[0]['outcome'] is None) - # ): - # logging.info('_history_to_text() Adding random_event') - # t += self._hist_event_to_text(self._random_event(), None, False) - for i, event in enumerate(hist): - logging.info('_history_to_text event=%s', event) - event_text, last_outcome = self._hist_event_to_text( - event, last_outcome, i == len(hist) - 1) - t += event_text - return t - - def _history_to_chat( - self, - worker_id: str, - include_example_action: bool = True) -> List[Dict[str, str]]: - chat = [] - last_outcome = None - last_outcome_message = '' - for h in self._filtered_history(include_example_action): - if len(h['state']) > 0: - chat.append({ - 'author': - 'USER', - 'content': - (last_outcome_message + 'Decision State:\n' + ' {' + - ', '.join([f'"{k}": {v}' - for k, v in h['state'].items()]) + '}\n' + - 'Please provide the Decision Action (json format):\n'), - }) - if h['action'] is not None: - chat.append({ - 'author': - 'AI', - 'content': (+ 'Decision Action:\n' + ' {{' + ', '.join([ - f'"{key}": {value}' - for key, value in h['action'].items() - ]) + '}'), - }) - if h['outcome'] is not None: - last_outcome_message = 'Decision Outcome: ' + str( - h['outcome']) + '\n' - if (self._llm_config.goal - != sight_pb2.DecisionConfigurationStart.LLMConfig. - LLMGoal.LM_INTERACTIVE): - if last_outcome is not None: - if last_outcome < h['outcome'] - 0.1: - last_outcome_message += ( - ' This is a better outcome than the last time.\n' - ) - elif last_outcome > h['outcome'] + 0.1: - last_outcome_message += ( - ' This is a worse outcome than the last time.\n' - ) - else: - last_outcome_message += ( - ' This is a similar outcome to the last time.\n' - ) - return chat - - def _params_to_dict(self, dp: sight_pb2) -> Dict[str, float]: - """Returns the dict representation of a DecisionParams proto""" - d = {} - for a in dp: - d[a.key] = a.value.double_value - return d - - def _get_creds(self) -> Any: - creds, project = google.auth.default() - auth_req = google.auth.transport.requests.Request() - creds.refresh(auth_req) - return creds - - def _get_req_headers(self) -> Dict[str, str]: - return { - 'Authorization': f'Bearer {self._get_creds().token}', - 'Content-Type': 'application/json; charset=utf-8', - } - - def _ask(self, prompt) -> str: - if (self._llm_config.algorithm == sight_pb2.DecisionConfigurationStart. - LLMConfig.LLMAlgorithm.LA_TEXT_BISON): - return self._ask_text_bison(prompt) - elif (self._llm_config.algorithm == sight_pb2. - DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_CHAT_BISON): - return self._ask_chat_bison(prompt) - elif (self._llm_config.algorithm == sight_pb2. 
- DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_GEMINI_PRO): - return self._ask_gemini_pro(prompt) - else: - raise ValueError(f'Invalid algorithm {self._llm_config.algorithm}') - - def _ask_text_bison(self, prompt) -> str: - while True: - response = requests.post( - f"https://us-central1-aiplatform.googleapis.com/v1/projects/{os.environ['PROJECT_ID']}/locations/us-central1/publishers/google/models/text-bison-32k:predict", - data=json.dumps({ - 'instances': [{ - 'prompt': prompt - }], - 'parameters': { - 'temperature': 0.2, - 'maxOutputTokens': 2048, - 'topK': 40, - 'topP': 0.55, - # "groundingConfig": string, - # "stopSequences": [ string ], - # "candidateCount": integer, - # "logprobs": integer, - # "presencePenalty": float, - # "frequencyPenalty": float, - # "logitBias": map, - 'echo': False, - }, - }), - headers=self._get_req_headers(), - ).json() - # logging.info('response=%s', response) - if ('error' in response - or response['predictions'][0]['content'].strip() == ''): - continue - return response['predictions'][0]['content'].strip() - - def _get_action(self, worker_id: str) -> List[Dict[str, float]]: - if (self._llm_config.algorithm == sight_pb2.DecisionConfigurationStart. - LLMConfig.LLMAlgorithm.LA_TEXT_BISON): - return self._action_from_text_bison(worker_id) - elif (self._llm_config.algorithm == sight_pb2. - DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_CHAT_BISON): - return self._action_from_chat_bison(worker_id) - elif (self._llm_config.algorithm == sight_pb2. - DecisionConfigurationStart.LLMConfig.LLMAlgorithm.LA_GEMINI_PRO): - return self._action_from_gemini_pro(worker_id) - else: - raise ValueError(f'Invalid algorithm {self._llm_config.algorithm}') - - def _action_from_text_bison(self, - worker_id: str) -> List[Dict[str, float]]: - logging.info('ask_text_bison') - logging.info(self._intro + '\n' + self._history_to_text()) - while True: - text = self._ask_text_bison(self._intro + '\n' + - self._history_to_text()) - logging.info('text=[%s]', text) - # text = text.removeprefix('```json\n') - # logging.info('text=[%s]', text) - text = text.strip('`').split('\n')[0] - # text = text.split('\n')[0].strip() - logging.info('text=[%s]', text) - try: - return [json.loads(text)] - except json.decoder.JSONDecodeError: - continue - - def _ask_chat_bison(self, prompt, message) -> str: - response = requests.post( - f"https://us-central1-aiplatform.googleapis.com/v1/projects/{os.environ['PROJECT_ID']}/locations/us-central1/publishers/google/models/chat-bison-32k:predict", - data=json.dumps({ - 'instances': [ - { - 'context': prompt, - 'messages': message, - }, - ], - 'parameters': { - 'temperature': 0.2, - 'maxOutputTokens': 2048, - 'topK': 40, - 'topP': 0.55, - # "groundingConfig": string, - # "stopSequences": [ string ], - # "candidateCount": integer, - # "logprobs": integer, - # "presencePenalty": float, - # "frequencyPenalty": float, - # "logitBias": map, - 'echo': False, - }, - }), - headers=self._get_req_headers(), - ).json() - # logging.info('response=%s', response) - # logging.info( - # "response['predictions']=%s", response['predictions'][0]['candidates'] - # ) - # if 'error' in response or response['predictions'][0]['content'].strip() == '': - # continue - return response['predictions'][0]['candidates'][0]['content'].strip() - - def _action_from_chat_bison(self, - worker_id: str) -> List[Dict[str, float]]: - while True: - text = self._ask_chat_bison(self._intro, - self._history_to_chat(worker_id)) - logging.info('text=[%s]', text) - try: - return [json.loads(text)] - 
except json.decoder.JSONDecodeError: - continue - - def _ask_gemini_pro(self, prompt) -> str: - while True: - response = requests.post( - f"https://us-central1-aiplatform.googleapis.com/v1/projects/{os.environ['PROJECT_ID']}/locations/us-central1/publishers/google/models/gemini-pro:streamGenerateContent", - data=json.dumps({ - 'contents': { - 'role': 'user', - 'parts': { - 'text': prompt - }, - }, - 'safety_settings': { - 'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', - 'threshold': 'BLOCK_LOW_AND_ABOVE', - }, - 'generation_config': { - 'temperature': 0.9, - 'topP': 1, - 'topK': 1, - 'maxOutputTokens': 8192, - # "stopSequences": [".", "?", "!"] - }, - }), - headers=self._get_req_headers(), - ).json() - logging.info('response=%s', response) - if len(response) == 0: - continue - text = '' - for r in response: - if 'parts' in r['candidates'][0]['content']: - text += r['candidates'][0]['content']['parts'][0]['text'] - text = text.strip() - if text == '': - continue - return text - - def _action_from_gemini_pro(self, - worker_id: str) -> List[Dict[str, float]]: - while True: - logging.info('ask_geminipro') - prompt = self._intro + '\n' - random_sample, _ = self._hist_event_to_text( - self._random_event(), None, False) - prompt += 'Example request: ' + random_sample + '\n' - prompt += self._history_to_text() - logging.info('prompt=%s', prompt) - text = self._ask_gemini_pro(prompt) - if text.startswith('```json'): - text = [text.removeprefix('```json').removesuffix('```')] + def __init__(self): + super().__init__() + # genai.configure(api_key=_GENAI_API_KEY) + genai.configure(api_key="_GENAI_API_KEY") + self._intro = '' + self._history = [] + self._actions_to_do = [] + self._history_len_for_prompt = 20 + self._num_decision_points = 0 + # self.last_outcome = None + self._lock = threading.RLock() + self._waiting_on_tell = False + self._response_ready = False + self._response_for_listen = '' + self._waiting_on_llm_response = False + + def _attr_summary( + self, key: str, + attr: sight_pb2.DecisionConfigurationStart.AttrProps) -> str: + """Returns a summary of an attribute for the LLM.""" + if attr.min_value < attr.max_value: + return (f'"{key}": {{ "description": {attr.description}, "min_value":' + f' {attr.min_value}, "max_value": {attr.max_value} }},') + return f'"{key}": {{ "description": {attr.description} }},' + + @overrides + def launch(self, + request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: + response = super(LLM, self).launch(request) + logging.info('LLM request=%s', request) + self._llm_config = request.decision_config_params.choice_config[ + request.label].llm_config + logging.info('LLM config=%s', self._llm_config) + self._bayesian_opt = BayesianOpt() + self._bayesian_opt.launch(request) + + self._intro += '' + if (self._llm_config.goal == + sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_OPTIMIZE): + self._intro = ( + 'You are controlling an agent that is trying to reach a goal. The' + ' agent is described as follows.\n') + self._intro += f'"{self._llm_config.description}"\n' + if (self._llm_config.goal == + sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_OPTIMIZE): + self._intro += ( + 'The simulation will periodically report its state and then ask you ' + + 'to select an action for it to perform. After it has performed' + ' this ' + + 'action it will report back the numeric outcome of the this' + ' action. ' + + 'Higher outcome values are better than low outcome values. 
Your' + ' job ' + 'is to choose actions that maximize the outcome values.\n')
+    if len(self.state) > 0:
+      self._intro += (
+          'The state of the simulation consists of the following attributes: \n'
+      )
+      self._intro += (' {\n ' + '\n '.join(
+          [self._attr_summary(key, p) for key, p in self.state.items()]) +
+                      '}\n')
+    self._intro += 'The possible actions you need to select are: \n'
+    self._intro += (' {\n ' + '\n '.join(
+        [self._attr_summary(key, p) for key, p in self.actions.items()]) +
+                    '}\n')
+    self._intro += 'The possible outcomes you will observe are: \n'
+    self._intro += (' {\n ' + '\n '.join(
+        [self._attr_summary(key, p) for key, p in self.outcomes.items()]) +
+                    '}\n')
+    self._intro += '========================\n'
+
+    logging.info(
+        'INTERACTIVE=%s',
+        self._llm_config.goal ==
+        sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_INTERACTIVE,
+    )
+    if (self._llm_config.goal ==
+        sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_INTERACTIVE):
+      self._waiting_on_tell = True
+      self._response_ready = False
+      self._response_for_listen = ''
+      self._waiting_on_llm_response = False
+    else:
+      detail_prompt = (
+          'Please summarize everything you know about these parameters for the'
+          ' above application area, detail the steps that need to be taken to'
+          ' create a good estimate of these parameters.\n')
+      self._intro += (detail_prompt + self._ask(self._intro + detail_prompt) +
+                      '\n')
+
+      detail_prompt = (
+          'Based on this plan, describe the most reasonable estimate of these'
+          ' parameters\n')
+      self._intro += (detail_prompt + self._ask(self._intro + detail_prompt) +
+                      '\n')
+
+    response.display_string = 'LLM SUCCESS! ' + self._intro
+    logging.info('self._intro=%s', self._intro)
+    return response
+
+  def _random_state(self) -> Dict[str, float]:
+    """Returns a random state."""
+    s = {}
+    for key, p in self.state.items():
+      s[key] = (p.max_value - p.min_value) * random.random() + p.min_value
+    return s
+
+  def _random_action(self) -> Dict[str, float]:
+    """Returns a random action."""
+    a = {}
+    for key, p in self.actions.items():
+      a[key] = (p.max_value - p.min_value) * random.random() + p.min_value
+    return a
+
+  def _random_outcome(self) -> Dict[str, float]:
+    """Returns a random outcome."""
+    o = {}
+    for key, p in self.outcomes.items():
+      o[key] = (p.max_value - p.min_value) * random.random() + p.min_value
+    return o
+
+  def _random_event(self) -> Dict[str, Any]:
+    return {
+        'state': self._random_state(),
+        'action': self._random_action(),
+        'outcome': self._random_outcome(),
+        # random.random(),
+    }
+
+  def _filtered_history(self, include_example_action: bool) -> List[Any]:
+    ordered_history = self._history[:-1].copy()
+    # logging.info(
+    #     '#hist=%d ordered_history[#%d]=%s',
+    #     len(self._history),
+    #     len(ordered_history),
+    #     ordered_history,
+    # )
+    ordered_history = sorted(
+        ordered_history,
+        key=lambda h: -h['outcome']
+        if 'outcome' in h and isinstance(h['outcome'], float) else 0,
+    )
+    if len(ordered_history) > self._history_len_for_prompt:
+      ordered_history = ordered_history[0:self._history_len_for_prompt - 1]
+    random.shuffle(ordered_history)
+
+    # If this is the first question, add a random event to serve as an example
+    # of the format.
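+    # (The example-injection below is currently disabled.) Net effect of the
+    # filtering above: the prompt carries at most _history_len_for_prompt
+    # events, keeping the highest-outcome ones, shuffling them so the model
+    # does not read meaning into their order, and always appending the most
+    # recent event last so the LLM answers for the pending decision point.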
+    # if include_example_action and len(ordered_history) == 0:
+    #   ordered_history.append(self._random_event())
+
+    logging.info(
+        'ordered_history[#%d]=%s',
+        len(ordered_history),
+        ordered_history,
+    )
+    # if worker_id is None:
+    if len(self._history) == 0:
+      return ordered_history
+    return ordered_history + [self._history[-1]]
+
+  def _hist_event_to_text(self, event: Dict, last_outcome: float,
+                          is_last_event: bool) -> Tuple[str, Any]:
+    t = ''
+    if len(event['state']) > 0:
+      t += 'Decision State:\n'
+      t += (' {' +
+            ', '.join([f'"{k}": {v}' for k, v in event['state'].items()]) +
+            '}\n')
+    # t += 'Decision Action (json format): '
+    if event['action'] is not None or is_last_event:
+      t += 'Simulation parameters (json format): '
+    if event['action'] is not None:
+      t += (' {' + ', '.join(
+          [f'"{key}": {value}' for key, value in event['action'].items()]) +
+            '}\n')
+    if event['outcome'] is not None:
+      # t += 'Decision Outcome: ' + str(event['outcome']) + '\n'
+      t += 'Simulation Outcome (json format): ' + str(event['outcome']) + '\n'
+      if (self._llm_config.goal != sight_pb2.DecisionConfigurationStart.
+          LLMConfig.LLMGoal.LM_INTERACTIVE):
+        if last_outcome is not None:
+          if last_outcome < event['outcome'] - 0.1:
+            t += ' This is a better outcome than the last time.\n'
+          elif last_outcome > event['outcome'] + 0.1:
+            t += ' This is a worse outcome than the last time.\n'
+          else:
+            t += ' This is a similar outcome to the last time.\n'
+      t += '========================\n'
+      last_outcome = event['outcome']
+    return t, last_outcome
+
+  def _history_to_text(self, include_example_action: bool = True) -> str:
+    t = ''
+    last_outcome = None
+    hist = self._filtered_history(include_example_action)
+    logging.info(
+        '_history_to_text() include_example_action=%s hist=%s',
+        include_example_action,
+        hist,
+    )
+    # if include_example_action and (
+    #     len(hist) == 0 or (len(hist) == 1 and hist[0]['outcome'] is None)
+    # ):
+    #   logging.info('_history_to_text() Adding random_event')
+    #   t += self._hist_event_to_text(self._random_event(), None, False)
+    for i, event in enumerate(hist):
+      logging.info('_history_to_text event=%s', event)
+      event_text, last_outcome = self._hist_event_to_text(
+          event, last_outcome, i == len(hist) - 1)
+      t += event_text
+    return t
+
+  def _history_to_chat(
+      self,
+      worker_id: str,
+      include_example_action: bool = True) -> List[Dict[str, str]]:
+    chat = []
+    last_outcome = None
+    last_outcome_message = ''
+    for h in self._filtered_history(include_example_action):
+      if len(h['state']) > 0:
+        chat.append({
+            'author':
+                'USER',
+            'content':
+                (last_outcome_message + 'Decision State:\n' + ' {' +
+                 ', '.join([f'"{k}": {v}' for k, v in h['state'].items()]) +
+                 '}\n' + 'Please provide the Decision Action (json format):\n'),
+        })
+      if h['action'] is not None:
+        chat.append({
+            'author':
+                'AI',
+            'content': ('Decision Action:\n' + ' {' + ', '.join(
+                [f'"{key}": {value}' for key, value in h['action'].items()]) +
+                        '}'),
+        })
+      if h['outcome'] is not None:
+        last_outcome_message = 'Decision Outcome: ' + str(h['outcome']) + '\n'
+        if (self._llm_config.goal != sight_pb2.DecisionConfigurationStart.
+ LLMConfig.LLMGoal.LM_INTERACTIVE): + if last_outcome is not None: + if last_outcome < h['outcome'] - 0.1: + last_outcome_message += ( + ' This is a better outcome than the last time.\n') + elif last_outcome > h['outcome'] + 0.1: + last_outcome_message += ( + ' This is a worse outcome than the last time.\n') else: - text = text.split('\n') - logging.info('text=[%s]', text) - - actions = [] - for i in range(0, len(text), 3): - try: - logging.info('%d: processed %s', i, text[i]) - action = text[i].removeprefix( - 'Simulation parameters (json format):') - logging.info('%d: action=%s', i, action) - actions.append(json.loads(action)) - except json.decoder.JSONDecodeError: - continue - if len(actions) == 0: - continue - return actions - - def _is_done(self, worker_id: str) -> Tuple[bool, str]: - """Checks with the LLM to see whether it has enough information to answer. + last_outcome_message += ( + ' This is a similar outcome to the last time.\n') + return chat + + def _params_to_dict(self, dp: sight_pb2) -> Dict[str, float]: + """Returns the dict representation of a DecisionParams proto""" + d = {} + for a in dp: + d[a.key] = a.value.double_value + return d + + def _get_creds(self) -> Any: + creds, project = google.auth.default() + auth_req = google.auth.transport.requests.Request() + creds.refresh(auth_req) + return creds + + def _get_req_headers(self) -> Dict[str, str]: + return { + 'Authorization': f'Bearer {self._get_creds().token}', + 'Content-Type': 'application/json; charset=utf-8', + } + + def _ask(self, prompt) -> str: + if (self._llm_config.algorithm == sight_pb2.DecisionConfigurationStart. + LLMConfig.LLMAlgorithm.LA_TEXT_BISON): + return self._ask_text_bison(prompt) + elif (self._llm_config.algorithm == sight_pb2.DecisionConfigurationStart. + LLMConfig.LLMAlgorithm.LA_CHAT_BISON): + return self._ask_chat_bison(prompt) + elif (self._llm_config.algorithm == sight_pb2.DecisionConfigurationStart. + LLMConfig.LLMAlgorithm.LA_GEMINI_PRO): + return self._ask_gemini_pro(prompt) + else: + raise ValueError(f'Invalid algorithm {self._llm_config.algorithm}') + + def _ask_text_bison(self, prompt) -> str: + while True: + response = requests.post( + f"https://us-central1-aiplatform.googleapis.com/v1/projects/{os.environ['PROJECT_ID']}/locations/us-central1/publishers/google/models/text-bison-32k:predict", + data=json.dumps({ + 'instances': [{ + 'prompt': prompt + }], + 'parameters': { + 'temperature': 0.2, + 'maxOutputTokens': 2048, + 'topK': 40, + 'topP': 0.55, + # "groundingConfig": string, + # "stopSequences": [ string ], + # "candidateCount": integer, + # "logprobs": integer, + # "presencePenalty": float, + # "frequencyPenalty": float, + # "logitBias": map, + 'echo': False, + }, + }), + headers=self._get_req_headers(), + ).json() + # logging.info('response=%s', response) + if ('error' in response or + response['predictions'][0]['content'].strip() == ''): + continue + return response['predictions'][0]['content'].strip() + + def _get_action(self, worker_id: str) -> List[Dict[str, float]]: + if (self._llm_config.algorithm == sight_pb2.DecisionConfigurationStart. + LLMConfig.LLMAlgorithm.LA_TEXT_BISON): + return self._action_from_text_bison(worker_id) + elif (self._llm_config.algorithm == sight_pb2.DecisionConfigurationStart. + LLMConfig.LLMAlgorithm.LA_CHAT_BISON): + return self._action_from_chat_bison(worker_id) + elif (self._llm_config.algorithm == sight_pb2.DecisionConfigurationStart. 
+ LLMConfig.LLMAlgorithm.LA_GEMINI_PRO): + return self._action_from_gemini_pro(worker_id) + else: + raise ValueError(f'Invalid algorithm {self._llm_config.algorithm}') + + def _action_from_text_bison(self, worker_id: str) -> List[Dict[str, float]]: + logging.info('ask_text_bison') + logging.info(self._intro + '\n' + self._history_to_text()) + while True: + text = self._ask_text_bison(self._intro + '\n' + self._history_to_text()) + logging.info('text=[%s]', text) + # text = text.removeprefix('```json\n') + # logging.info('text=[%s]', text) + text = text.strip('`').split('\n')[0] + # text = text.split('\n')[0].strip() + logging.info('text=[%s]', text) + try: + return [json.loads(text)] + except json.decoder.JSONDecodeError: + continue + + def _ask_chat_bison(self, prompt, message) -> str: + response = requests.post( + f"https://us-central1-aiplatform.googleapis.com/v1/projects/{os.environ['PROJECT_ID']}/locations/us-central1/publishers/google/models/chat-bison-32k:predict", + data=json.dumps({ + 'instances': [{ + 'context': prompt, + 'messages': message, + },], + 'parameters': { + 'temperature': 0.2, + 'maxOutputTokens': 2048, + 'topK': 40, + 'topP': 0.55, + # "groundingConfig": string, + # "stopSequences": [ string ], + # "candidateCount": integer, + # "logprobs": integer, + # "presencePenalty": float, + # "frequencyPenalty": float, + # "logitBias": map, + 'echo': False, + }, + }), + headers=self._get_req_headers(), + ).json() + # logging.info('response=%s', response) + # logging.info( + # "response['predictions']=%s", response['predictions'][0]['candidates'] + # ) + # if 'error' in response or response['predictions'][0]['content'].strip() == '': + # continue + return response['predictions'][0]['candidates'][0]['content'].strip() + + def _action_from_chat_bison(self, worker_id: str) -> List[Dict[str, float]]: + while True: + text = self._ask_chat_bison(self._intro, self._history_to_chat(worker_id)) + logging.info('text=[%s]', text) + try: + return [json.loads(text)] + except json.decoder.JSONDecodeError: + continue + + def _ask_gemini_pro(self, prompt) -> str: + while True: + response = requests.post( + f"https://us-central1-aiplatform.googleapis.com/v1/projects/{os.environ['PROJECT_ID']}/locations/us-central1/publishers/google/models/gemini-pro:streamGenerateContent", + data=json.dumps({ + 'contents': { + 'role': 'user', + 'parts': { + 'text': prompt + }, + }, + 'safety_settings': { + 'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', + 'threshold': 'BLOCK_LOW_AND_ABOVE', + }, + 'generation_config': { + 'temperature': 0.9, + 'topP': 1, + 'topK': 1, + 'maxOutputTokens': 8192, + # "stopSequences": [".", "?", "!"] + }, + }), + headers=self._get_req_headers(), + ).json() + logging.info('response=%s', response) + if len(response) == 0: + continue + text = '' + for r in response: + if 'parts' in r['candidates'][0]['content']: + text += r['candidates'][0]['content']['parts'][0]['text'] + text = text.strip() + if text == '': + continue + return text + + def _action_from_gemini_pro(self, worker_id: str) -> List[Dict[str, float]]: + while True: + logging.info('ask_geminipro') + prompt = self._intro + '\n' + random_sample, _ = self._hist_event_to_text(self._random_event(), None, + False) + prompt += 'Example request: ' + random_sample + '\n' + prompt += self._history_to_text() + logging.info('prompt=%s', prompt) + text = self._ask_gemini_pro(prompt) + if text.startswith('```json'): + text = [text.removeprefix('```json').removesuffix('```')] + else: + text = text.split('\n') + logging.info('text=[%s]', 
text) + + actions = [] + for i in range(0, len(text), 3): + try: + logging.info('%d: processed %s', i, text[i]) + action = text[i].removeprefix('Simulation parameters (json format):') + logging.info('%d: action=%s', i, action) + actions.append(json.loads(action)) + except json.decoder.JSONDecodeError: + continue + if len(actions) == 0: + continue + return actions + + def _is_done(self, worker_id: str) -> Tuple[bool, str]: + """Checks with the LLM to see whether it has enough information to answer. Returns a tuple with a boolean that indicates whether the question can be answered and if so, the answer string. """ - if (self._llm_config.algorithm == sight_pb2.DecisionConfigurationStart. - LLMConfig.LLMAlgorithm.LA_GEMINI_PRO): - return self._is_done_from_gemini_pro(worker_id) - return False, '' - - def _is_done_from_gemini_pro(self, worker_id: str) -> Tuple[bool, str]: - question = ( - self._intro + '\n' + self._history_to_text(False) + - '\nHas the question been fully answered, including all of its' - ' clauses? Answer Y if yes or N if there are any additional' - ' simulations that need to be performed to fully answer the question.' - ) - logging.info('_is_done_from_gemini_pro question=%s', question) - text = self._ask_gemini_pro(question) - logging.info('_is_done_from_gemini_pro text=%s', text) - if not text.lower().startswith('y'): - logging.info('_is_done_from_gemini_pro NOT DONE') - return False, '' - question = (self._intro + '\n' + self._history_to_text(False) + - "\nWhat is the answer to the user's question?") - text = self._ask_gemini_pro(question) - logging.info('_is_done_from_gemini_pro answer=%s', text) - return True, text - - @overrides - def decision_point( - self, request: service_pb2.DecisionPointRequest - ) -> service_pb2.DecisionPointResponse: - logging.info('DecisionPoint request=%s', request) - # self._append_outcome(request.decision_outcome.reward) - self._lock.acquire() - - dp_response = service_pb2.DecisionPointResponse() - - if (self._llm_config.goal == sight_pb2.DecisionConfigurationStart. - LLMConfig.LLMGoal.LM_INTERACTIVE and self._waiting_on_tell): - self._lock.release() - dp_response.action_type = ( - service_pb2.DecisionPointResponse.ActionType.AT_RETRY) - return dp_response - - if len(self._history) > 0 and 'outcome' not in self._history[0]: - if len(request.decision_outcome.outcome_params) > 0: - self._history[-1]['outcome'] = self._params_to_dict( - request.decision_point.outcome_params) - else: - self._history[-1]['outcome'] = request.decision_outcome.reward - # self.last_outcome = self._history[-1]['outcome'] - # self.script += 'Decision State:\n' - # self.script += ' {' + ', '.join([ - # f'"{p.key}": {p.value.double_value}' - # for p in request.decision_point.state_params - # ]) + '}\n' - # self.script += 'Decision Action (json format):\n' - self._history.append({ - 'state': - self._params_to_dict(request.decision_point.state_params), - 'action': - None, - 'outcome': - None, - }) - - if self._actions_to_do: - selected_actions = [self._actions_to_do.pop(0)] - # Periodically try a random action, but not on the first trial in case the - # user just wants a single reasonable recommendation. - elif (self._llm_config.goal != sight_pb2.DecisionConfigurationStart. 
- LLMConfig.LLMGoal.LM_INTERACTIVE and len(self._history) > 1 - and random.random() > 0.1): - logging.info('##########################\n##### BAYESIAN OPT' - ' ######\n##########################') - # selected_actions = self._random_action() - dp = self._bayesian_opt.decision_point(request) - selected_actions = {} - for a in dp.action: - selected_actions[a.key] = a.value.double_value - selected_actions = [selected_actions] - print('selected_actions=%s' % selected_actions) - - else: - selected_actions = self._get_action(request.worker_id) - - logging.info('decision_point(): selected_actions=%s', selected_actions) - - self._history[-1]['action'] = selected_actions[0] - # If there are more actions to perform, store them in self._actions_to_do - if len(selected_actions) >= 1: - self._actions_to_do.extend(selected_actions[1:]) - - # self.script += ' {' + ', '.join([ - # f'"{key}": {value}' - # for key, value in selected_actions.items() - # ]) + '}\n' - - for key, value in self._history[-1]['action'].items(): - a = dp_response.action.add() - a.key = key - a.value.double_value = float(value) - - self._num_decision_points += 1 - - self._lock.release() - dp_response.action_type = ( - service_pb2.DecisionPointResponse.ActionType.AT_ACT) - return dp_response - - @overrides - def finalize_episode( - self, request: service_pb2.FinalizeEpisodeRequest - ) -> service_pb2.FinalizeEpisodeResponse: - self._lock.acquire() - - logging.info('FinalizeEpisode request=%s', request) - if len(request.decision_outcome.outcome_params) > 0: - self._history[-1]['outcome'] = self._params_to_dict( - request.decision_outcome.outcome_params) - else: - self._history[-1]['outcome'] = request.decision_outcome.reward - # self.last_outcome = self._history[-1]['outcome'] - - logging.info('self._history[-1]=%s', self._history[-1]) - for key, value in self._history[-1]['action'].items(): - a = request.decision_point.choice_params.add() - a.key = key - a.value.double_value = float(value) - self._bayesian_opt.finalize_episode(request) - - if (self._llm_config.goal == sight_pb2.DecisionConfigurationStart. - LLMConfig.LLMGoal.LM_INTERACTIVE): - # If there are no outstanding acitions, ask the LLM whether the user's - # question can be answered via the already-completed model runs. - if len(self._actions_to_do) == 0: - can_respond_to_question, response = self._is_done( - request.worker_id) - self._response_ready = can_respond_to_question - if self._response_ready: - self._waiting_on_tell = True - self._response_for_listen = response - self._lock.release() - - logging.info( - 'FinalizeEpisode response=%s', - service_pb2.FinalizeEpisodeResponse(response_str='Success!'), - ) - return service_pb2.FinalizeEpisodeResponse(response_str='Success!') - - @overrides - def tell(self, - request: service_pb2.TellRequest) -> service_pb2.TellResponse: - tell_response = service_pb2.TellResponse() - self._lock.acquire() - logging.info('tell() request=%s', request) - - if (self._llm_config.goal == sight_pb2.DecisionConfigurationStart. 
- LLMConfig.LLMGoal.LM_INTERACTIVE and self._waiting_on_tell): - logging.info('INTERACTIVE') - self._intro += '\n' + self._history_to_text(False) + '\n' - self._history = [] - self._intro += 'User input: ' + request.message_str - # self._intro += '\n' + request.message_str - self._waiting_on_tell = False - logging.info('tell self._intro=%s', self._intro) - - self._lock.release() - tell_response.response_str = self._ask(self._intro) - return tell_response - - @overrides - def listen( - self, - request: service_pb2.ListenRequest) -> service_pb2.ListenResponse: - listen_response = service_pb2.ListenResponse() - self._lock.acquire() - logging.info( - 'listen() request=%s, self._response_ready=%s,' - ' self._response_for_listen=%s', - request, - self._response_ready, - self._response_for_listen, - ) - - if (self._llm_config.goal == sight_pb2.DecisionConfigurationStart. - LLMConfig.LLMGoal.LM_INTERACTIVE): - listen_response.response_ready = self._response_ready - if self._response_ready: - listen_response.response_str = self._response_for_listen - self._response_ready = False - - self._lock.release() - logging.info('listen() response=%s', listen_response) - return listen_response - - @overrides - def current_status( - self, request: service_pb2.CurrentStatusRequest - ) -> service_pb2.CurrentStatusResponse: - bayesian_opt_status = self._bayesian_opt.current_status(request) - return service_pb2.CurrentStatusResponse( - response_str= - f"""[LLM: script={self._intro + self._history_to_text(None)} + if (self._llm_config.algorithm == sight_pb2.DecisionConfigurationStart. + LLMConfig.LLMAlgorithm.LA_GEMINI_PRO): + return self._is_done_from_gemini_pro(worker_id) + return False, '' + + def _is_done_from_gemini_pro(self, worker_id: str) -> Tuple[bool, str]: + question = ( + self._intro + '\n' + self._history_to_text(False) + + '\nHas the question been fully answered, including all of its' + ' clauses? 
Answer Y if yes or N if there are any additional' + ' simulations that need to be performed to fully answer the question.') + logging.info('_is_done_from_gemini_pro question=%s', question) + text = self._ask_gemini_pro(question) + logging.info('_is_done_from_gemini_pro text=%s', text) + if not text.lower().startswith('y'): + logging.info('_is_done_from_gemini_pro NOT DONE') + return False, '' + question = (self._intro + '\n' + self._history_to_text(False) + + "\nWhat is the answer to the user's question?") + text = self._ask_gemini_pro(question) + logging.info('_is_done_from_gemini_pro answer=%s', text) + return True, text + + @overrides + def decision_point( + self, request: service_pb2.DecisionPointRequest + ) -> service_pb2.DecisionPointResponse: + logging.info('DecisionPoint request=%s', request) + # self._append_outcome(request.decision_outcome.reward) + self._lock.acquire() + + dp_response = service_pb2.DecisionPointResponse() + + if (self._llm_config.goal + == sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_INTERACTIVE + and self._waiting_on_tell): + self._lock.release() + dp_response.action_type = ( + service_pb2.DecisionPointResponse.ActionType.AT_RETRY) + return dp_response + + if len(self._history) > 0 and 'outcome' not in self._history[0]: + if len(request.decision_outcome.outcome_params) > 0: + self._history[-1]['outcome'] = self._params_to_dict( + request.decision_point.outcome_params) + else: + self._history[-1]['outcome'] = request.decision_outcome.reward + # self.last_outcome = self._history[-1]['outcome'] + # self.script += 'Decision State:\n' + # self.script += ' {' + ', '.join([ + # f'"{p.key}": {p.value.double_value}' + # for p in request.decision_point.state_params + # ]) + '}\n' + # self.script += 'Decision Action (json format):\n' + self._history.append({ + 'state': self._params_to_dict(request.decision_point.state_params), + 'action': None, + 'outcome': None, + }) + + if self._actions_to_do: + selected_actions = [self._actions_to_do.pop(0)] + # Periodically try a random action, but not on the first trial in case the + # user just wants a single reasonable recommendation. 
+ elif (
+ self._llm_config.goal
+ != sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_INTERACTIVE
+ and len(self._history) > 1 and random.random() > 0.1):
+ logging.info('##########################\n##### BAYESIAN OPT'
+ ' ######\n##########################')
+ # selected_actions = self._random_action()
+ dp = self._bayesian_opt.decision_point(request)
+ selected_actions = {}
+ for a in dp.action:
+ selected_actions[a.key] = a.value.double_value
+ selected_actions = [selected_actions]
+ print('selected_actions=%s' % selected_actions)
+
+ else:
+ selected_actions = self._get_action(request.worker_id)
+
+ logging.info('decision_point(): selected_actions=%s', selected_actions)
+
+ self._history[-1]['action'] = selected_actions[0]
+ # If there are more actions to perform, store them in self._actions_to_do
+ if len(selected_actions) >= 1:
+ self._actions_to_do.extend(selected_actions[1:])
+
+ # self.script += ' {' + ', '.join([
+ # f'"{key}": {value}'
+ # for key, value in selected_actions.items()
+ # ]) + '}\n'
+
+ for key, value in self._history[-1]['action'].items():
+ a = dp_response.action.add()
+ a.key = key
+ a.value.double_value = float(value)
+
+ self._num_decision_points += 1
+
+ self._lock.release()
+ dp_response.action_type = (
+ service_pb2.DecisionPointResponse.ActionType.AT_ACT)
+ return dp_response
+
+ @overrides
+ def finalize_episode(
+ self, request: service_pb2.FinalizeEpisodeRequest
+ ) -> service_pb2.FinalizeEpisodeResponse:
+ self._lock.acquire()
+
+ logging.info('FinalizeEpisode request=%s', request)
+ if len(request.decision_outcome.outcome_params) > 0:
+ self._history[-1]['outcome'] = self._params_to_dict(
+ request.decision_outcome.outcome_params)
+ else:
+ self._history[-1]['outcome'] = request.decision_outcome.reward
+ # self.last_outcome = self._history[-1]['outcome']
+
+ logging.info('self._history[-1]=%s', self._history[-1])
+ for key, value in self._history[-1]['action'].items():
+ a = request.decision_point.choice_params.add()
+ a.key = key
+ a.value.double_value = float(value)
+ self._bayesian_opt.finalize_episode(request)
+
+ if (self._llm_config.goal ==
+ sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_INTERACTIVE):
+ # If there are no outstanding actions, ask the LLM whether the user's
+ # question can be answered via the already-completed model runs.
+ if len(self._actions_to_do) == 0: + can_respond_to_question, response = self._is_done(request.worker_id) + self._response_ready = can_respond_to_question + if self._response_ready: + self._waiting_on_tell = True + self._response_for_listen = response + self._lock.release() + + logging.info( + 'FinalizeEpisode response=%s', + service_pb2.FinalizeEpisodeResponse(response_str='Success!'), + ) + return service_pb2.FinalizeEpisodeResponse(response_str='Success!') + + @overrides + def tell(self, request: service_pb2.TellRequest) -> service_pb2.TellResponse: + tell_response = service_pb2.TellResponse() + self._lock.acquire() + logging.info('tell() request=%s', request) + + if (self._llm_config.goal + == sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_INTERACTIVE + and self._waiting_on_tell): + logging.info('INTERACTIVE') + self._intro += '\n' + self._history_to_text(False) + '\n' + self._history = [] + self._intro += 'User input: ' + request.message_str + # self._intro += '\n' + request.message_str + self._waiting_on_tell = False + logging.info('tell self._intro=%s', self._intro) + + self._lock.release() + tell_response.response_str = self._ask(self._intro) + return tell_response + + @overrides + def listen(self, + request: service_pb2.ListenRequest) -> service_pb2.ListenResponse: + listen_response = service_pb2.ListenResponse() + self._lock.acquire() + logging.info( + 'listen() request=%s, self._response_ready=%s,' + ' self._response_for_listen=%s', + request, + self._response_ready, + self._response_for_listen, + ) + + if (self._llm_config.goal == + sight_pb2.DecisionConfigurationStart.LLMConfig.LLMGoal.LM_INTERACTIVE): + listen_response.response_ready = self._response_ready + if self._response_ready: + listen_response.response_str = self._response_for_listen + self._response_ready = False + + self._lock.release() + logging.info('listen() response=%s', listen_response) + return listen_response + + @overrides + def current_status( + self, request: service_pb2.CurrentStatusRequest + ) -> service_pb2.CurrentStatusResponse: + bayesian_opt_status = self._bayesian_opt.current_status(request) + return service_pb2.CurrentStatusResponse( + response_str=f"""[LLM: script={self._intro + self._history_to_text(None)} ----------------- BayesianOpt={bayesian_opt_status.response_str}""") diff --git a/sight_service/nevergrad_opt.py b/sight_service/nevergrad_opt.py index fc56e35..1b2cbee 100644 --- a/sight_service/nevergrad_opt.py +++ b/sight_service/nevergrad_opt.py @@ -13,294 +13,288 @@ # limitations under the License. 
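Before the reformatted `NeverGradOpt` below, it may help to recall the ask/tell loop that the class wraps. A minimal sketch of that pattern, using a made-up one-parameter search space and toy reward (not part of the patch); note the sign flip, since Nevergrad minimizes while Sight rewards are maximized:

```python
import nevergrad as ng

# One scalar action in [0, 1], mirroring the ng.p.Dict parametrization
# that launch() builds from the declared action attributes.
params = ng.p.Dict(x=ng.p.Scalar(lower=0.0, upper=1.0))
opt = ng.optimizers.NGOpt(parametrization=ng.p.Instrumentation(params),
                          budget=10)

for _ in range(10):
  candidate = opt.ask()  # candidate.args[0] is the action dict
  action = candidate.args[0]
  reward = 1.0 - (action['x'] - 0.25)**2  # toy objective, higher is better
  opt.tell(candidate, 0 - reward)  # Nevergrad minimizes, hence the negation
print(opt.provide_recommendation())
```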
"""LLM-based optimization for driving Sight applications.""" -from helpers.logs.logs_handler import logger as logging -from overrides import overrides +import json +import os +import random +import threading from typing import Any, Dict, List, Tuple +import google.auth +import google.auth.transport.requests +from helpers.logs.logs_handler import logger as logging import nevergrad as ng -from sight_service.optimizer_instance import param_dict_to_proto -from sight_service.optimizer_instance import OptimizerInstance -from sight_service.proto import service_pb2 +from overrides import overrides +import requests from sight.proto import sight_pb2 from sight_service.normalizer import Normalizer -import random -import requests -import google.auth -import google.auth.transport.requests -import json -import os -import threading +from sight_service.optimizer_instance import OptimizerInstance +from sight_service.optimizer_instance import param_dict_to_proto +from sight_service.proto import service_pb2 _file_name = "nevergrad_opt.py" class NeverGradOpt(OptimizerInstance): - """Uses the NeverGrad library to choose the parameters of the code. + """Uses the NeverGrad library to choose the parameters of the code. Attributes: possible_values: Maps each action attributes to the list of possible values of this attribute. """ - def __init__(self): - super().__init__() - self.num_samples_issued = 0 - self.active_samples = {} - self.complete_samples = {} - self.possible_values = {} - self._lock = threading.RLock() - self._total_count = 0 - self._completed_count = 0 - self.normalizer = Normalizer() - - @overrides - def launch( - self, - request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: - response = super(NeverGradOpt, self).launch(request) - # logging.info('request : %s', request) - self._ng_config = request.decision_config_params.choice_config[ - request.label].never_grad_config - # print ('ng=%s' % ng.__dict__) - - self._total_count = request.decision_config_params.num_trials - - self.actions = self.normalizer.normalize_in_0_to_1(self.actions) - # print("self.actions : ", self.actions) - - self.possible_values = {} - for i, key in enumerate(sorted(self.actions.keys())): - if self.actions[key].valid_float_values: - self.possible_values[key] = list( - self.actions[key].valid_float_values) - elif self.actions[key].step_size: - self.possible_values[key] = [] - cur = self.actions[key].min_value - while cur <= self.actions[key].max_value: - self.possible_values[key].append(cur) - cur += self.actions[key].step_size - # print('possible_values=%s' % self.possible_values) - - params = {} - for key, p in self.actions.items(): - if self.actions[key].valid_float_values: - params[key] = ng.p.Choice( - choices=len(self.possible_values[key])) - elif self.actions[key].step_size: - params[key] = ng.p.TransitionChoice( - choices=len(self.possible_values[key])) - else: - params[key] = ng.p.Scalar(lower=p.min_value, upper=p.max_value) - - # print('here params are : ', params) - # # print('here **params are : ', **params) - # print('here ng.p.Dict is : ', ng.p.Dict(**params)) - # print('here ng.p.Instrumentation is : ', ng.p.Instrumentation(ng.p.Dict(**params))) - - parametrization = ng.p.Instrumentation(ng.p.Dict(**params)) - budget = 1000 - - if (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart. 
- NeverGradConfig.NeverGradAlgorithm.NG_AUTO): - self._optimizer = ng.optimizers.NGOpt( - parametrization=parametrization, budget=budget) - elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart - .NeverGradConfig.NeverGradAlgorithm.NG_BO): - self._optimizer = ng.optimizers.BO(parametrization=parametrization, - budget=budget) - elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart - .NeverGradConfig.NeverGradAlgorithm.NG_CMA): - self._optimizer = ng.optimizers.CMA( - parametrization=parametrization, budget=budget) - elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart - .NeverGradConfig.NeverGradAlgorithm.NG_TwoPointsDE): - self._optimizer = ng.optimizers.TwoPointsDE( - parametrization=parametrization, budget=budget) - elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart - .NeverGradConfig.NeverGradAlgorithm.NG_RandomSearch): - self._optimizer = ng.optimizers.RandomSearch( - parametrization=parametrization, budget=budget) - elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart - .NeverGradConfig.NeverGradAlgorithm.NG_PSO): - self._optimizer = ng.optimizers.PSO( - parametrization=parametrization, budget=budget) - elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart - .NeverGradConfig.NeverGradAlgorithm.NG_ScrHammersleySearch): - self._optimizer = ng.optimizers.ScrHammersleySearch( - parametrization=parametrization, budget=budget) - elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart - .NeverGradConfig.NeverGradAlgorithm.NG_DE): - self._optimizer = ng.optimizers.DE(parametrization=parametrization, + def __init__(self): + super().__init__() + self.num_samples_issued = 0 + self.active_samples = {} + self.complete_samples = {} + self.possible_values = {} + self._lock = threading.RLock() + self._total_count = 0 + self._completed_count = 0 + self.normalizer = Normalizer() + + @overrides + def launch(self, + request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: + response = super(NeverGradOpt, self).launch(request) + # logging.info('request : %s', request) + self._ng_config = request.decision_config_params.choice_config[ + request.label].never_grad_config + # print ('ng=%s' % ng.__dict__) + + self._total_count = request.decision_config_params.num_trials + + self.actions = self.normalizer.normalize_in_0_to_1(self.actions) + # print("self.actions : ", self.actions) + + self.possible_values = {} + for i, key in enumerate(sorted(self.actions.keys())): + if self.actions[key].valid_float_values: + self.possible_values[key] = list(self.actions[key].valid_float_values) + elif self.actions[key].step_size: + self.possible_values[key] = [] + cur = self.actions[key].min_value + while cur <= self.actions[key].max_value: + self.possible_values[key].append(cur) + cur += self.actions[key].step_size + # print('possible_values=%s' % self.possible_values) + + params = {} + for key, p in self.actions.items(): + if self.actions[key].valid_float_values: + params[key] = ng.p.Choice(choices=len(self.possible_values[key])) + elif self.actions[key].step_size: + params[key] = ng.p.TransitionChoice( + choices=len(self.possible_values[key])) + else: + params[key] = ng.p.Scalar(lower=p.min_value, upper=p.max_value) + + # print('here params are : ', params) + # # print('here **params are : ', **params) + # print('here ng.p.Dict is : ', ng.p.Dict(**params)) + # print('here ng.p.Instrumentation is : ', ng.p.Instrumentation(ng.p.Dict(**params))) + + parametrization = 
ng.p.Instrumentation(ng.p.Dict(**params)) + budget = 1000 + + if (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart. + NeverGradConfig.NeverGradAlgorithm.NG_AUTO): + self._optimizer = ng.optimizers.NGOpt(parametrization=parametrization, + budget=budget) + elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart. + NeverGradConfig.NeverGradAlgorithm.NG_BO): + self._optimizer = ng.optimizers.BO(parametrization=parametrization, + budget=budget) + elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart. + NeverGradConfig.NeverGradAlgorithm.NG_CMA): + self._optimizer = ng.optimizers.CMA(parametrization=parametrization, + budget=budget) + elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart. + NeverGradConfig.NeverGradAlgorithm.NG_TwoPointsDE): + self._optimizer = ng.optimizers.TwoPointsDE( + parametrization=parametrization, budget=budget) + elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart. + NeverGradConfig.NeverGradAlgorithm.NG_RandomSearch): + self._optimizer = ng.optimizers.RandomSearch( + parametrization=parametrization, budget=budget) + elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart. + NeverGradConfig.NeverGradAlgorithm.NG_PSO): + self._optimizer = ng.optimizers.PSO(parametrization=parametrization, + budget=budget) + elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart. + NeverGradConfig.NeverGradAlgorithm.NG_ScrHammersleySearch): + self._optimizer = ng.optimizers.ScrHammersleySearch( + parametrization=parametrization, budget=budget) + elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart. + NeverGradConfig.NeverGradAlgorithm.NG_DE): + self._optimizer = ng.optimizers.DE(parametrization=parametrization, + budget=budget) + elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart. + NeverGradConfig.NeverGradAlgorithm.NG_CGA): + self._optimizer = ng.optimizers.cGA(parametrization=parametrization, + budget=budget) + elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart. + NeverGradConfig.NeverGradAlgorithm.NG_ES): + self._optimizer = ng.optimizers.ES(parametrization=parametrization, + budget=budget) + elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart. + NeverGradConfig.NeverGradAlgorithm.NG_DL_OPO): + self._optimizer = ng.optimizers.DiscreteLenglerOnePlusOne( + parametrization=parametrization, budget=budget) + elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart. + NeverGradConfig.NeverGradAlgorithm.NG_DDE): + self._optimizer = ng.optimizers.DiscreteDE( + parametrization=parametrization, budget=budget) + elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart. + NeverGradConfig.NeverGradAlgorithm.NG_NMM): + self._optimizer = ng.optimizers.NeuralMetaModel( + parametrization=parametrization, budget=budget) + elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart. 
+ NeverGradConfig.NeverGradAlgorithm.NG_TINY_SPSA): + self._optimizer = ng.optimizers.TinySPSA(parametrization=parametrization, budget=budget) - elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart - .NeverGradConfig.NeverGradAlgorithm.NG_CGA): - self._optimizer = ng.optimizers.cGA( - parametrization=parametrization, budget=budget) - elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart - .NeverGradConfig.NeverGradAlgorithm.NG_ES): - self._optimizer = ng.optimizers.ES(parametrization=parametrization, + elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart. + NeverGradConfig.NeverGradAlgorithm.NG_VORONOI_DE): + self._optimizer = ng.optimizers.VoronoiDE(parametrization=parametrization, + budget=budget) + elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart. + NeverGradConfig.NeverGradAlgorithm.NG_CMA_SMALL): + self._optimizer = ng.optimizers.CMAsmall(parametrization=parametrization, budget=budget) - elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart - .NeverGradConfig.NeverGradAlgorithm.NG_DL_OPO): - self._optimizer = ng.optimizers.DiscreteLenglerOnePlusOne( - parametrization=parametrization, budget=budget) - elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart - .NeverGradConfig.NeverGradAlgorithm.NG_DDE): - self._optimizer = ng.optimizers.DiscreteDE( - parametrization=parametrization, budget=budget) - elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart - .NeverGradConfig.NeverGradAlgorithm.NG_NMM): - self._optimizer = ng.optimizers.NeuralMetaModel( - parametrization=parametrization, budget=budget) - elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart - .NeverGradConfig.NeverGradAlgorithm.NG_TINY_SPSA): - self._optimizer = ng.optimizers.TinySPSA( - parametrization=parametrization, budget=budget) - elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart - .NeverGradConfig.NeverGradAlgorithm.NG_VORONOI_DE): - self._optimizer = ng.optimizers.VoronoiDE( - parametrization=parametrization, budget=budget) - elif (self._ng_config.algorithm == sight_pb2.DecisionConfigurationStart - .NeverGradConfig.NeverGradAlgorithm.NG_CMA_SMALL): - self._optimizer = ng.optimizers.CMAsmall( - parametrization=parametrization, budget=budget) - - # print(self._optimizer, type(self._optimizer)) - - response.display_string = 'NeverGrad Start' - print('response=%s' % response) - # raise SystemExit - return response - - def _params_to_dict(self, dp: sight_pb2) -> Dict[str, float]: - """Returns the dict representation of a DecisionParams proto""" - d = {} - for a in dp: - d[a.key] = a.value.double_value - return d - - @overrides - def decision_point( - self, request: service_pb2.DecisionPointRequest - ) -> service_pb2.DecisionPointResponse: - # logging.info('DecisionPoint request=%s', request) - # print('DecisionPoint request=%s' % request) - - self._lock.acquire() - selected_actions = self._optimizer.ask() - # logging.info('selected_actions=%s', selected_actions.args) - - # logging.info('selected_actions=%s', selected_actions.kwargs) - self.active_samples[request.worker_id] = { - 'action': selected_actions.args[0], - 'sample_num': self.num_samples_issued, - } - # print('self.active_samples : ', self.active_samples) - self.last_action = selected_actions - self.num_samples_issued += 1 - self._lock.release() - - denormalized_actions = self.normalizer.denormalize_from_0_to_1( - selected_actions.args[0]) - # print("denormalized_actions : ", 
denormalized_actions) - - dp_response = service_pb2.DecisionPointResponse() - for key, value in denormalized_actions.items(): - a = dp_response.action.add() - a.key = key - a.value.sub_type = sight_pb2.Value.ST_DOUBLE - a.value.double_value = float(value) - - # self.last_outcome = request.decision_outcome.outcome_value - # print('DecisionPoint response=%s' % dp_response) - - # print('DecisionPoint response=%s' % dp_response) - dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT - return dp_response - - @overrides - def finalize_episode( - self, request: service_pb2.FinalizeEpisodeRequest - ) -> service_pb2.FinalizeEpisodeResponse: - # logging.info('FinalizeEpisode request=%s', request) - d = self.last_action - - self._lock.acquire() - # logging.info('FinalizeEpisode complete_samples=%s' % self.complete_samples) - self.complete_samples[self.active_samples[ - request.worker_id]['sample_num']] = { - # 'outcome': request.decision_outcome.reward, - # 'action': self.active_samples[request.worker_id]['action'], - 'reward': request.decision_outcome.reward, - 'action': self.active_samples[request.worker_id]['action'], - 'outcome': request.decision_outcome.outcome_params - } - # print('self.complete_samples : ', self.complete_samples) - del self.active_samples[request.worker_id] - - # logging.info('FinalizeEpisode outcome=%s / %s', - # request.decision_outcome.reward, d) - self._optimizer.tell(d, 0 - request.decision_outcome.reward) - # self._completed_count += 1 - - del self.last_action - self._lock.release() - return service_pb2.FinalizeEpisodeResponse(response_str='Success!') - - @overrides - def current_status( - self, request: service_pb2.CurrentStatusRequest - ) -> service_pb2.CurrentStatusResponse: - response = '[NeverGrad (num_ask=#%s, num_tell=#%s)\n' % ( - self._optimizer.num_ask, self._optimizer.num_tell) - - self._lock.acquire() - response += 'sample_num, ' + ', '.join(list( - self.actions)) + ', outcome\n' - cur = [0] * len(self.actions) - keys = sorted(self.actions.keys()) - logging.info('self.complete_samples=%s', self.complete_samples) - for s in sorted(self.complete_samples.items(), - key=lambda x: x[1]['outcome'], - reverse=True): - response += str(s[0]) + ', ' - response += ', '.join([str(s[1]['action'][key]) for key in keys]) - response += ', ' + str(s[1]['outcome']) + '\n' - - response += 'pareto_front:\n' - for trial in self._optimizer.pareto_front(): - response += ', '.join([str(trial.args[0][key]) - for key in keys]) + '\n' - response += ']\n' - self._lock.release() - - # print('self._total_count was : ', self._total_count) - # print('self._completed_count is now : ', self._completed_count) - - if (self._completed_count == self._total_count): - status = service_pb2.CurrentStatusResponse.Status.SUCCESS - elif (self._completed_count < self._total_count): - status = service_pb2.CurrentStatusResponse.Status.IN_PROGRESS - else: - status = service_pb2.CurrentStatusResponse.Status.FAILURE - - return service_pb2.CurrentStatusResponse(response_str=response, - status=status) - - @overrides - def WorkerAlive( - self, request: service_pb2.WorkerAliveRequest - ) -> service_pb2.WorkerAliveResponse: - method_name = "WorkerAlive" - logging.debug(">>>> In %s of %s", method_name, _file_name) - if (self._completed_count == self._total_count): - worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_DONE - # elif(not self.pending_samples): - # worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_RETRY - else: - # Increasing count here so that 
multiple workers can't enter the dp call for same sample at last - self._completed_count += 1 - worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_ACT - logging.info("worker_alive_status is %s", worker_alive_status) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return service_pb2.WorkerAliveResponse(status_type=worker_alive_status) + + # print(self._optimizer, type(self._optimizer)) + + response.display_string = 'NeverGrad Start' + print('response=%s' % response) + # raise SystemExit + return response + + def _params_to_dict(self, dp: sight_pb2) -> Dict[str, float]: + """Returns the dict representation of a DecisionParams proto""" + d = {} + for a in dp: + d[a.key] = a.value.double_value + return d + + @overrides + def decision_point( + self, request: service_pb2.DecisionPointRequest + ) -> service_pb2.DecisionPointResponse: + # logging.info('DecisionPoint request=%s', request) + # print('DecisionPoint request=%s' % request) + + self._lock.acquire() + selected_actions = self._optimizer.ask() + # logging.info('selected_actions=%s', selected_actions.args) + + # logging.info('selected_actions=%s', selected_actions.kwargs) + self.active_samples[request.worker_id] = { + 'action': selected_actions.args[0], + 'sample_num': self.num_samples_issued, + } + # print('self.active_samples : ', self.active_samples) + self.last_action = selected_actions + self.num_samples_issued += 1 + self._lock.release() + + denormalized_actions = self.normalizer.denormalize_from_0_to_1( + selected_actions.args[0]) + # print("denormalized_actions : ", denormalized_actions) + + dp_response = service_pb2.DecisionPointResponse() + for key, value in denormalized_actions.items(): + a = dp_response.action.add() + a.key = key + a.value.sub_type = sight_pb2.Value.ST_DOUBLE + a.value.double_value = float(value) + + # self.last_outcome = request.decision_outcome.outcome_value + # print('DecisionPoint response=%s' % dp_response) + + # print('DecisionPoint response=%s' % dp_response) + dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT + return dp_response + + @overrides + def finalize_episode( + self, request: service_pb2.FinalizeEpisodeRequest + ) -> service_pb2.FinalizeEpisodeResponse: + # logging.info('FinalizeEpisode request=%s', request) + d = self.last_action + + self._lock.acquire() + # logging.info('FinalizeEpisode complete_samples=%s' % self.complete_samples) + self.complete_samples[self.active_samples[request.worker_id]['sample_num']] = { + # 'outcome': request.decision_outcome.reward, + # 'action': self.active_samples[request.worker_id]['action'], + 'reward': request.decision_outcome.reward, + 'action': self.active_samples[request.worker_id]['action'], + 'outcome': request.decision_outcome.outcome_params + } + # print('self.complete_samples : ', self.complete_samples) + del self.active_samples[request.worker_id] + + # logging.info('FinalizeEpisode outcome=%s / %s', + # request.decision_outcome.reward, d) + self._optimizer.tell(d, 0 - request.decision_outcome.reward) + # self._completed_count += 1 + + del self.last_action + self._lock.release() + return service_pb2.FinalizeEpisodeResponse(response_str='Success!') + + @overrides + def current_status( + self, request: service_pb2.CurrentStatusRequest + ) -> service_pb2.CurrentStatusResponse: + response = '[NeverGrad (num_ask=#%s, num_tell=#%s)\n' % ( + self._optimizer.num_ask, self._optimizer.num_tell) + + self._lock.acquire() + response += 'sample_num, ' + ', '.join(list(self.actions)) + ', outcome\n' + cur = 
[0] * len(self.actions) + keys = sorted(self.actions.keys()) + logging.info('self.complete_samples=%s', self.complete_samples) + for s in sorted(self.complete_samples.items(), + key=lambda x: x[1]['outcome'], + reverse=True): + response += str(s[0]) + ', ' + response += ', '.join([str(s[1]['action'][key]) for key in keys]) + response += ', ' + str(s[1]['outcome']) + '\n' + + response += 'pareto_front:\n' + for trial in self._optimizer.pareto_front(): + response += ', '.join([str(trial.args[0][key]) for key in keys]) + '\n' + response += ']\n' + self._lock.release() + + # print('self._total_count was : ', self._total_count) + # print('self._completed_count is now : ', self._completed_count) + + if (self._completed_count == self._total_count): + status = service_pb2.CurrentStatusResponse.Status.SUCCESS + elif (self._completed_count < self._total_count): + status = service_pb2.CurrentStatusResponse.Status.IN_PROGRESS + else: + status = service_pb2.CurrentStatusResponse.Status.FAILURE + + return service_pb2.CurrentStatusResponse(response_str=response, + status=status) + + @overrides + def WorkerAlive( + self, request: service_pb2.WorkerAliveRequest + ) -> service_pb2.WorkerAliveResponse: + method_name = "WorkerAlive" + logging.debug(">>>> In %s of %s", method_name, _file_name) + if (self._completed_count == self._total_count): + worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_DONE + # elif(not self.pending_samples): + # worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_RETRY + else: + # Increasing count here so that multiple workers can't enter the dp call for same sample at last + self._completed_count += 1 + worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_ACT + logging.info("worker_alive_status is %s", worker_alive_status) + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return service_pb2.WorkerAliveResponse(status_type=worker_alive_status) diff --git a/sight_service/normalizer.py b/sight_service/normalizer.py index 1ed8a85..b05e6bc 100644 --- a/sight_service/normalizer.py +++ b/sight_service/normalizer.py @@ -15,51 +15,49 @@ import copy + def scale_value(value, output_min, output_max, input_min=0, input_max=1): - # Check if input value is within the source range - if not (input_min <= value <= input_max): - raise ValueError("Input value is outside the input range") + # Check if input value is within the source range + if not (input_min <= value <= input_max): + raise ValueError("Input value is outside the input range") - # Calculate the normalized value within the input range - normalized_value = (value - input_min) / (input_max - input_min) - # print('normalized_value : ', normalized_value) + # Calculate the normalized value within the input range + normalized_value = (value - input_min) / (input_max - input_min) + # print('normalized_value : ', normalized_value) - # Scale the normalized value to the output range - scaled_value = normalized_value * (output_max - output_min) + output_min - # print('scaled_value : ', scaled_value) + # Scale the normalized value to the output range + scaled_value = normalized_value * (output_max - output_min) + output_min + # print('scaled_value : ', scaled_value) - return scaled_value + return scaled_value class Normalizer: - """provide interface to normalize - denormalize the action values in specified ranges. + """provide interface to normalize - denormalize the action values in specified ranges. 
""" - def __init__(self): - self.actions = {} - self.normalized_actions = {} - self.denormalized_actions = {} - - def normalize_in_0_to_1(self, actions): - # storing key-value pair of action name and attr_proto in self.actions - self.actions = copy.deepcopy(actions) - - # normalizing min max value of actions to 0-1 - for k,v in actions.items(): - current_action = actions[k] - current_action.min_value = 0 - current_action.max_value = 1 - self.normalized_actions[k] = current_action - - # return key-value pair of action name and normalized attr_proto - return self.normalized_actions - - def denormalize_from_0_to_1(self, actions): - for k,v in actions.items(): - self.denormalized_actions[k] = scale_value(v, output_min=0, output_max=self.actions[k].max_value) - return self.denormalized_actions - - - - + def __init__(self): + self.actions = {} + self.normalized_actions = {} + self.denormalized_actions = {} + + def normalize_in_0_to_1(self, actions): + # storing key-value pair of action name and attr_proto in self.actions + self.actions = copy.deepcopy(actions) + + # normalizing min max value of actions to 0-1 + for k, v in actions.items(): + current_action = actions[k] + current_action.min_value = 0 + current_action.max_value = 1 + self.normalized_actions[k] = current_action + + # return key-value pair of action name and normalized attr_proto + return self.normalized_actions + + def denormalize_from_0_to_1(self, actions): + for k, v in actions.items(): + self.denormalized_actions[k] = scale_value( + v, output_min=0, output_max=self.actions[k].max_value) + return self.denormalized_actions diff --git a/sight_service/optimizer_instance.py b/sight_service/optimizer_instance.py index 4aea8d0..76f0d49 100644 --- a/sight_service/optimizer_instance.py +++ b/sight_service/optimizer_instance.py @@ -14,158 +14,153 @@ """An instance of a Sight optimizer dedicated to a single experiment.""" from concurrent import futures +from typing import Any, Dict, List, Sequence, Tuple + from helpers.logs.logs_handler import logger as logging -from typing import Any, Dict, List, Tuple, Sequence +from sight.proto import sight_pb2 from sight.widgets.decision import utils from sight_service.proto import service_pb2 -from sight.proto import sight_pb2 _file_name = "optimizer_instance.py" def param_dict_to_proto( - param_dict: Dict[str, float]) -> List[sight_pb2.DecisionParam]: - """converting dictionary of parameters into proto.""" - param_proto: List[sight_pb2.DecisionParam] = [] - for k, v in sorted(param_dict.items()): - if isinstance(v, str): - val = sight_pb2.Value( - sub_type=sight_pb2.Value.ST_STRING, - string_value=v, - ) - elif isinstance(v, float): - val = sight_pb2.Value( - sub_type=sight_pb2.Value.ST_DOUBLE, - double_value=v, - ) - elif (not utils.is_scalar(v)): - print('here v is : ', v, type(v)) - val = sight_pb2.Value( - sub_type=sight_pb2.Value.ST_JSON, - json_value=v, - ) - else: - raise ValueError( - 'action attribute type must be either string or float') - - param_proto.append(sight_pb2.DecisionParam(key=k, value=val)) - return param_proto + param_dict: Dict[str, float]) -> List[sight_pb2.DecisionParam]: + """converting dictionary of parameters into proto.""" + param_proto: List[sight_pb2.DecisionParam] = [] + for k, v in sorted(param_dict.items()): + if isinstance(v, str): + val = sight_pb2.Value( + sub_type=sight_pb2.Value.ST_STRING, + string_value=v, + ) + elif isinstance(v, float): + val = sight_pb2.Value( + sub_type=sight_pb2.Value.ST_DOUBLE, + double_value=v, + ) + elif (not utils.is_scalar(v)): + 
print('here v is : ', v, type(v)) + val = sight_pb2.Value( + sub_type=sight_pb2.Value.ST_JSON, + json_value=v, + ) + else: + raise ValueError('action attribute type must be either string or float') + + param_proto.append(sight_pb2.DecisionParam(key=k, value=val)) + return param_proto def param_proto_to_dict( - param_proto: Sequence[sight_pb2.DecisionParam], ) -> Dict[str, float]: - """converting proto back into dictionary of parameters.""" - param_dict = {} - for param in param_proto: - # if ((param.value.sub_type != sight_pb2.Value.ST_DOUBLE) and (param.value.sub_type != sight_pb2.Value.ST_STRING)): - # raise ValueError("Unsupported action type %s" % param.value.sub_type) - # param_dict[param.key] = param.value.double_value - if (param.value.sub_type == sight_pb2.Value.ST_DOUBLE): - param_dict[param.key] = param.value.double_value - elif (param.value.sub_type == sight_pb2.Value.ST_STRING): - param_dict[param.key] = param.value.string_value - elif (param.value.sub_type == sight_pb2.Value.ST_BOOL): - param_dict[param.key] = param.value.bool_value - elif (param.value.sub_type == sight_pb2.Value.ST_BYTES): - param_dict[param.key] = param.value.bytes_value - elif (param.value.sub_type == sight_pb2.Value.ST_INT64): - param_dict[param.key] = param.value.int64_value - elif (param.value.sub_type == sight_pb2.Value.ST_JSON): - param_dict[param.key] = param.value.json_value - else: - raise ValueError("Unsupported action type %s" % - param.value.sub_type) - return param_dict + param_proto: Sequence[sight_pb2.DecisionParam],) -> Dict[str, float]: + """converting proto back into dictionary of parameters.""" + param_dict = {} + for param in param_proto: + # if ((param.value.sub_type != sight_pb2.Value.ST_DOUBLE) and (param.value.sub_type != sight_pb2.Value.ST_STRING)): + # raise ValueError("Unsupported action type %s" % param.value.sub_type) + # param_dict[param.key] = param.value.double_value + if (param.value.sub_type == sight_pb2.Value.ST_DOUBLE): + param_dict[param.key] = param.value.double_value + elif (param.value.sub_type == sight_pb2.Value.ST_STRING): + param_dict[param.key] = param.value.string_value + elif (param.value.sub_type == sight_pb2.Value.ST_BOOL): + param_dict[param.key] = param.value.bool_value + elif (param.value.sub_type == sight_pb2.Value.ST_BYTES): + param_dict[param.key] = param.value.bytes_value + elif (param.value.sub_type == sight_pb2.Value.ST_INT64): + param_dict[param.key] = param.value.int64_value + elif (param.value.sub_type == sight_pb2.Value.ST_JSON): + param_dict[param.key] = param.value.json_value + else: + raise ValueError("Unsupported action type %s" % param.value.sub_type) + return param_dict class OptimizerInstance: - """An OptimizerInstance class that is generic for all optimizers. + """An OptimizerInstance class that is generic for all optimizers. An optimizer containing base methods which specialized optimizers will override while communicating with client. """ - def __init__(self): - self.actions = {} - self.state = {} - self.outcomes = {} + def __init__(self): + self.actions = {} + self.state = {} + self.outcomes = {} - def launch( - self, - request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: - """Initializing new study and storing state and action attributes for the same. + def launch(self, + request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: + """Initializing new study and storing state and action attributes for the same. 
""" - method_name = "launch" - logging.debug(">>>> In %s of %s", method_name, _file_name) - # logging.info('request.decision_config_params=%s', request.decision_config_params) - - # sorting dict key wise to maintain consistency at for all calls - action_keys = list(request.decision_config_params.action_attrs.keys()) - action_keys.sort() - for k in action_keys: - self.actions[k] = request.decision_config_params.action_attrs[k] - - # sorting dict key wise to maintain consistency at for all calls - state_keys = list(request.decision_config_params.state_attrs.keys()) - state_keys.sort() - for k in state_keys: - self.state[k] = request.decision_config_params.state_attrs[k] - - # sorting dict key wise to maintain consistency at for all calls - outcome_keys = list( - request.decision_config_params.outcome_attrs.keys()) - outcome_keys.sort() - for k in outcome_keys: - self.outcomes[k] = request.decision_config_params.outcome_attrs[k] - - print(f"<<<<<<<<< Out {method_name} of {_file_name}.") - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return service_pb2.LaunchResponse() - - def decision_point( - self, request: service_pb2.DecisionPointRequest - ) -> service_pb2.DecisionPointResponse: - return service_pb2.DecisionPointResponse() - - def finalize_episode( - self, request: service_pb2.FinalizeEpisodeRequest - ) -> service_pb2.FinalizeEpisodeResponse: - return service_pb2.FinalizeEpisodeResponse() - - def tell(self, - request: service_pb2.TellRequest) -> service_pb2.TellResponse: - return service_pb2.TellResponse() - - def listen( - self, - request: service_pb2.ListenRequest) -> service_pb2.ListenResponse: - return service_pb2.ListenResponse() - - def current_status( - self, request: service_pb2.CurrentStatusRequest - ) -> service_pb2.CurrentStatusResponse: - return service_pb2.CurrentStatusResponse() - - def propose_action( - self, request: service_pb2.ProposeActionRequest - ) -> service_pb2.ProposeActionResponse: - return service_pb2.ProposeActionResponse() - - def GetOutcome( - self, request: service_pb2.GetOutcomeRequest - ) -> service_pb2.GetOutcomeResponse: - return service_pb2.GetOutcomeResponse() - - def fetch_optimal_action( - self, request: service_pb2.FetchOptimalActionRequest - ) -> service_pb2.FetchOptimalActionResponse: - return service_pb2.FetchOptimalActionResponse() - - def close(self, - request: service_pb2.CloseRequest) -> service_pb2.CloseResponse: - return service_pb2.CloseResponse() - - def WorkerAlive( - self, request: service_pb2.WorkerAliveRequest - ) -> service_pb2.WorkerAliveResponse: - return service_pb2.WorkerAliveResponse() + method_name = "launch" + logging.debug(">>>> In %s of %s", method_name, _file_name) + # logging.info('request.decision_config_params=%s', request.decision_config_params) + + # sorting dict key wise to maintain consistency at for all calls + action_keys = list(request.decision_config_params.action_attrs.keys()) + action_keys.sort() + for k in action_keys: + self.actions[k] = request.decision_config_params.action_attrs[k] + + # sorting dict key wise to maintain consistency at for all calls + state_keys = list(request.decision_config_params.state_attrs.keys()) + state_keys.sort() + for k in state_keys: + self.state[k] = request.decision_config_params.state_attrs[k] + + # sorting dict key wise to maintain consistency at for all calls + outcome_keys = list(request.decision_config_params.outcome_attrs.keys()) + outcome_keys.sort() + for k in outcome_keys: + self.outcomes[k] = request.decision_config_params.outcome_attrs[k] + + 
print(f"<<<<<<<<< Out {method_name} of {_file_name}.") + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return service_pb2.LaunchResponse() + + def decision_point( + self, request: service_pb2.DecisionPointRequest + ) -> service_pb2.DecisionPointResponse: + return service_pb2.DecisionPointResponse() + + def finalize_episode( + self, request: service_pb2.FinalizeEpisodeRequest + ) -> service_pb2.FinalizeEpisodeResponse: + return service_pb2.FinalizeEpisodeResponse() + + def tell(self, request: service_pb2.TellRequest) -> service_pb2.TellResponse: + return service_pb2.TellResponse() + + def listen(self, + request: service_pb2.ListenRequest) -> service_pb2.ListenResponse: + return service_pb2.ListenResponse() + + def current_status( + self, request: service_pb2.CurrentStatusRequest + ) -> service_pb2.CurrentStatusResponse: + return service_pb2.CurrentStatusResponse() + + def propose_action( + self, request: service_pb2.ProposeActionRequest + ) -> service_pb2.ProposeActionResponse: + return service_pb2.ProposeActionResponse() + + def GetOutcome( + self, + request: service_pb2.GetOutcomeRequest) -> service_pb2.GetOutcomeResponse: + return service_pb2.GetOutcomeResponse() + + def fetch_optimal_action( + self, request: service_pb2.FetchOptimalActionRequest + ) -> service_pb2.FetchOptimalActionResponse: + return service_pb2.FetchOptimalActionResponse() + + def close(self, + request: service_pb2.CloseRequest) -> service_pb2.CloseResponse: + return service_pb2.CloseResponse() + + def WorkerAlive( + self, request: service_pb2.WorkerAliveRequest + ) -> service_pb2.WorkerAliveResponse: + return service_pb2.WorkerAliveResponse() diff --git a/sight_service/proto/numproto/numproto.py b/sight_service/proto/numproto/numproto.py index 6124a6d..639aab6 100644 --- a/sight_service/proto/numproto/numproto.py +++ b/sight_service/proto/numproto/numproto.py @@ -11,17 +11,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """NumPy ndarray to protobuf serialization and deserialization""" from io import BytesIO import numpy as np - from sight_service.proto.numproto.protobuf.ndarray_pb2 import NDArray def ndarray_to_proto(nda: np.ndarray) -> NDArray: - """Serializes a numpy array into an NDArray protobuf message. + """Serializes a numpy array into an NDArray protobuf message. Args: nda (np.ndarray): numpy array to serialize. @@ -29,14 +27,14 @@ def ndarray_to_proto(nda: np.ndarray) -> NDArray: Returns: Returns an NDArray protobuf message. """ - nda_bytes = BytesIO() - np.save(nda_bytes, nda, allow_pickle=False) + nda_bytes = BytesIO() + np.save(nda_bytes, nda, allow_pickle=False) - return NDArray(ndarray=nda_bytes.getvalue()) + return NDArray(ndarray=nda_bytes.getvalue()) def proto_to_ndarray(nda_proto: NDArray) -> np.ndarray: - """Deserializes an NDArray protobuf message into a numpy array. + """Deserializes an NDArray protobuf message into a numpy array. Args: nda_proto (NDArray): NDArray protobuf message to deserialize. @@ -44,6 +42,6 @@ def proto_to_ndarray(nda_proto: NDArray) -> np.ndarray: Returns: Returns a numpy.ndarray. 
""" - nda_bytes = BytesIO(nda_proto.ndarray) + nda_bytes = BytesIO(nda_proto.ndarray) - return np.load(nda_bytes, allow_pickle=False) + return np.load(nda_bytes, allow_pickle=False) diff --git a/sight_service/proto/numproto/protobuf/ndarray_pb2.py b/sight_service/proto/numproto/protobuf/ndarray_pb2.py index 66e0984..da2a2e3 100644 --- a/sight_service/proto/numproto/protobuf/ndarray_pb2.py +++ b/sight_service/proto/numproto/protobuf/ndarray_pb2.py @@ -2,24 +2,25 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: sight_service/proto/numproto/protobuf/ndarray.proto """Generated protocol buffer code.""" -from google.protobuf.internal import builder as _builder from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() - - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n3sight_service/proto/numproto/protobuf/ndarray.proto\x12\x11numproto.protobuf\"\x1a\n\x07NDArray\x12\x0f\n\x07ndarray\x18\x01 \x01(\x0c\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( + b'\n3sight_service/proto/numproto/protobuf/ndarray.proto\x12\x11numproto.protobuf\"\x1a\n\x07NDArray\x12\x0f\n\x07ndarray\x18\x01 \x01(\x0c\x62\x06proto3' +) _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sight_service.proto.numproto.protobuf.ndarray_pb2', globals()) +_builder.BuildTopDescriptorsAndMessages( + DESCRIPTOR, 'sight_service.proto.numproto.protobuf.ndarray_pb2', globals()) if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None - _NDARRAY._serialized_start=74 - _NDARRAY._serialized_end=100 + _NDARRAY._serialized_start = 74 + _NDARRAY._serialized_end = 100 # @@protoc_insertion_point(module_scope) diff --git a/sight_service/proto/service_pb2.py b/sight_service/proto/service_pb2.py index 2e01cd2..e4f00df 100644 --- a/sight_service/proto/service_pb2.py +++ b/sight_service/proto/service_pb2.py @@ -2,23 +2,26 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: sight_service/proto/service.proto """Generated protocol buffer code.""" -from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import enum_type_wrapper + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() - -from sight.proto import sight_pb2 as sight_dot_proto_dot_sight__pb2 -from sight_service.proto.numproto.protobuf import ndarray_pb2 as sight__service_dot_proto_dot_numproto_dot_protobuf_dot_ndarray__pb2 from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from sight.proto import sight_pb2 as sight_dot_proto_dot_sight__pb2 +from sight_service.proto.numproto.protobuf import ( + ndarray_pb2 as sight__service_dot_proto_dot_numproto_dot_protobuf_dot_ndarray__pb2 +) - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n!sight_service/proto/service.proto\x12\x0fsight.x.service\x1a\x17sight/proto/sight.proto\x1a\x33sight_service/proto/numproto/protobuf/ndarray.proto\x1a\x1cgoogle/api/annotations.proto\"\xa5\x03\n\x0c\x41\x63me_Request\x12G\n\x14\x65pisode_observations\x18\x01 \x03(\x0b\x32).sight.x.service.Acme_Request.Observation\x12\x14\n\x0clearner_keys\x18\x02 \x03(\t\x1a\xfe\x01\n\x0bObservation\x12*\n\x06\x61\x63tion\x18\x01 \x01(\x0b\x32\x1a.numproto.protobuf.NDArray\x12\x38\n\x08steptype\x18\x02 \x01(\x0e\x32&.sight.x.service.Acme_Request.StepType\x12*\n\x06reward\x18\x03 \x01(\x0b\x32\x1a.numproto.protobuf.NDArray\x12,\n\x08\x64iscount\x18\x04 \x01(\x0b\x32\x1a.numproto.protobuf.NDArray\x12/\n\x0bobservation\x18\x05 \x01(\x0b\x32\x1a.numproto.protobuf.NDArray\"5\n\x08StepType\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x46IRST\x10\x01\x12\x07\n\x03MID\x10\x02\x12\x08\n\x04LAST\x10\x03\"\x80\x02\n\rAcme_Response\x12\x34\n\x06layers\x18\x01 \x03(\x0b\x32$.sight.x.service.Acme_Response.Layer\x1a\xb8\x01\n\x05Layer\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x41\n\x07weights\x18\x02 \x01(\x0b\x32\x30.sight.x.service.Acme_Response.Layer.WeightsData\x1a^\n\x0bWeightsData\x12\t\n\x01\x62\x18\x01 \x03(\x02\x12%\n\x01w\x18\x02 \x01(\x0b\x32\x1a.numproto.protobuf.NDArray\x12\x0e\n\x06offset\x18\x03 \x03(\x02\x12\r\n\x05scale\x18\x04 \x03(\x02\"\xe0\x01\n\x14\x44\x65\x63isionPointRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\x12\x34\n\x0e\x64\x65\x63ision_point\x18\x03 \x01(\x0b\x32\x1c.sight.x.proto.DecisionPoint\x12\x38\n\x10\x64\x65\x63ision_outcome\x18\x04 \x01(\x0b\x32\x1e.sight.x.proto.DecisionOutcome\x12\x32\n\x0b\x61\x63me_config\x18\x05 \x01(\x0b\x32\x1d.sight.x.service.Acme_Request\"\x9f\x02\n\x15\x44\x65\x63isionPointResponse\x12,\n\x06\x61\x63tion\x18\x01 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x35\n\racme_response\x18\x02 \x01(\x0b\x32\x1e.sight.x.service.Acme_Response\x12\x14\n\x0cresponse_idx\x18\x03 \x01(\x03\x12\x46\n\x0b\x61\x63tion_type\x18\x04 \x01(\x0e\x32\x31.sight.x.service.DecisionPointResponse.ActionType\"C\n\nActionType\x12\x0e\n\nAT_UNKNOWN\x10\x00\x12\n\n\x06\x41T_ACT\x10\x01\x12\x0b\n\x07\x41T_DONE\x10\x02\x12\x0c\n\x08\x41T_RETRY\x10\x03\"A\n\x19\x46\x65tchOptimalActionRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"2\n\x1a\x46\x65tchOptimalActionResponse\x12\x14\n\x0cresponse_str\x18\x01 
\x01(\t\"5\n\x0bTellRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x13\n\x0bmessage_str\x18\x02 \x01(\t\"$\n\x0cTellResponse\x12\x14\n\x0cresponse_str\x18\x01 \x01(\t\"\"\n\rListenRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\">\n\x0eListenResponse\x12\x16\n\x0eresponse_ready\x18\x01 \x01(\x08\x12\x14\n\x0cresponse_str\x18\x02 \x01(\t\"<\n\x14\x43urrentStatusRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"\xae\x01\n\x15\x43urrentStatusResponse\x12\x14\n\x0cresponse_str\x18\x01 \x01(\t\x12=\n\x06status\x18\x02 \x01(\x0e\x32-.sight.x.service.CurrentStatusResponse.Status\"@\n\x06Status\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x0f\n\x0bIN_PROGRESS\x10\x01\x12\x0b\n\x07SUCCESS\x10\x02\x12\x0b\n\x07\x46\x41ILURE\x10\x03\"|\n\rLaunchRequest\x12I\n\x16\x64\x65\x63ision_config_params\x18\x01 \x01(\x0b\x32).sight.x.proto.DecisionConfigurationStart\x12\r\n\x05label\x18\x03 \x01(\t\x12\x11\n\tclient_id\x18\x04 \x01(\t\"(\n\x0eLaunchResponse\x12\x16\n\x0e\x64isplay_string\x18\x01 \x01(\t\"\x8f\x01\n\x14ProposeActionRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x32\n\x0c\x61\x63tion_attrs\x18\x02 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x30\n\nattributes\x18\x03 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\"*\n\x15ProposeActionResponse\x12\x11\n\taction_id\x18\x01 \x01(\x03\":\n\x11GetOutcomeRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x12\n\nunique_ids\x18\x02 \x03(\x03\"\xf7\x03\n\x12GetOutcomeResponse\x12<\n\x07outcome\x18\x01 \x03(\x0b\x32+.sight.x.service.GetOutcomeResponse.Outcome\x1a\xa2\x03\n\x07Outcome\x12\x31\n\x0bstate_attrs\x18\x01 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x32\n\x0c\x61\x63tion_attrs\x18\x02 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x0e\n\x06reward\x18\x03 \x01(\x02\x12\x33\n\routcome_attrs\x18\x04 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x30\n\nattributes\x18\x05 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x42\n\x06status\x18\x06 \x01(\x0e\x32\x32.sight.x.service.GetOutcomeResponse.Outcome.Status\x12\x14\n\x0cresponse_str\x18\x07 \x01(\t\x12\x11\n\taction_id\x18\x08 \x01(\x03\"L\n\x06Status\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x0b\n\x07PENDING\x10\x01\x12\n\n\x06\x41\x43TIVE\x10\x02\x12\r\n\tCOMPLETED\x10\x03\x12\r\n\tNOT_EXIST\x10\x04\"\xb3\x02\n\x16\x46inalizeEpisodeRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\x12\x34\n\x0e\x64\x65\x63ision_point\x18\x03 \x01(\x0b\x32\x1c.sight.x.proto.DecisionPoint\x12\x38\n\x10\x64\x65\x63ision_outcome\x18\x04 \x01(\x0b\x32\x1e.sight.x.proto.DecisionOutcome\x12O\n\x0eoptimizer_type\x18\x05 \x01(\x0e\x32\x37.sight.x.proto.DecisionConfigurationStart.OptimizerType\x12\x32\n\x0b\x61\x63me_config\x18\x06 \x01(\x0b\x32\x1d.sight.x.service.Acme_Request\"D\n\x17\x46inalizeEpisodeResponse\x12\x14\n\x0cresponse_str\x18\x01 \x01(\t\x12\x13\n\x0bstop_worker\x18\x02 \x01(\x08\" \n\x0bTestRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\"\x1b\n\x0cTestResponse\x12\x0b\n\x03val\x18\x01 \x01(\t\"\x0f\n\rCreateRequest\"1\n\x0e\x43reateResponse\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x13\n\x0bpath_prefix\x18\x02 \x01(\t\"!\n\x0c\x43loseRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\"%\n\rCloseResponse\x12\x14\n\x0cresponse_str\x18\x01 \x01(\t\":\n\x12WorkerAliveRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"\xa0\x01\n\x13WorkerAliveResponse\x12\x44\n\x0bstatus_type\x18\x04 
\x01(\x0e\x32/.sight.x.service.WorkerAliveResponse.StatusType\"C\n\nStatusType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\n\n\x06ST_ACT\x10\x01\x12\x0b\n\x07ST_DONE\x10\x02\x12\x0c\n\x08ST_RETRY\x10\x03*[\n\tLogFormat\x12\x0e\n\nLF_UNKNOWN\x10\x00\x12\x0f\n\x0bLF_COLUMNIO\x10\x01\x12\x10\n\x0cLF_CAPACITOR\x10\x02\x12\x0e\n\nLF_SPANNER\x10\x03\x12\x0b\n\x07LF_AVRO\x10\x04\x32\xd6\x0c\n\x0cSightService\x12\x61\n\x04Test\x12\x1c.sight.x.service.TestRequest\x1a\x1d.sight.x.service.TestResponse\"\x1c\x82\xd3\xe4\x93\x02\x16\x12\x14/v1/test/{client_id}\x12]\n\x06\x43reate\x12\x1e.sight.x.service.CreateRequest\x1a\x1f.sight.x.service.CreateResponse\"\x12\x82\xd3\xe4\x93\x02\x0c\x12\n/v1/create\x12Y\n\x05\x43lose\x12\x1d.sight.x.service.CloseRequest\x1a\x1e.sight.x.service.CloseResponse\"\x11\x82\xd3\xe4\x93\x02\x0b\x12\t/v1/Close\x12q\n\x0bWorkerAlive\x12#.sight.x.service.WorkerAliveRequest\x1a$.sight.x.service.WorkerAliveResponse\"\x17\x82\xd3\xe4\x93\x02\x11\x12\x0f/v1/WorkerAlive\x12\x89\x01\n\x06Launch\x12\x1e.sight.x.service.LaunchRequest\x1a\x1f.sight.x.service.LaunchResponse\">\x82\xd3\xe4\x93\x02\x38\"\x1e/v1/launch/{client_id}/{label}:\x16\x64\x65\x63ision_config_params\x12\x9f\x01\n\rDecisionPoint\x12%.sight.x.service.DecisionPointRequest\x1a&.sight.x.service.DecisionPointResponse\"?\x82\xd3\xe4\x93\x02\x39\"*/v1/decision_point/{client_id}/{worker_id}:\x0b\x61\x63me_config\x12m\n\x04Tell\x12\x1c.sight.x.service.TellRequest\x1a\x1d.sight.x.service.TellResponse\"(\x82\xd3\xe4\x93\x02\"\" /v1/tell/{client_id}/{worker_id}\x12u\n\x06Listen\x12\x1e.sight.x.service.ListenRequest\x1a\x1f.sight.x.service.ListenResponse\"*\x82\xd3\xe4\x93\x02$\x12\"/v1/listen/{client_id}/{worker_id}\x12\x92\x01\n\rCurrentStatus\x12%.sight.x.service.CurrentStatusRequest\x1a&.sight.x.service.CurrentStatusResponse\"2\x82\xd3\xe4\x93\x02,\x12*/v1/current_status/{client_id}/{worker_id}\x12\xa7\x01\n\x12\x46\x65tchOptimalAction\x12*.sight.x.service.FetchOptimalActionRequest\x1a+.sight.x.service.FetchOptimalActionResponse\"8\x82\xd3\xe4\x93\x02\x32\x12\x30/v1/fetch_optimal_action/{client_id}/{worker_id}\x12`\n\rProposeAction\x12%.sight.x.service.ProposeActionRequest\x1a&.sight.x.service.ProposeActionResponse\"\x00\x12W\n\nGetOutcome\x12\".sight.x.service.GetOutcomeRequest\x1a#.sight.x.service.GetOutcomeResponse\"\x00\x12\xa7\x01\n\x0f\x46inalizeEpisode\x12\'.sight.x.service.FinalizeEpisodeRequest\x1a(.sight.x.service.FinalizeEpisodeResponse\"A\x82\xd3\xe4\x93\x02;\",/v1/finalize_episode/{client_id}/{worker_id}:\x0b\x61\x63me_configb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( + b'\n!sight_service/proto/service.proto\x12\x0fsight.x.service\x1a\x17sight/proto/sight.proto\x1a\x33sight_service/proto/numproto/protobuf/ndarray.proto\x1a\x1cgoogle/api/annotations.proto\"\xa5\x03\n\x0c\x41\x63me_Request\x12G\n\x14\x65pisode_observations\x18\x01 \x03(\x0b\x32).sight.x.service.Acme_Request.Observation\x12\x14\n\x0clearner_keys\x18\x02 \x03(\t\x1a\xfe\x01\n\x0bObservation\x12*\n\x06\x61\x63tion\x18\x01 \x01(\x0b\x32\x1a.numproto.protobuf.NDArray\x12\x38\n\x08steptype\x18\x02 \x01(\x0e\x32&.sight.x.service.Acme_Request.StepType\x12*\n\x06reward\x18\x03 \x01(\x0b\x32\x1a.numproto.protobuf.NDArray\x12,\n\x08\x64iscount\x18\x04 \x01(\x0b\x32\x1a.numproto.protobuf.NDArray\x12/\n\x0bobservation\x18\x05 
\x01(\x0b\x32\x1a.numproto.protobuf.NDArray\"5\n\x08StepType\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x46IRST\x10\x01\x12\x07\n\x03MID\x10\x02\x12\x08\n\x04LAST\x10\x03\"\x80\x02\n\rAcme_Response\x12\x34\n\x06layers\x18\x01 \x03(\x0b\x32$.sight.x.service.Acme_Response.Layer\x1a\xb8\x01\n\x05Layer\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x41\n\x07weights\x18\x02 \x01(\x0b\x32\x30.sight.x.service.Acme_Response.Layer.WeightsData\x1a^\n\x0bWeightsData\x12\t\n\x01\x62\x18\x01 \x03(\x02\x12%\n\x01w\x18\x02 \x01(\x0b\x32\x1a.numproto.protobuf.NDArray\x12\x0e\n\x06offset\x18\x03 \x03(\x02\x12\r\n\x05scale\x18\x04 \x03(\x02\"\xe0\x01\n\x14\x44\x65\x63isionPointRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\x12\x34\n\x0e\x64\x65\x63ision_point\x18\x03 \x01(\x0b\x32\x1c.sight.x.proto.DecisionPoint\x12\x38\n\x10\x64\x65\x63ision_outcome\x18\x04 \x01(\x0b\x32\x1e.sight.x.proto.DecisionOutcome\x12\x32\n\x0b\x61\x63me_config\x18\x05 \x01(\x0b\x32\x1d.sight.x.service.Acme_Request\"\x9f\x02\n\x15\x44\x65\x63isionPointResponse\x12,\n\x06\x61\x63tion\x18\x01 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x35\n\racme_response\x18\x02 \x01(\x0b\x32\x1e.sight.x.service.Acme_Response\x12\x14\n\x0cresponse_idx\x18\x03 \x01(\x03\x12\x46\n\x0b\x61\x63tion_type\x18\x04 \x01(\x0e\x32\x31.sight.x.service.DecisionPointResponse.ActionType\"C\n\nActionType\x12\x0e\n\nAT_UNKNOWN\x10\x00\x12\n\n\x06\x41T_ACT\x10\x01\x12\x0b\n\x07\x41T_DONE\x10\x02\x12\x0c\n\x08\x41T_RETRY\x10\x03\"A\n\x19\x46\x65tchOptimalActionRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"2\n\x1a\x46\x65tchOptimalActionResponse\x12\x14\n\x0cresponse_str\x18\x01 \x01(\t\"5\n\x0bTellRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x13\n\x0bmessage_str\x18\x02 \x01(\t\"$\n\x0cTellResponse\x12\x14\n\x0cresponse_str\x18\x01 \x01(\t\"\"\n\rListenRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\">\n\x0eListenResponse\x12\x16\n\x0eresponse_ready\x18\x01 \x01(\x08\x12\x14\n\x0cresponse_str\x18\x02 \x01(\t\"<\n\x14\x43urrentStatusRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"\xae\x01\n\x15\x43urrentStatusResponse\x12\x14\n\x0cresponse_str\x18\x01 \x01(\t\x12=\n\x06status\x18\x02 \x01(\x0e\x32-.sight.x.service.CurrentStatusResponse.Status\"@\n\x06Status\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x0f\n\x0bIN_PROGRESS\x10\x01\x12\x0b\n\x07SUCCESS\x10\x02\x12\x0b\n\x07\x46\x41ILURE\x10\x03\"|\n\rLaunchRequest\x12I\n\x16\x64\x65\x63ision_config_params\x18\x01 \x01(\x0b\x32).sight.x.proto.DecisionConfigurationStart\x12\r\n\x05label\x18\x03 \x01(\t\x12\x11\n\tclient_id\x18\x04 \x01(\t\"(\n\x0eLaunchResponse\x12\x16\n\x0e\x64isplay_string\x18\x01 \x01(\t\"\x8f\x01\n\x14ProposeActionRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x32\n\x0c\x61\x63tion_attrs\x18\x02 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x30\n\nattributes\x18\x03 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\"*\n\x15ProposeActionResponse\x12\x11\n\taction_id\x18\x01 \x01(\x03\":\n\x11GetOutcomeRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x12\n\nunique_ids\x18\x02 \x03(\x03\"\xf7\x03\n\x12GetOutcomeResponse\x12<\n\x07outcome\x18\x01 \x03(\x0b\x32+.sight.x.service.GetOutcomeResponse.Outcome\x1a\xa2\x03\n\x07Outcome\x12\x31\n\x0bstate_attrs\x18\x01 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x32\n\x0c\x61\x63tion_attrs\x18\x02 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x0e\n\x06reward\x18\x03 \x01(\x02\x12\x33\n\routcome_attrs\x18\x04 
\x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x30\n\nattributes\x18\x05 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x42\n\x06status\x18\x06 \x01(\x0e\x32\x32.sight.x.service.GetOutcomeResponse.Outcome.Status\x12\x14\n\x0cresponse_str\x18\x07 \x01(\t\x12\x11\n\taction_id\x18\x08 \x01(\x03\"L\n\x06Status\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x0b\n\x07PENDING\x10\x01\x12\n\n\x06\x41\x43TIVE\x10\x02\x12\r\n\tCOMPLETED\x10\x03\x12\r\n\tNOT_EXIST\x10\x04\"\xb3\x02\n\x16\x46inalizeEpisodeRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\x12\x34\n\x0e\x64\x65\x63ision_point\x18\x03 \x01(\x0b\x32\x1c.sight.x.proto.DecisionPoint\x12\x38\n\x10\x64\x65\x63ision_outcome\x18\x04 \x01(\x0b\x32\x1e.sight.x.proto.DecisionOutcome\x12O\n\x0eoptimizer_type\x18\x05 \x01(\x0e\x32\x37.sight.x.proto.DecisionConfigurationStart.OptimizerType\x12\x32\n\x0b\x61\x63me_config\x18\x06 \x01(\x0b\x32\x1d.sight.x.service.Acme_Request\"D\n\x17\x46inalizeEpisodeResponse\x12\x14\n\x0cresponse_str\x18\x01 \x01(\t\x12\x13\n\x0bstop_worker\x18\x02 \x01(\x08\" \n\x0bTestRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\"\x1b\n\x0cTestResponse\x12\x0b\n\x03val\x18\x01 \x01(\t\"\x0f\n\rCreateRequest\"1\n\x0e\x43reateResponse\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x13\n\x0bpath_prefix\x18\x02 \x01(\t\"!\n\x0c\x43loseRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\"%\n\rCloseResponse\x12\x14\n\x0cresponse_str\x18\x01 \x01(\t\":\n\x12WorkerAliveRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"\xa0\x01\n\x13WorkerAliveResponse\x12\x44\n\x0bstatus_type\x18\x04 \x01(\x0e\x32/.sight.x.service.WorkerAliveResponse.StatusType\"C\n\nStatusType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\n\n\x06ST_ACT\x10\x01\x12\x0b\n\x07ST_DONE\x10\x02\x12\x0c\n\x08ST_RETRY\x10\x03*[\n\tLogFormat\x12\x0e\n\nLF_UNKNOWN\x10\x00\x12\x0f\n\x0bLF_COLUMNIO\x10\x01\x12\x10\n\x0cLF_CAPACITOR\x10\x02\x12\x0e\n\nLF_SPANNER\x10\x03\x12\x0b\n\x07LF_AVRO\x10\x04\x32\xd6\x0c\n\x0cSightService\x12\x61\n\x04Test\x12\x1c.sight.x.service.TestRequest\x1a\x1d.sight.x.service.TestResponse\"\x1c\x82\xd3\xe4\x93\x02\x16\x12\x14/v1/test/{client_id}\x12]\n\x06\x43reate\x12\x1e.sight.x.service.CreateRequest\x1a\x1f.sight.x.service.CreateResponse\"\x12\x82\xd3\xe4\x93\x02\x0c\x12\n/v1/create\x12Y\n\x05\x43lose\x12\x1d.sight.x.service.CloseRequest\x1a\x1e.sight.x.service.CloseResponse\"\x11\x82\xd3\xe4\x93\x02\x0b\x12\t/v1/Close\x12q\n\x0bWorkerAlive\x12#.sight.x.service.WorkerAliveRequest\x1a$.sight.x.service.WorkerAliveResponse\"\x17\x82\xd3\xe4\x93\x02\x11\x12\x0f/v1/WorkerAlive\x12\x89\x01\n\x06Launch\x12\x1e.sight.x.service.LaunchRequest\x1a\x1f.sight.x.service.LaunchResponse\">\x82\xd3\xe4\x93\x02\x38\"\x1e/v1/launch/{client_id}/{label}:\x16\x64\x65\x63ision_config_params\x12\x9f\x01\n\rDecisionPoint\x12%.sight.x.service.DecisionPointRequest\x1a&.sight.x.service.DecisionPointResponse\"?\x82\xd3\xe4\x93\x02\x39\"*/v1/decision_point/{client_id}/{worker_id}:\x0b\x61\x63me_config\x12m\n\x04Tell\x12\x1c.sight.x.service.TellRequest\x1a\x1d.sight.x.service.TellResponse\"(\x82\xd3\xe4\x93\x02\"\" 
/v1/tell/{client_id}/{worker_id}\x12u\n\x06Listen\x12\x1e.sight.x.service.ListenRequest\x1a\x1f.sight.x.service.ListenResponse\"*\x82\xd3\xe4\x93\x02$\x12\"/v1/listen/{client_id}/{worker_id}\x12\x92\x01\n\rCurrentStatus\x12%.sight.x.service.CurrentStatusRequest\x1a&.sight.x.service.CurrentStatusResponse\"2\x82\xd3\xe4\x93\x02,\x12*/v1/current_status/{client_id}/{worker_id}\x12\xa7\x01\n\x12\x46\x65tchOptimalAction\x12*.sight.x.service.FetchOptimalActionRequest\x1a+.sight.x.service.FetchOptimalActionResponse\"8\x82\xd3\xe4\x93\x02\x32\x12\x30/v1/fetch_optimal_action/{client_id}/{worker_id}\x12`\n\rProposeAction\x12%.sight.x.service.ProposeActionRequest\x1a&.sight.x.service.ProposeActionResponse\"\x00\x12W\n\nGetOutcome\x12\".sight.x.service.GetOutcomeRequest\x1a#.sight.x.service.GetOutcomeResponse\"\x00\x12\xa7\x01\n\x0f\x46inalizeEpisode\x12\'.sight.x.service.FinalizeEpisodeRequest\x1a(.sight.x.service.FinalizeEpisodeResponse\"A\x82\xd3\xe4\x93\x02;\",/v1/finalize_episode/{client_id}/{worker_id}:\x0b\x61\x63me_configb\x06proto3' +) _LOGFORMAT = DESCRIPTOR.enum_types_by_name['LogFormat'] LogFormat = enum_type_wrapper.EnumTypeWrapper(_LOGFORMAT) @@ -28,31 +31,39 @@ LF_SPANNER = 3 LF_AVRO = 4 - _ACME_REQUEST = DESCRIPTOR.message_types_by_name['Acme_Request'] _ACME_REQUEST_OBSERVATION = _ACME_REQUEST.nested_types_by_name['Observation'] _ACME_RESPONSE = DESCRIPTOR.message_types_by_name['Acme_Response'] _ACME_RESPONSE_LAYER = _ACME_RESPONSE.nested_types_by_name['Layer'] -_ACME_RESPONSE_LAYER_WEIGHTSDATA = _ACME_RESPONSE_LAYER.nested_types_by_name['WeightsData'] +_ACME_RESPONSE_LAYER_WEIGHTSDATA = _ACME_RESPONSE_LAYER.nested_types_by_name[ + 'WeightsData'] _DECISIONPOINTREQUEST = DESCRIPTOR.message_types_by_name['DecisionPointRequest'] -_DECISIONPOINTRESPONSE = DESCRIPTOR.message_types_by_name['DecisionPointResponse'] -_FETCHOPTIMALACTIONREQUEST = DESCRIPTOR.message_types_by_name['FetchOptimalActionRequest'] -_FETCHOPTIMALACTIONRESPONSE = DESCRIPTOR.message_types_by_name['FetchOptimalActionResponse'] +_DECISIONPOINTRESPONSE = DESCRIPTOR.message_types_by_name[ + 'DecisionPointResponse'] +_FETCHOPTIMALACTIONREQUEST = DESCRIPTOR.message_types_by_name[ + 'FetchOptimalActionRequest'] +_FETCHOPTIMALACTIONRESPONSE = DESCRIPTOR.message_types_by_name[ + 'FetchOptimalActionResponse'] _TELLREQUEST = DESCRIPTOR.message_types_by_name['TellRequest'] _TELLRESPONSE = DESCRIPTOR.message_types_by_name['TellResponse'] _LISTENREQUEST = DESCRIPTOR.message_types_by_name['ListenRequest'] _LISTENRESPONSE = DESCRIPTOR.message_types_by_name['ListenResponse'] _CURRENTSTATUSREQUEST = DESCRIPTOR.message_types_by_name['CurrentStatusRequest'] -_CURRENTSTATUSRESPONSE = DESCRIPTOR.message_types_by_name['CurrentStatusResponse'] +_CURRENTSTATUSRESPONSE = DESCRIPTOR.message_types_by_name[ + 'CurrentStatusResponse'] _LAUNCHREQUEST = DESCRIPTOR.message_types_by_name['LaunchRequest'] _LAUNCHRESPONSE = DESCRIPTOR.message_types_by_name['LaunchResponse'] _PROPOSEACTIONREQUEST = DESCRIPTOR.message_types_by_name['ProposeActionRequest'] -_PROPOSEACTIONRESPONSE = DESCRIPTOR.message_types_by_name['ProposeActionResponse'] +_PROPOSEACTIONRESPONSE = DESCRIPTOR.message_types_by_name[ + 'ProposeActionResponse'] _GETOUTCOMEREQUEST = DESCRIPTOR.message_types_by_name['GetOutcomeRequest'] _GETOUTCOMERESPONSE = DESCRIPTOR.message_types_by_name['GetOutcomeResponse'] -_GETOUTCOMERESPONSE_OUTCOME = _GETOUTCOMERESPONSE.nested_types_by_name['Outcome'] -_FINALIZEEPISODEREQUEST = DESCRIPTOR.message_types_by_name['FinalizeEpisodeRequest'] 
-_FINALIZEEPISODERESPONSE = DESCRIPTOR.message_types_by_name['FinalizeEpisodeResponse'] +_GETOUTCOMERESPONSE_OUTCOME = _GETOUTCOMERESPONSE.nested_types_by_name[ + 'Outcome'] +_FINALIZEEPISODEREQUEST = DESCRIPTOR.message_types_by_name[ + 'FinalizeEpisodeRequest'] +_FINALIZEEPISODERESPONSE = DESCRIPTOR.message_types_by_name[ + 'FinalizeEpisodeResponse'] _TESTREQUEST = DESCRIPTOR.message_types_by_name['TestRequest'] _TESTRESPONSE = DESCRIPTOR.message_types_by_name['TestResponse'] _CREATEREQUEST = DESCRIPTOR.message_types_by_name['CreateRequest'] @@ -62,236 +73,340 @@ _WORKERALIVEREQUEST = DESCRIPTOR.message_types_by_name['WorkerAliveRequest'] _WORKERALIVERESPONSE = DESCRIPTOR.message_types_by_name['WorkerAliveResponse'] _ACME_REQUEST_STEPTYPE = _ACME_REQUEST.enum_types_by_name['StepType'] -_DECISIONPOINTRESPONSE_ACTIONTYPE = _DECISIONPOINTRESPONSE.enum_types_by_name['ActionType'] -_CURRENTSTATUSRESPONSE_STATUS = _CURRENTSTATUSRESPONSE.enum_types_by_name['Status'] -_GETOUTCOMERESPONSE_OUTCOME_STATUS = _GETOUTCOMERESPONSE_OUTCOME.enum_types_by_name['Status'] -_WORKERALIVERESPONSE_STATUSTYPE = _WORKERALIVERESPONSE.enum_types_by_name['StatusType'] -Acme_Request = _reflection.GeneratedProtocolMessageType('Acme_Request', (_message.Message,), { - - 'Observation' : _reflection.GeneratedProtocolMessageType('Observation', (_message.Message,), { - 'DESCRIPTOR' : _ACME_REQUEST_OBSERVATION, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.Acme_Request.Observation) +_DECISIONPOINTRESPONSE_ACTIONTYPE = _DECISIONPOINTRESPONSE.enum_types_by_name[ + 'ActionType'] +_CURRENTSTATUSRESPONSE_STATUS = _CURRENTSTATUSRESPONSE.enum_types_by_name[ + 'Status'] +_GETOUTCOMERESPONSE_OUTCOME_STATUS = _GETOUTCOMERESPONSE_OUTCOME.enum_types_by_name[ + 'Status'] +_WORKERALIVERESPONSE_STATUSTYPE = _WORKERALIVERESPONSE.enum_types_by_name[ + 'StatusType'] +Acme_Request = _reflection.GeneratedProtocolMessageType( + 'Acme_Request', + (_message.Message,), + { + 'Observation': + _reflection.GeneratedProtocolMessageType( + 'Observation', + (_message.Message,), + { + 'DESCRIPTOR': _ACME_REQUEST_OBSERVATION, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.Acme_Request.Observation) + }), + 'DESCRIPTOR': + _ACME_REQUEST, + '__module__': + 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.Acme_Request) }) - , - 'DESCRIPTOR' : _ACME_REQUEST, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.Acme_Request) - }) _sym_db.RegisterMessage(Acme_Request) _sym_db.RegisterMessage(Acme_Request.Observation) -Acme_Response = _reflection.GeneratedProtocolMessageType('Acme_Response', (_message.Message,), { - - 'Layer' : _reflection.GeneratedProtocolMessageType('Layer', (_message.Message,), { - - 'WeightsData' : _reflection.GeneratedProtocolMessageType('WeightsData', (_message.Message,), { - 'DESCRIPTOR' : _ACME_RESPONSE_LAYER_WEIGHTSDATA, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.Acme_Response.Layer.WeightsData) - }) - , - 'DESCRIPTOR' : _ACME_RESPONSE_LAYER, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.Acme_Response.Layer) +Acme_Response = _reflection.GeneratedProtocolMessageType( + 'Acme_Response', + (_message.Message,), + { + 'Layer': + _reflection.GeneratedProtocolMessageType( + 'Layer', + (_message.Message,), + 
{ + 'WeightsData': + _reflection.GeneratedProtocolMessageType( + 'WeightsData', + (_message.Message,), + { + 'DESCRIPTOR': _ACME_RESPONSE_LAYER_WEIGHTSDATA, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.Acme_Response.Layer.WeightsData) + }), + 'DESCRIPTOR': + _ACME_RESPONSE_LAYER, + '__module__': + 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.Acme_Response.Layer) + }), + 'DESCRIPTOR': + _ACME_RESPONSE, + '__module__': + 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.Acme_Response) }) - , - 'DESCRIPTOR' : _ACME_RESPONSE, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.Acme_Response) - }) _sym_db.RegisterMessage(Acme_Response) _sym_db.RegisterMessage(Acme_Response.Layer) _sym_db.RegisterMessage(Acme_Response.Layer.WeightsData) -DecisionPointRequest = _reflection.GeneratedProtocolMessageType('DecisionPointRequest', (_message.Message,), { - 'DESCRIPTOR' : _DECISIONPOINTREQUEST, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.DecisionPointRequest) - }) +DecisionPointRequest = _reflection.GeneratedProtocolMessageType( + 'DecisionPointRequest', + (_message.Message,), + { + 'DESCRIPTOR': _DECISIONPOINTREQUEST, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.DecisionPointRequest) + }) _sym_db.RegisterMessage(DecisionPointRequest) -DecisionPointResponse = _reflection.GeneratedProtocolMessageType('DecisionPointResponse', (_message.Message,), { - 'DESCRIPTOR' : _DECISIONPOINTRESPONSE, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.DecisionPointResponse) - }) +DecisionPointResponse = _reflection.GeneratedProtocolMessageType( + 'DecisionPointResponse', + (_message.Message,), + { + 'DESCRIPTOR': _DECISIONPOINTRESPONSE, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.DecisionPointResponse) + }) _sym_db.RegisterMessage(DecisionPointResponse) -FetchOptimalActionRequest = _reflection.GeneratedProtocolMessageType('FetchOptimalActionRequest', (_message.Message,), { - 'DESCRIPTOR' : _FETCHOPTIMALACTIONREQUEST, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.FetchOptimalActionRequest) - }) +FetchOptimalActionRequest = _reflection.GeneratedProtocolMessageType( + 'FetchOptimalActionRequest', + (_message.Message,), + { + 'DESCRIPTOR': _FETCHOPTIMALACTIONREQUEST, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.FetchOptimalActionRequest) + }) _sym_db.RegisterMessage(FetchOptimalActionRequest) -FetchOptimalActionResponse = _reflection.GeneratedProtocolMessageType('FetchOptimalActionResponse', (_message.Message,), { - 'DESCRIPTOR' : _FETCHOPTIMALACTIONRESPONSE, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.FetchOptimalActionResponse) - }) +FetchOptimalActionResponse = _reflection.GeneratedProtocolMessageType( + 'FetchOptimalActionResponse', + (_message.Message,), + { + 'DESCRIPTOR': _FETCHOPTIMALACTIONRESPONSE, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.FetchOptimalActionResponse) + }) _sym_db.RegisterMessage(FetchOptimalActionResponse) 
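For readers unfamiliar with reflection-generated protobufs: each `GeneratedProtocolMessageType` call above turns a descriptor into an ordinary Python class and registers it with the symbol database, so callers simply construct the classes. A hedged sketch, with field names taken from the serialized descriptor:

```python
from sight_service.proto import service_pb2

# client_id and worker_id are string fields per the descriptor above.
request = service_pb2.FetchOptimalActionRequest(client_id='1234',
                                                worker_id='0')
payload = request.SerializeToString()  # wire-format bytes

parsed = service_pb2.FetchOptimalActionRequest.FromString(payload)
assert parsed.client_id == '1234'
```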
-TellRequest = _reflection.GeneratedProtocolMessageType('TellRequest', (_message.Message,), { - 'DESCRIPTOR' : _TELLREQUEST, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.TellRequest) - }) +TellRequest = _reflection.GeneratedProtocolMessageType( + 'TellRequest', + (_message.Message,), + { + 'DESCRIPTOR': _TELLREQUEST, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.TellRequest) + }) _sym_db.RegisterMessage(TellRequest) -TellResponse = _reflection.GeneratedProtocolMessageType('TellResponse', (_message.Message,), { - 'DESCRIPTOR' : _TELLRESPONSE, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.TellResponse) - }) +TellResponse = _reflection.GeneratedProtocolMessageType( + 'TellResponse', + (_message.Message,), + { + 'DESCRIPTOR': _TELLRESPONSE, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.TellResponse) + }) _sym_db.RegisterMessage(TellResponse) -ListenRequest = _reflection.GeneratedProtocolMessageType('ListenRequest', (_message.Message,), { - 'DESCRIPTOR' : _LISTENREQUEST, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.ListenRequest) - }) +ListenRequest = _reflection.GeneratedProtocolMessageType( + 'ListenRequest', + (_message.Message,), + { + 'DESCRIPTOR': _LISTENREQUEST, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.ListenRequest) + }) _sym_db.RegisterMessage(ListenRequest) -ListenResponse = _reflection.GeneratedProtocolMessageType('ListenResponse', (_message.Message,), { - 'DESCRIPTOR' : _LISTENRESPONSE, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.ListenResponse) - }) +ListenResponse = _reflection.GeneratedProtocolMessageType( + 'ListenResponse', + (_message.Message,), + { + 'DESCRIPTOR': _LISTENRESPONSE, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.ListenResponse) + }) _sym_db.RegisterMessage(ListenResponse) -CurrentStatusRequest = _reflection.GeneratedProtocolMessageType('CurrentStatusRequest', (_message.Message,), { - 'DESCRIPTOR' : _CURRENTSTATUSREQUEST, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.CurrentStatusRequest) - }) +CurrentStatusRequest = _reflection.GeneratedProtocolMessageType( + 'CurrentStatusRequest', + (_message.Message,), + { + 'DESCRIPTOR': _CURRENTSTATUSREQUEST, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.CurrentStatusRequest) + }) _sym_db.RegisterMessage(CurrentStatusRequest) -CurrentStatusResponse = _reflection.GeneratedProtocolMessageType('CurrentStatusResponse', (_message.Message,), { - 'DESCRIPTOR' : _CURRENTSTATUSRESPONSE, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.CurrentStatusResponse) - }) +CurrentStatusResponse = _reflection.GeneratedProtocolMessageType( + 'CurrentStatusResponse', + (_message.Message,), + { + 'DESCRIPTOR': _CURRENTSTATUSRESPONSE, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.CurrentStatusResponse) + }) _sym_db.RegisterMessage(CurrentStatusResponse) -LaunchRequest = 
_reflection.GeneratedProtocolMessageType('LaunchRequest', (_message.Message,), { - 'DESCRIPTOR' : _LAUNCHREQUEST, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.LaunchRequest) - }) +LaunchRequest = _reflection.GeneratedProtocolMessageType( + 'LaunchRequest', + (_message.Message,), + { + 'DESCRIPTOR': _LAUNCHREQUEST, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.LaunchRequest) + }) _sym_db.RegisterMessage(LaunchRequest) -LaunchResponse = _reflection.GeneratedProtocolMessageType('LaunchResponse', (_message.Message,), { - 'DESCRIPTOR' : _LAUNCHRESPONSE, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.LaunchResponse) - }) +LaunchResponse = _reflection.GeneratedProtocolMessageType( + 'LaunchResponse', + (_message.Message,), + { + 'DESCRIPTOR': _LAUNCHRESPONSE, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.LaunchResponse) + }) _sym_db.RegisterMessage(LaunchResponse) -ProposeActionRequest = _reflection.GeneratedProtocolMessageType('ProposeActionRequest', (_message.Message,), { - 'DESCRIPTOR' : _PROPOSEACTIONREQUEST, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.ProposeActionRequest) - }) +ProposeActionRequest = _reflection.GeneratedProtocolMessageType( + 'ProposeActionRequest', + (_message.Message,), + { + 'DESCRIPTOR': _PROPOSEACTIONREQUEST, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.ProposeActionRequest) + }) _sym_db.RegisterMessage(ProposeActionRequest) -ProposeActionResponse = _reflection.GeneratedProtocolMessageType('ProposeActionResponse', (_message.Message,), { - 'DESCRIPTOR' : _PROPOSEACTIONRESPONSE, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.ProposeActionResponse) - }) +ProposeActionResponse = _reflection.GeneratedProtocolMessageType( + 'ProposeActionResponse', + (_message.Message,), + { + 'DESCRIPTOR': _PROPOSEACTIONRESPONSE, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.ProposeActionResponse) + }) _sym_db.RegisterMessage(ProposeActionResponse) -GetOutcomeRequest = _reflection.GeneratedProtocolMessageType('GetOutcomeRequest', (_message.Message,), { - 'DESCRIPTOR' : _GETOUTCOMEREQUEST, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.GetOutcomeRequest) - }) +GetOutcomeRequest = _reflection.GeneratedProtocolMessageType( + 'GetOutcomeRequest', + (_message.Message,), + { + 'DESCRIPTOR': _GETOUTCOMEREQUEST, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.GetOutcomeRequest) + }) _sym_db.RegisterMessage(GetOutcomeRequest) -GetOutcomeResponse = _reflection.GeneratedProtocolMessageType('GetOutcomeResponse', (_message.Message,), { - - 'Outcome' : _reflection.GeneratedProtocolMessageType('Outcome', (_message.Message,), { - 'DESCRIPTOR' : _GETOUTCOMERESPONSE_OUTCOME, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.GetOutcomeResponse.Outcome) +GetOutcomeResponse = _reflection.GeneratedProtocolMessageType( + 'GetOutcomeResponse', + (_message.Message,), + { + 'Outcome': + _reflection.GeneratedProtocolMessageType( + 'Outcome', + 
(_message.Message,), + { + 'DESCRIPTOR': _GETOUTCOMERESPONSE_OUTCOME, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.GetOutcomeResponse.Outcome) + }), + 'DESCRIPTOR': + _GETOUTCOMERESPONSE, + '__module__': + 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.GetOutcomeResponse) }) - , - 'DESCRIPTOR' : _GETOUTCOMERESPONSE, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.GetOutcomeResponse) - }) _sym_db.RegisterMessage(GetOutcomeResponse) _sym_db.RegisterMessage(GetOutcomeResponse.Outcome) -FinalizeEpisodeRequest = _reflection.GeneratedProtocolMessageType('FinalizeEpisodeRequest', (_message.Message,), { - 'DESCRIPTOR' : _FINALIZEEPISODEREQUEST, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.FinalizeEpisodeRequest) - }) +FinalizeEpisodeRequest = _reflection.GeneratedProtocolMessageType( + 'FinalizeEpisodeRequest', + (_message.Message,), + { + 'DESCRIPTOR': _FINALIZEEPISODEREQUEST, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.FinalizeEpisodeRequest) + }) _sym_db.RegisterMessage(FinalizeEpisodeRequest) -FinalizeEpisodeResponse = _reflection.GeneratedProtocolMessageType('FinalizeEpisodeResponse', (_message.Message,), { - 'DESCRIPTOR' : _FINALIZEEPISODERESPONSE, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.FinalizeEpisodeResponse) - }) +FinalizeEpisodeResponse = _reflection.GeneratedProtocolMessageType( + 'FinalizeEpisodeResponse', + (_message.Message,), + { + 'DESCRIPTOR': _FINALIZEEPISODERESPONSE, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.FinalizeEpisodeResponse) + }) _sym_db.RegisterMessage(FinalizeEpisodeResponse) -TestRequest = _reflection.GeneratedProtocolMessageType('TestRequest', (_message.Message,), { - 'DESCRIPTOR' : _TESTREQUEST, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.TestRequest) - }) +TestRequest = _reflection.GeneratedProtocolMessageType( + 'TestRequest', + (_message.Message,), + { + 'DESCRIPTOR': _TESTREQUEST, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.TestRequest) + }) _sym_db.RegisterMessage(TestRequest) -TestResponse = _reflection.GeneratedProtocolMessageType('TestResponse', (_message.Message,), { - 'DESCRIPTOR' : _TESTRESPONSE, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.TestResponse) - }) +TestResponse = _reflection.GeneratedProtocolMessageType( + 'TestResponse', + (_message.Message,), + { + 'DESCRIPTOR': _TESTRESPONSE, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.TestResponse) + }) _sym_db.RegisterMessage(TestResponse) -CreateRequest = _reflection.GeneratedProtocolMessageType('CreateRequest', (_message.Message,), { - 'DESCRIPTOR' : _CREATEREQUEST, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.CreateRequest) - }) +CreateRequest = _reflection.GeneratedProtocolMessageType( + 'CreateRequest', + (_message.Message,), + { + 'DESCRIPTOR': _CREATEREQUEST, + '__module__': 'sight_service.proto.service_pb2' + # 
@@protoc_insertion_point(class_scope:sight.x.service.CreateRequest) + }) _sym_db.RegisterMessage(CreateRequest) -CreateResponse = _reflection.GeneratedProtocolMessageType('CreateResponse', (_message.Message,), { - 'DESCRIPTOR' : _CREATERESPONSE, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.CreateResponse) - }) +CreateResponse = _reflection.GeneratedProtocolMessageType( + 'CreateResponse', + (_message.Message,), + { + 'DESCRIPTOR': _CREATERESPONSE, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.CreateResponse) + }) _sym_db.RegisterMessage(CreateResponse) -CloseRequest = _reflection.GeneratedProtocolMessageType('CloseRequest', (_message.Message,), { - 'DESCRIPTOR' : _CLOSEREQUEST, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.CloseRequest) - }) +CloseRequest = _reflection.GeneratedProtocolMessageType( + 'CloseRequest', + (_message.Message,), + { + 'DESCRIPTOR': _CLOSEREQUEST, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.CloseRequest) + }) _sym_db.RegisterMessage(CloseRequest) -CloseResponse = _reflection.GeneratedProtocolMessageType('CloseResponse', (_message.Message,), { - 'DESCRIPTOR' : _CLOSERESPONSE, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.CloseResponse) - }) +CloseResponse = _reflection.GeneratedProtocolMessageType( + 'CloseResponse', + (_message.Message,), + { + 'DESCRIPTOR': _CLOSERESPONSE, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.CloseResponse) + }) _sym_db.RegisterMessage(CloseResponse) -WorkerAliveRequest = _reflection.GeneratedProtocolMessageType('WorkerAliveRequest', (_message.Message,), { - 'DESCRIPTOR' : _WORKERALIVEREQUEST, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.WorkerAliveRequest) - }) +WorkerAliveRequest = _reflection.GeneratedProtocolMessageType( + 'WorkerAliveRequest', + (_message.Message,), + { + 'DESCRIPTOR': _WORKERALIVEREQUEST, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.WorkerAliveRequest) + }) _sym_db.RegisterMessage(WorkerAliveRequest) -WorkerAliveResponse = _reflection.GeneratedProtocolMessageType('WorkerAliveResponse', (_message.Message,), { - 'DESCRIPTOR' : _WORKERALIVERESPONSE, - '__module__' : 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.WorkerAliveResponse) - }) +WorkerAliveResponse = _reflection.GeneratedProtocolMessageType( + 'WorkerAliveResponse', + (_message.Message,), + { + 'DESCRIPTOR': _WORKERALIVERESPONSE, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.WorkerAliveResponse) + }) _sym_db.RegisterMessage(WorkerAliveResponse) _SIGHTSERVICE = DESCRIPTOR.services_by_name['SightService'] @@ -299,103 +414,114 @@ DESCRIPTOR._options = None _SIGHTSERVICE.methods_by_name['Test']._options = None - _SIGHTSERVICE.methods_by_name['Test']._serialized_options = b'\202\323\344\223\002\026\022\024/v1/test/{client_id}' + _SIGHTSERVICE.methods_by_name[ + 'Test']._serialized_options = b'\202\323\344\223\002\026\022\024/v1/test/{client_id}' _SIGHTSERVICE.methods_by_name['Create']._options = None - _SIGHTSERVICE.methods_by_name['Create']._serialized_options = 
b'\202\323\344\223\002\014\022\n/v1/create' + _SIGHTSERVICE.methods_by_name[ + 'Create']._serialized_options = b'\202\323\344\223\002\014\022\n/v1/create' _SIGHTSERVICE.methods_by_name['Close']._options = None - _SIGHTSERVICE.methods_by_name['Close']._serialized_options = b'\202\323\344\223\002\013\022\t/v1/Close' + _SIGHTSERVICE.methods_by_name[ + 'Close']._serialized_options = b'\202\323\344\223\002\013\022\t/v1/Close' _SIGHTSERVICE.methods_by_name['WorkerAlive']._options = None - _SIGHTSERVICE.methods_by_name['WorkerAlive']._serialized_options = b'\202\323\344\223\002\021\022\017/v1/WorkerAlive' + _SIGHTSERVICE.methods_by_name[ + 'WorkerAlive']._serialized_options = b'\202\323\344\223\002\021\022\017/v1/WorkerAlive' _SIGHTSERVICE.methods_by_name['Launch']._options = None - _SIGHTSERVICE.methods_by_name['Launch']._serialized_options = b'\202\323\344\223\0028\"\036/v1/launch/{client_id}/{label}:\026decision_config_params' + _SIGHTSERVICE.methods_by_name[ + 'Launch']._serialized_options = b'\202\323\344\223\0028\"\036/v1/launch/{client_id}/{label}:\026decision_config_params' _SIGHTSERVICE.methods_by_name['DecisionPoint']._options = None - _SIGHTSERVICE.methods_by_name['DecisionPoint']._serialized_options = b'\202\323\344\223\0029\"*/v1/decision_point/{client_id}/{worker_id}:\013acme_config' + _SIGHTSERVICE.methods_by_name[ + 'DecisionPoint']._serialized_options = b'\202\323\344\223\0029\"*/v1/decision_point/{client_id}/{worker_id}:\013acme_config' _SIGHTSERVICE.methods_by_name['Tell']._options = None - _SIGHTSERVICE.methods_by_name['Tell']._serialized_options = b'\202\323\344\223\002\"\" /v1/tell/{client_id}/{worker_id}' + _SIGHTSERVICE.methods_by_name[ + 'Tell']._serialized_options = b'\202\323\344\223\002\"\" /v1/tell/{client_id}/{worker_id}' _SIGHTSERVICE.methods_by_name['Listen']._options = None - _SIGHTSERVICE.methods_by_name['Listen']._serialized_options = b'\202\323\344\223\002$\022\"/v1/listen/{client_id}/{worker_id}' + _SIGHTSERVICE.methods_by_name[ + 'Listen']._serialized_options = b'\202\323\344\223\002$\022\"/v1/listen/{client_id}/{worker_id}' _SIGHTSERVICE.methods_by_name['CurrentStatus']._options = None - _SIGHTSERVICE.methods_by_name['CurrentStatus']._serialized_options = b'\202\323\344\223\002,\022*/v1/current_status/{client_id}/{worker_id}' + _SIGHTSERVICE.methods_by_name[ + 'CurrentStatus']._serialized_options = b'\202\323\344\223\002,\022*/v1/current_status/{client_id}/{worker_id}' _SIGHTSERVICE.methods_by_name['FetchOptimalAction']._options = None - _SIGHTSERVICE.methods_by_name['FetchOptimalAction']._serialized_options = b'\202\323\344\223\0022\0220/v1/fetch_optimal_action/{client_id}/{worker_id}' + _SIGHTSERVICE.methods_by_name[ + 'FetchOptimalAction']._serialized_options = b'\202\323\344\223\0022\0220/v1/fetch_optimal_action/{client_id}/{worker_id}' _SIGHTSERVICE.methods_by_name['FinalizeEpisode']._options = None - _SIGHTSERVICE.methods_by_name['FinalizeEpisode']._serialized_options = b'\202\323\344\223\002;\",/v1/finalize_episode/{client_id}/{worker_id}:\013acme_config' - _LOGFORMAT._serialized_start=3645 - _LOGFORMAT._serialized_end=3736 - _ACME_REQUEST._serialized_start=163 - _ACME_REQUEST._serialized_end=584 - _ACME_REQUEST_OBSERVATION._serialized_start=275 - _ACME_REQUEST_OBSERVATION._serialized_end=529 - _ACME_REQUEST_STEPTYPE._serialized_start=531 - _ACME_REQUEST_STEPTYPE._serialized_end=584 - _ACME_RESPONSE._serialized_start=587 - _ACME_RESPONSE._serialized_end=843 - _ACME_RESPONSE_LAYER._serialized_start=659 - _ACME_RESPONSE_LAYER._serialized_end=843 - 
_ACME_RESPONSE_LAYER_WEIGHTSDATA._serialized_start=749 - _ACME_RESPONSE_LAYER_WEIGHTSDATA._serialized_end=843 - _DECISIONPOINTREQUEST._serialized_start=846 - _DECISIONPOINTREQUEST._serialized_end=1070 - _DECISIONPOINTRESPONSE._serialized_start=1073 - _DECISIONPOINTRESPONSE._serialized_end=1360 - _DECISIONPOINTRESPONSE_ACTIONTYPE._serialized_start=1293 - _DECISIONPOINTRESPONSE_ACTIONTYPE._serialized_end=1360 - _FETCHOPTIMALACTIONREQUEST._serialized_start=1362 - _FETCHOPTIMALACTIONREQUEST._serialized_end=1427 - _FETCHOPTIMALACTIONRESPONSE._serialized_start=1429 - _FETCHOPTIMALACTIONRESPONSE._serialized_end=1479 - _TELLREQUEST._serialized_start=1481 - _TELLREQUEST._serialized_end=1534 - _TELLRESPONSE._serialized_start=1536 - _TELLRESPONSE._serialized_end=1572 - _LISTENREQUEST._serialized_start=1574 - _LISTENREQUEST._serialized_end=1608 - _LISTENRESPONSE._serialized_start=1610 - _LISTENRESPONSE._serialized_end=1672 - _CURRENTSTATUSREQUEST._serialized_start=1674 - _CURRENTSTATUSREQUEST._serialized_end=1734 - _CURRENTSTATUSRESPONSE._serialized_start=1737 - _CURRENTSTATUSRESPONSE._serialized_end=1911 - _CURRENTSTATUSRESPONSE_STATUS._serialized_start=1847 - _CURRENTSTATUSRESPONSE_STATUS._serialized_end=1911 - _LAUNCHREQUEST._serialized_start=1913 - _LAUNCHREQUEST._serialized_end=2037 - _LAUNCHRESPONSE._serialized_start=2039 - _LAUNCHRESPONSE._serialized_end=2079 - _PROPOSEACTIONREQUEST._serialized_start=2082 - _PROPOSEACTIONREQUEST._serialized_end=2225 - _PROPOSEACTIONRESPONSE._serialized_start=2227 - _PROPOSEACTIONRESPONSE._serialized_end=2269 - _GETOUTCOMEREQUEST._serialized_start=2271 - _GETOUTCOMEREQUEST._serialized_end=2329 - _GETOUTCOMERESPONSE._serialized_start=2332 - _GETOUTCOMERESPONSE._serialized_end=2835 - _GETOUTCOMERESPONSE_OUTCOME._serialized_start=2417 - _GETOUTCOMERESPONSE_OUTCOME._serialized_end=2835 - _GETOUTCOMERESPONSE_OUTCOME_STATUS._serialized_start=2759 - _GETOUTCOMERESPONSE_OUTCOME_STATUS._serialized_end=2835 - _FINALIZEEPISODEREQUEST._serialized_start=2838 - _FINALIZEEPISODEREQUEST._serialized_end=3145 - _FINALIZEEPISODERESPONSE._serialized_start=3147 - _FINALIZEEPISODERESPONSE._serialized_end=3215 - _TESTREQUEST._serialized_start=3217 - _TESTREQUEST._serialized_end=3249 - _TESTRESPONSE._serialized_start=3251 - _TESTRESPONSE._serialized_end=3278 - _CREATEREQUEST._serialized_start=3280 - _CREATEREQUEST._serialized_end=3295 - _CREATERESPONSE._serialized_start=3297 - _CREATERESPONSE._serialized_end=3346 - _CLOSEREQUEST._serialized_start=3348 - _CLOSEREQUEST._serialized_end=3381 - _CLOSERESPONSE._serialized_start=3383 - _CLOSERESPONSE._serialized_end=3420 - _WORKERALIVEREQUEST._serialized_start=3422 - _WORKERALIVEREQUEST._serialized_end=3480 - _WORKERALIVERESPONSE._serialized_start=3483 - _WORKERALIVERESPONSE._serialized_end=3643 - _WORKERALIVERESPONSE_STATUSTYPE._serialized_start=3576 - _WORKERALIVERESPONSE_STATUSTYPE._serialized_end=3643 - _SIGHTSERVICE._serialized_start=3739 - _SIGHTSERVICE._serialized_end=5361 + _SIGHTSERVICE.methods_by_name[ + 'FinalizeEpisode']._serialized_options = b'\202\323\344\223\002;\",/v1/finalize_episode/{client_id}/{worker_id}:\013acme_config' + _LOGFORMAT._serialized_start = 3645 + _LOGFORMAT._serialized_end = 3736 + _ACME_REQUEST._serialized_start = 163 + _ACME_REQUEST._serialized_end = 584 + _ACME_REQUEST_OBSERVATION._serialized_start = 275 + _ACME_REQUEST_OBSERVATION._serialized_end = 529 + _ACME_REQUEST_STEPTYPE._serialized_start = 531 + _ACME_REQUEST_STEPTYPE._serialized_end = 584 + _ACME_RESPONSE._serialized_start = 587 + 
_ACME_RESPONSE._serialized_end = 843 + _ACME_RESPONSE_LAYER._serialized_start = 659 + _ACME_RESPONSE_LAYER._serialized_end = 843 + _ACME_RESPONSE_LAYER_WEIGHTSDATA._serialized_start = 749 + _ACME_RESPONSE_LAYER_WEIGHTSDATA._serialized_end = 843 + _DECISIONPOINTREQUEST._serialized_start = 846 + _DECISIONPOINTREQUEST._serialized_end = 1070 + _DECISIONPOINTRESPONSE._serialized_start = 1073 + _DECISIONPOINTRESPONSE._serialized_end = 1360 + _DECISIONPOINTRESPONSE_ACTIONTYPE._serialized_start = 1293 + _DECISIONPOINTRESPONSE_ACTIONTYPE._serialized_end = 1360 + _FETCHOPTIMALACTIONREQUEST._serialized_start = 1362 + _FETCHOPTIMALACTIONREQUEST._serialized_end = 1427 + _FETCHOPTIMALACTIONRESPONSE._serialized_start = 1429 + _FETCHOPTIMALACTIONRESPONSE._serialized_end = 1479 + _TELLREQUEST._serialized_start = 1481 + _TELLREQUEST._serialized_end = 1534 + _TELLRESPONSE._serialized_start = 1536 + _TELLRESPONSE._serialized_end = 1572 + _LISTENREQUEST._serialized_start = 1574 + _LISTENREQUEST._serialized_end = 1608 + _LISTENRESPONSE._serialized_start = 1610 + _LISTENRESPONSE._serialized_end = 1672 + _CURRENTSTATUSREQUEST._serialized_start = 1674 + _CURRENTSTATUSREQUEST._serialized_end = 1734 + _CURRENTSTATUSRESPONSE._serialized_start = 1737 + _CURRENTSTATUSRESPONSE._serialized_end = 1911 + _CURRENTSTATUSRESPONSE_STATUS._serialized_start = 1847 + _CURRENTSTATUSRESPONSE_STATUS._serialized_end = 1911 + _LAUNCHREQUEST._serialized_start = 1913 + _LAUNCHREQUEST._serialized_end = 2037 + _LAUNCHRESPONSE._serialized_start = 2039 + _LAUNCHRESPONSE._serialized_end = 2079 + _PROPOSEACTIONREQUEST._serialized_start = 2082 + _PROPOSEACTIONREQUEST._serialized_end = 2225 + _PROPOSEACTIONRESPONSE._serialized_start = 2227 + _PROPOSEACTIONRESPONSE._serialized_end = 2269 + _GETOUTCOMEREQUEST._serialized_start = 2271 + _GETOUTCOMEREQUEST._serialized_end = 2329 + _GETOUTCOMERESPONSE._serialized_start = 2332 + _GETOUTCOMERESPONSE._serialized_end = 2835 + _GETOUTCOMERESPONSE_OUTCOME._serialized_start = 2417 + _GETOUTCOMERESPONSE_OUTCOME._serialized_end = 2835 + _GETOUTCOMERESPONSE_OUTCOME_STATUS._serialized_start = 2759 + _GETOUTCOMERESPONSE_OUTCOME_STATUS._serialized_end = 2835 + _FINALIZEEPISODEREQUEST._serialized_start = 2838 + _FINALIZEEPISODEREQUEST._serialized_end = 3145 + _FINALIZEEPISODERESPONSE._serialized_start = 3147 + _FINALIZEEPISODERESPONSE._serialized_end = 3215 + _TESTREQUEST._serialized_start = 3217 + _TESTREQUEST._serialized_end = 3249 + _TESTRESPONSE._serialized_start = 3251 + _TESTRESPONSE._serialized_end = 3278 + _CREATEREQUEST._serialized_start = 3280 + _CREATEREQUEST._serialized_end = 3295 + _CREATERESPONSE._serialized_start = 3297 + _CREATERESPONSE._serialized_end = 3346 + _CLOSEREQUEST._serialized_start = 3348 + _CLOSEREQUEST._serialized_end = 3381 + _CLOSERESPONSE._serialized_start = 3383 + _CLOSERESPONSE._serialized_end = 3420 + _WORKERALIVEREQUEST._serialized_start = 3422 + _WORKERALIVEREQUEST._serialized_end = 3480 + _WORKERALIVERESPONSE._serialized_start = 3483 + _WORKERALIVERESPONSE._serialized_end = 3643 + _WORKERALIVERESPONSE_STATUSTYPE._serialized_start = 3576 + _WORKERALIVERESPONSE_STATUSTYPE._serialized_end = 3643 + _SIGHTSERVICE._serialized_start = 3739 + _SIGHTSERVICE._serialized_end = 5361 # @@protoc_insertion_point(module_scope) diff --git a/sight_service/proto/service_pb2_grpc.py b/sight_service/proto/service_pb2_grpc.py index 07e1f35..bdad566 100644 --- a/sight_service/proto/service_pb2_grpc.py +++ b/sight_service/proto/service_pb2_grpc.py @@ -1,461 +1,360 @@ # Generated by the gRPC Python 
protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc - -from sight_service.proto import service_pb2 as sight__service_dot_proto_dot_service__pb2 +from sight_service.proto import ( + service_pb2 as sight__service_dot_proto_dot_service__pb2 +) class SightServiceStub(object): - """This API manages Sight logs, their creation and access to them. + """This API manages Sight logs, their creation and access to them. """ - def __init__(self, channel): - """Constructor. + def __init__(self, channel): + """Constructor. Args: channel: A grpc.Channel. """ - self.Test = channel.unary_unary( - '/sight.x.service.SightService/Test', - request_serializer=sight__service_dot_proto_dot_service__pb2.TestRequest.SerializeToString, - response_deserializer=sight__service_dot_proto_dot_service__pb2.TestResponse.FromString, - ) - self.Create = channel.unary_unary( - '/sight.x.service.SightService/Create', - request_serializer=sight__service_dot_proto_dot_service__pb2.CreateRequest.SerializeToString, - response_deserializer=sight__service_dot_proto_dot_service__pb2.CreateResponse.FromString, - ) - self.Close = channel.unary_unary( - '/sight.x.service.SightService/Close', - request_serializer=sight__service_dot_proto_dot_service__pb2.CloseRequest.SerializeToString, - response_deserializer=sight__service_dot_proto_dot_service__pb2.CloseResponse.FromString, - ) - self.WorkerAlive = channel.unary_unary( - '/sight.x.service.SightService/WorkerAlive', - request_serializer=sight__service_dot_proto_dot_service__pb2.WorkerAliveRequest.SerializeToString, - response_deserializer=sight__service_dot_proto_dot_service__pb2.WorkerAliveResponse.FromString, - ) - self.Launch = channel.unary_unary( - '/sight.x.service.SightService/Launch', - request_serializer=sight__service_dot_proto_dot_service__pb2.LaunchRequest.SerializeToString, - response_deserializer=sight__service_dot_proto_dot_service__pb2.LaunchResponse.FromString, - ) - self.DecisionPoint = channel.unary_unary( - '/sight.x.service.SightService/DecisionPoint', - request_serializer=sight__service_dot_proto_dot_service__pb2.DecisionPointRequest.SerializeToString, - response_deserializer=sight__service_dot_proto_dot_service__pb2.DecisionPointResponse.FromString, - ) - self.Tell = channel.unary_unary( - '/sight.x.service.SightService/Tell', - request_serializer=sight__service_dot_proto_dot_service__pb2.TellRequest.SerializeToString, - response_deserializer=sight__service_dot_proto_dot_service__pb2.TellResponse.FromString, - ) - self.Listen = channel.unary_unary( - '/sight.x.service.SightService/Listen', - request_serializer=sight__service_dot_proto_dot_service__pb2.ListenRequest.SerializeToString, - response_deserializer=sight__service_dot_proto_dot_service__pb2.ListenResponse.FromString, - ) - self.CurrentStatus = channel.unary_unary( - '/sight.x.service.SightService/CurrentStatus', - request_serializer=sight__service_dot_proto_dot_service__pb2.CurrentStatusRequest.SerializeToString, - response_deserializer=sight__service_dot_proto_dot_service__pb2.CurrentStatusResponse.FromString, - ) - self.FetchOptimalAction = channel.unary_unary( - '/sight.x.service.SightService/FetchOptimalAction', - request_serializer=sight__service_dot_proto_dot_service__pb2.FetchOptimalActionRequest.SerializeToString, - response_deserializer=sight__service_dot_proto_dot_service__pb2.FetchOptimalActionResponse.FromString, - ) - self.ProposeAction = channel.unary_unary( - '/sight.x.service.SightService/ProposeAction', - 
request_serializer=sight__service_dot_proto_dot_service__pb2.ProposeActionRequest.SerializeToString, - response_deserializer=sight__service_dot_proto_dot_service__pb2.ProposeActionResponse.FromString, - ) - self.GetOutcome = channel.unary_unary( - '/sight.x.service.SightService/GetOutcome', - request_serializer=sight__service_dot_proto_dot_service__pb2.GetOutcomeRequest.SerializeToString, - response_deserializer=sight__service_dot_proto_dot_service__pb2.GetOutcomeResponse.FromString, - ) - self.FinalizeEpisode = channel.unary_unary( - '/sight.x.service.SightService/FinalizeEpisode', - request_serializer=sight__service_dot_proto_dot_service__pb2.FinalizeEpisodeRequest.SerializeToString, - response_deserializer=sight__service_dot_proto_dot_service__pb2.FinalizeEpisodeResponse.FromString, - ) + self.Test = channel.unary_unary( + '/sight.x.service.SightService/Test', + request_serializer=sight__service_dot_proto_dot_service__pb2. + TestRequest.SerializeToString, + response_deserializer=sight__service_dot_proto_dot_service__pb2. + TestResponse.FromString, + ) + self.Create = channel.unary_unary( + '/sight.x.service.SightService/Create', + request_serializer=sight__service_dot_proto_dot_service__pb2. + CreateRequest.SerializeToString, + response_deserializer=sight__service_dot_proto_dot_service__pb2. + CreateResponse.FromString, + ) + self.Close = channel.unary_unary( + '/sight.x.service.SightService/Close', + request_serializer=sight__service_dot_proto_dot_service__pb2. + CloseRequest.SerializeToString, + response_deserializer=sight__service_dot_proto_dot_service__pb2. + CloseResponse.FromString, + ) + self.WorkerAlive = channel.unary_unary( + '/sight.x.service.SightService/WorkerAlive', + request_serializer=sight__service_dot_proto_dot_service__pb2. + WorkerAliveRequest.SerializeToString, + response_deserializer=sight__service_dot_proto_dot_service__pb2. + WorkerAliveResponse.FromString, + ) + self.Launch = channel.unary_unary( + '/sight.x.service.SightService/Launch', + request_serializer=sight__service_dot_proto_dot_service__pb2. + LaunchRequest.SerializeToString, + response_deserializer=sight__service_dot_proto_dot_service__pb2. + LaunchResponse.FromString, + ) + self.DecisionPoint = channel.unary_unary( + '/sight.x.service.SightService/DecisionPoint', + request_serializer=sight__service_dot_proto_dot_service__pb2. + DecisionPointRequest.SerializeToString, + response_deserializer=sight__service_dot_proto_dot_service__pb2. + DecisionPointResponse.FromString, + ) + self.Tell = channel.unary_unary( + '/sight.x.service.SightService/Tell', + request_serializer=sight__service_dot_proto_dot_service__pb2. + TellRequest.SerializeToString, + response_deserializer=sight__service_dot_proto_dot_service__pb2. + TellResponse.FromString, + ) + self.Listen = channel.unary_unary( + '/sight.x.service.SightService/Listen', + request_serializer=sight__service_dot_proto_dot_service__pb2. + ListenRequest.SerializeToString, + response_deserializer=sight__service_dot_proto_dot_service__pb2. + ListenResponse.FromString, + ) + self.CurrentStatus = channel.unary_unary( + '/sight.x.service.SightService/CurrentStatus', + request_serializer=sight__service_dot_proto_dot_service__pb2. + CurrentStatusRequest.SerializeToString, + response_deserializer=sight__service_dot_proto_dot_service__pb2. + CurrentStatusResponse.FromString, + ) + self.FetchOptimalAction = channel.unary_unary( + '/sight.x.service.SightService/FetchOptimalAction', + request_serializer=sight__service_dot_proto_dot_service__pb2. 
+ FetchOptimalActionRequest.SerializeToString, + response_deserializer=sight__service_dot_proto_dot_service__pb2. + FetchOptimalActionResponse.FromString, + ) + self.ProposeAction = channel.unary_unary( + '/sight.x.service.SightService/ProposeAction', + request_serializer=sight__service_dot_proto_dot_service__pb2. + ProposeActionRequest.SerializeToString, + response_deserializer=sight__service_dot_proto_dot_service__pb2. + ProposeActionResponse.FromString, + ) + self.GetOutcome = channel.unary_unary( + '/sight.x.service.SightService/GetOutcome', + request_serializer=sight__service_dot_proto_dot_service__pb2. + GetOutcomeRequest.SerializeToString, + response_deserializer=sight__service_dot_proto_dot_service__pb2. + GetOutcomeResponse.FromString, + ) + self.FinalizeEpisode = channel.unary_unary( + '/sight.x.service.SightService/FinalizeEpisode', + request_serializer=sight__service_dot_proto_dot_service__pb2. + FinalizeEpisodeRequest.SerializeToString, + response_deserializer=sight__service_dot_proto_dot_service__pb2. + FinalizeEpisodeResponse.FromString, + ) class SightServiceServicer(object): - """This API manages Sight logs, their creation and access to them. + """This API manages Sight logs, their creation and access to them. """ - def Test(self, request, context): - """A test request to validate that the service is up. + def Test(self, request, context): + """A test request to validate that the service is up. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') - def Create(self, request, context): - """rpc PrintInsertionTime(TestRequest) returns (TestResponse) {} + def Create(self, request, context): + """rpc PrintInsertionTime(TestRequest) returns (TestResponse) {} Creates a new Sight log. 
""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def Close(self, request, context): - """Missing associated documentation comment in .proto file.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def WorkerAlive(self, request, context): - """Missing associated documentation comment in .proto file.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def Launch(self, request, context): - """Missing associated documentation comment in .proto file.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def DecisionPoint(self, request, context): - """rpc GetWeights(GetWeightsRequest) returns (GetWeightsResponse) {} + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Close(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def WorkerAlive(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Launch(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DecisionPoint(self, request, context): + """rpc GetWeights(GetWeightsRequest) returns (GetWeightsResponse) {} """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') - def Tell(self, request, context): - """rpc DecisionOutcome(DecisionOutcomeRequest) + def Tell(self, request, context): + """rpc DecisionOutcome(DecisionOutcomeRequest) returns (DecisionOutcomeResponse) {} rpc CopyDataToReplayServer(CopyDataToReplayServerRequest) returns (CopyDataToReplayServerResponse) {} """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def Listen(self, request, context): - """Missing associated documentation comment in .proto file.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def CurrentStatus(self, request, context): - """Missing associated documentation comment in .proto file.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def FetchOptimalAction(self, request, context): - """Missing associated documentation comment in .proto file.""" - 
context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def ProposeAction(self, request, context): - """Missing associated documentation comment in .proto file.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def GetOutcome(self, request, context): - """Missing associated documentation comment in .proto file.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def FinalizeEpisode(self, request, context): - """Missing associated documentation comment in .proto file.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Listen(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CurrentStatus(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def FetchOptimalAction(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ProposeAction(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetOutcome(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def FinalizeEpisode(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') def add_SightServiceServicer_to_server(servicer, server): - rpc_method_handlers = { - 'Test': grpc.unary_unary_rpc_method_handler( - servicer.Test, - request_deserializer=sight__service_dot_proto_dot_service__pb2.TestRequest.FromString, - response_serializer=sight__service_dot_proto_dot_service__pb2.TestResponse.SerializeToString, - ), - 'Create': grpc.unary_unary_rpc_method_handler( - servicer.Create, - request_deserializer=sight__service_dot_proto_dot_service__pb2.CreateRequest.FromString, - response_serializer=sight__service_dot_proto_dot_service__pb2.CreateResponse.SerializeToString, - ), - 'Close': grpc.unary_unary_rpc_method_handler( - servicer.Close, - request_deserializer=sight__service_dot_proto_dot_service__pb2.CloseRequest.FromString, - response_serializer=sight__service_dot_proto_dot_service__pb2.CloseResponse.SerializeToString, - ), 
- 'WorkerAlive': grpc.unary_unary_rpc_method_handler( - servicer.WorkerAlive, - request_deserializer=sight__service_dot_proto_dot_service__pb2.WorkerAliveRequest.FromString, - response_serializer=sight__service_dot_proto_dot_service__pb2.WorkerAliveResponse.SerializeToString, - ), - 'Launch': grpc.unary_unary_rpc_method_handler( - servicer.Launch, - request_deserializer=sight__service_dot_proto_dot_service__pb2.LaunchRequest.FromString, - response_serializer=sight__service_dot_proto_dot_service__pb2.LaunchResponse.SerializeToString, - ), - 'DecisionPoint': grpc.unary_unary_rpc_method_handler( - servicer.DecisionPoint, - request_deserializer=sight__service_dot_proto_dot_service__pb2.DecisionPointRequest.FromString, - response_serializer=sight__service_dot_proto_dot_service__pb2.DecisionPointResponse.SerializeToString, - ), - 'Tell': grpc.unary_unary_rpc_method_handler( - servicer.Tell, - request_deserializer=sight__service_dot_proto_dot_service__pb2.TellRequest.FromString, - response_serializer=sight__service_dot_proto_dot_service__pb2.TellResponse.SerializeToString, - ), - 'Listen': grpc.unary_unary_rpc_method_handler( - servicer.Listen, - request_deserializer=sight__service_dot_proto_dot_service__pb2.ListenRequest.FromString, - response_serializer=sight__service_dot_proto_dot_service__pb2.ListenResponse.SerializeToString, - ), - 'CurrentStatus': grpc.unary_unary_rpc_method_handler( - servicer.CurrentStatus, - request_deserializer=sight__service_dot_proto_dot_service__pb2.CurrentStatusRequest.FromString, - response_serializer=sight__service_dot_proto_dot_service__pb2.CurrentStatusResponse.SerializeToString, - ), - 'FetchOptimalAction': grpc.unary_unary_rpc_method_handler( - servicer.FetchOptimalAction, - request_deserializer=sight__service_dot_proto_dot_service__pb2.FetchOptimalActionRequest.FromString, - response_serializer=sight__service_dot_proto_dot_service__pb2.FetchOptimalActionResponse.SerializeToString, - ), - 'ProposeAction': grpc.unary_unary_rpc_method_handler( - servicer.ProposeAction, - request_deserializer=sight__service_dot_proto_dot_service__pb2.ProposeActionRequest.FromString, - response_serializer=sight__service_dot_proto_dot_service__pb2.ProposeActionResponse.SerializeToString, - ), - 'GetOutcome': grpc.unary_unary_rpc_method_handler( - servicer.GetOutcome, - request_deserializer=sight__service_dot_proto_dot_service__pb2.GetOutcomeRequest.FromString, - response_serializer=sight__service_dot_proto_dot_service__pb2.GetOutcomeResponse.SerializeToString, - ), - 'FinalizeEpisode': grpc.unary_unary_rpc_method_handler( - servicer.FinalizeEpisode, - request_deserializer=sight__service_dot_proto_dot_service__pb2.FinalizeEpisodeRequest.FromString, - response_serializer=sight__service_dot_proto_dot_service__pb2.FinalizeEpisodeResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'sight.x.service.SightService', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - - # This class is part of an EXPERIMENTAL API. + rpc_method_handlers = { + 'Test': + grpc.unary_unary_rpc_method_handler( + servicer.Test, + request_deserializer=sight__service_dot_proto_dot_service__pb2. + TestRequest.FromString, + response_serializer=sight__service_dot_proto_dot_service__pb2. + TestResponse.SerializeToString, + ), + 'Create': + grpc.unary_unary_rpc_method_handler( + servicer.Create, + request_deserializer=sight__service_dot_proto_dot_service__pb2. + CreateRequest.FromString, + response_serializer=sight__service_dot_proto_dot_service__pb2. 
+ CreateResponse.SerializeToString, + ), + 'Close': + grpc.unary_unary_rpc_method_handler( + servicer.Close, + request_deserializer=sight__service_dot_proto_dot_service__pb2. + CloseRequest.FromString, + response_serializer=sight__service_dot_proto_dot_service__pb2. + CloseResponse.SerializeToString, + ), + 'WorkerAlive': + grpc.unary_unary_rpc_method_handler( + servicer.WorkerAlive, + request_deserializer=sight__service_dot_proto_dot_service__pb2. + WorkerAliveRequest.FromString, + response_serializer=sight__service_dot_proto_dot_service__pb2. + WorkerAliveResponse.SerializeToString, + ), + 'Launch': + grpc.unary_unary_rpc_method_handler( + servicer.Launch, + request_deserializer=sight__service_dot_proto_dot_service__pb2. + LaunchRequest.FromString, + response_serializer=sight__service_dot_proto_dot_service__pb2. + LaunchResponse.SerializeToString, + ), + 'DecisionPoint': + grpc.unary_unary_rpc_method_handler( + servicer.DecisionPoint, + request_deserializer=sight__service_dot_proto_dot_service__pb2. + DecisionPointRequest.FromString, + response_serializer=sight__service_dot_proto_dot_service__pb2. + DecisionPointResponse.SerializeToString, + ), + 'Tell': + grpc.unary_unary_rpc_method_handler( + servicer.Tell, + request_deserializer=sight__service_dot_proto_dot_service__pb2. + TellRequest.FromString, + response_serializer=sight__service_dot_proto_dot_service__pb2. + TellResponse.SerializeToString, + ), + 'Listen': + grpc.unary_unary_rpc_method_handler( + servicer.Listen, + request_deserializer=sight__service_dot_proto_dot_service__pb2. + ListenRequest.FromString, + response_serializer=sight__service_dot_proto_dot_service__pb2. + ListenResponse.SerializeToString, + ), + 'CurrentStatus': + grpc.unary_unary_rpc_method_handler( + servicer.CurrentStatus, + request_deserializer=sight__service_dot_proto_dot_service__pb2. + CurrentStatusRequest.FromString, + response_serializer=sight__service_dot_proto_dot_service__pb2. + CurrentStatusResponse.SerializeToString, + ), + 'FetchOptimalAction': + grpc.unary_unary_rpc_method_handler( + servicer.FetchOptimalAction, + request_deserializer=sight__service_dot_proto_dot_service__pb2. + FetchOptimalActionRequest.FromString, + response_serializer=sight__service_dot_proto_dot_service__pb2. + FetchOptimalActionResponse.SerializeToString, + ), + 'ProposeAction': + grpc.unary_unary_rpc_method_handler( + servicer.ProposeAction, + request_deserializer=sight__service_dot_proto_dot_service__pb2. + ProposeActionRequest.FromString, + response_serializer=sight__service_dot_proto_dot_service__pb2. + ProposeActionResponse.SerializeToString, + ), + 'GetOutcome': + grpc.unary_unary_rpc_method_handler( + servicer.GetOutcome, + request_deserializer=sight__service_dot_proto_dot_service__pb2. + GetOutcomeRequest.FromString, + response_serializer=sight__service_dot_proto_dot_service__pb2. + GetOutcomeResponse.SerializeToString, + ), + 'FinalizeEpisode': + grpc.unary_unary_rpc_method_handler( + servicer.FinalizeEpisode, + request_deserializer=sight__service_dot_proto_dot_service__pb2. + FinalizeEpisodeRequest.FromString, + response_serializer=sight__service_dot_proto_dot_service__pb2. + FinalizeEpisodeResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'sight.x.service.SightService', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + +# This class is part of an EXPERIMENTAL API. class SightService(object): - """This API manages Sight logs, their creation and access to them. 
+ """This API manages Sight logs, their creation and access to them. """ - @staticmethod - def Test(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/sight.x.service.SightService/Test', - sight__service_dot_proto_dot_service__pb2.TestRequest.SerializeToString, - sight__service_dot_proto_dot_service__pb2.TestResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) - - @staticmethod - def Create(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/sight.x.service.SightService/Create', - sight__service_dot_proto_dot_service__pb2.CreateRequest.SerializeToString, - sight__service_dot_proto_dot_service__pb2.CreateResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) - - @staticmethod - def Close(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/sight.x.service.SightService/Close', - sight__service_dot_proto_dot_service__pb2.CloseRequest.SerializeToString, - sight__service_dot_proto_dot_service__pb2.CloseResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) - - @staticmethod - def WorkerAlive(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/sight.x.service.SightService/WorkerAlive', - sight__service_dot_proto_dot_service__pb2.WorkerAliveRequest.SerializeToString, - sight__service_dot_proto_dot_service__pb2.WorkerAliveResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) - - @staticmethod - def Launch(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/sight.x.service.SightService/Launch', - sight__service_dot_proto_dot_service__pb2.LaunchRequest.SerializeToString, - sight__service_dot_proto_dot_service__pb2.LaunchResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) - - @staticmethod - def DecisionPoint(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/sight.x.service.SightService/DecisionPoint', - sight__service_dot_proto_dot_service__pb2.DecisionPointRequest.SerializeToString, - sight__service_dot_proto_dot_service__pb2.DecisionPointResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) - - @staticmethod - def Tell(request, - target, - 
options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/sight.x.service.SightService/Tell', - sight__service_dot_proto_dot_service__pb2.TellRequest.SerializeToString, - sight__service_dot_proto_dot_service__pb2.TellResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) - - @staticmethod - def Listen(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/sight.x.service.SightService/Listen', - sight__service_dot_proto_dot_service__pb2.ListenRequest.SerializeToString, - sight__service_dot_proto_dot_service__pb2.ListenResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) - - @staticmethod - def CurrentStatus(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/sight.x.service.SightService/CurrentStatus', - sight__service_dot_proto_dot_service__pb2.CurrentStatusRequest.SerializeToString, - sight__service_dot_proto_dot_service__pb2.CurrentStatusResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) - - @staticmethod - def FetchOptimalAction(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/sight.x.service.SightService/FetchOptimalAction', - sight__service_dot_proto_dot_service__pb2.FetchOptimalActionRequest.SerializeToString, - sight__service_dot_proto_dot_service__pb2.FetchOptimalActionResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) - - @staticmethod - def ProposeAction(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/sight.x.service.SightService/ProposeAction', - sight__service_dot_proto_dot_service__pb2.ProposeActionRequest.SerializeToString, - sight__service_dot_proto_dot_service__pb2.ProposeActionResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) - - @staticmethod - def GetOutcome(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/sight.x.service.SightService/GetOutcome', - sight__service_dot_proto_dot_service__pb2.GetOutcomeRequest.SerializeToString, - sight__service_dot_proto_dot_service__pb2.GetOutcomeResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) - - @staticmethod - def FinalizeEpisode(request, + @staticmethod + def Test(request, + target, + 
options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, target, '/sight.x.service.SightService/Test', + sight__service_dot_proto_dot_service__pb2.TestRequest.SerializeToString, + sight__service_dot_proto_dot_service__pb2.TestResponse.FromString, + options, channel_credentials, insecure, call_credentials, compression, + wait_for_ready, timeout, metadata) + + @staticmethod + def Create(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, target, '/sight.x.service.SightService/Create', + sight__service_dot_proto_dot_service__pb2.CreateRequest. + SerializeToString, + sight__service_dot_proto_dot_service__pb2.CreateResponse.FromString, + options, channel_credentials, insecure, call_credentials, compression, + wait_for_ready, timeout, metadata) + + @staticmethod + def Close(request, target, options=(), channel_credentials=None, @@ -465,8 +364,198 @@ def FinalizeEpisode(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/sight.x.service.SightService/FinalizeEpisode', - sight__service_dot_proto_dot_service__pb2.FinalizeEpisodeRequest.SerializeToString, - sight__service_dot_proto_dot_service__pb2.FinalizeEpisodeResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + return grpc.experimental.unary_unary( + request, target, '/sight.x.service.SightService/Close', + sight__service_dot_proto_dot_service__pb2.CloseRequest. + SerializeToString, + sight__service_dot_proto_dot_service__pb2.CloseResponse.FromString, + options, channel_credentials, insecure, call_credentials, compression, + wait_for_ready, timeout, metadata) + + @staticmethod + def WorkerAlive(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, target, '/sight.x.service.SightService/WorkerAlive', + sight__service_dot_proto_dot_service__pb2.WorkerAliveRequest. + SerializeToString, sight__service_dot_proto_dot_service__pb2. + WorkerAliveResponse.FromString, options, channel_credentials, insecure, + call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def Launch(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, target, '/sight.x.service.SightService/Launch', + sight__service_dot_proto_dot_service__pb2.LaunchRequest. 
+ SerializeToString, + sight__service_dot_proto_dot_service__pb2.LaunchResponse.FromString, + options, channel_credentials, insecure, call_credentials, compression, + wait_for_ready, timeout, metadata) + + @staticmethod + def DecisionPoint(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, target, '/sight.x.service.SightService/DecisionPoint', + sight__service_dot_proto_dot_service__pb2.DecisionPointRequest. + SerializeToString, sight__service_dot_proto_dot_service__pb2. + DecisionPointResponse.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, + metadata) + + @staticmethod + def Tell(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, target, '/sight.x.service.SightService/Tell', + sight__service_dot_proto_dot_service__pb2.TellRequest.SerializeToString, + sight__service_dot_proto_dot_service__pb2.TellResponse.FromString, + options, channel_credentials, insecure, call_credentials, compression, + wait_for_ready, timeout, metadata) + + @staticmethod + def Listen(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, target, '/sight.x.service.SightService/Listen', + sight__service_dot_proto_dot_service__pb2.ListenRequest. + SerializeToString, + sight__service_dot_proto_dot_service__pb2.ListenResponse.FromString, + options, channel_credentials, insecure, call_credentials, compression, + wait_for_ready, timeout, metadata) + + @staticmethod + def CurrentStatus(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, target, '/sight.x.service.SightService/CurrentStatus', + sight__service_dot_proto_dot_service__pb2.CurrentStatusRequest. + SerializeToString, sight__service_dot_proto_dot_service__pb2. + CurrentStatusResponse.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, + metadata) + + @staticmethod + def FetchOptimalAction(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, target, '/sight.x.service.SightService/FetchOptimalAction', + sight__service_dot_proto_dot_service__pb2.FetchOptimalActionRequest. + SerializeToString, sight__service_dot_proto_dot_service__pb2. + FetchOptimalActionResponse.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, + metadata) + + @staticmethod + def ProposeAction(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, target, '/sight.x.service.SightService/ProposeAction', + sight__service_dot_proto_dot_service__pb2.ProposeActionRequest. 
+      SerializeToString, sight__service_dot_proto_dot_service__pb2.
+      ProposeActionResponse.FromString, options, channel_credentials,
+      insecure, call_credentials, compression, wait_for_ready, timeout,
+      metadata)
+
+  @staticmethod
+  def GetOutcome(request,
+                 target,
+                 options=(),
+                 channel_credentials=None,
+                 call_credentials=None,
+                 insecure=False,
+                 compression=None,
+                 wait_for_ready=None,
+                 timeout=None,
+                 metadata=None):
+    return grpc.experimental.unary_unary(
+        request, target, '/sight.x.service.SightService/GetOutcome',
+        sight__service_dot_proto_dot_service__pb2.GetOutcomeRequest.
+        SerializeToString,
+        sight__service_dot_proto_dot_service__pb2.GetOutcomeResponse.FromString,
+        options, channel_credentials, insecure, call_credentials, compression,
+        wait_for_ready, timeout, metadata)
+
+  @staticmethod
+  def FinalizeEpisode(request,
+                      target,
+                      options=(),
+                      channel_credentials=None,
+                      call_credentials=None,
+                      insecure=False,
+                      compression=None,
+                      wait_for_ready=None,
+                      timeout=None,
+                      metadata=None):
+    return grpc.experimental.unary_unary(
+        request, target, '/sight.x.service.SightService/FinalizeEpisode',
+        sight__service_dot_proto_dot_service__pb2.FinalizeEpisodeRequest.
+        SerializeToString, sight__service_dot_proto_dot_service__pb2.
+        FinalizeEpisodeResponse.FromString, options, channel_credentials,
+        insecure, call_credentials, compression, wait_for_ready, timeout,
+        metadata)
diff --git a/sight_service/sensitivity_analysis.py b/sight_service/sensitivity_analysis.py
index 652bd3b..1ab398b 100644
--- a/sight_service/sensitivity_analysis.py
+++ b/sight_service/sensitivity_analysis.py
@@ -13,191 +13,182 @@
 # limitations under the License.
 """Sensitivity analysis of Sight applications."""

-from helpers.logs.logs_handler import logger as logging
 import random
+import threading
 from typing import Any, Dict, List, Tuple
+
+from helpers.logs.logs_handler import logger as logging
 from overrides import overrides
 from sight_service.optimizer_instance import OptimizerInstance
 from sight_service.optimizer_instance import param_dict_to_proto
 from sight_service.proto import service_pb2
-import threading

 _file_name = 'sensitivity_analysis.py'


 class SightServiceErrorFix:
     pass


 class SensitivityAnalysis(OptimizerInstance):
-  """Exhaustively searches over all the possible values of the action attributes.
+  """Randomly samples the action attributes to gauge how sensitive the outcome is to each of them.

   Attributes:
     possible_values: Maps each action attribute to the list of its possible
       values.
   """

-    def __init__(self):
-        super().__init__()
-        self.num_samples_issued = 0
-        self.active_samples = {}
-        self.complete_samples = {}
-        self.possible_values = {}
-        self._lock = threading.RLock()
-
-    @overrides
-    def launch(
-            self,
-            request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse:
-        method_name = 'launch'
-        logging.debug('>>>> In %s of %s', method_name, _file_name)
-
-        response = super(SensitivityAnalysis, self).launch(request)
-        self.possible_values = {}
-        for i, key in enumerate(sorted(self.actions.keys())):
-            if self.actions[key].valid_float_values:
-                self.possible_values[key] = list(
-                    self.actions[key].valid_float_values)
-            elif self.actions[key].step_size:
-                self.possible_values[key] = []
-                cur = self.actions[key].min_value
-                while cur <= self.actions[key].max_value:
-                    self.possible_values[key].append(cur)
-                    cur += self.actions[key].step_size
-
-        logging.info('possible_values=%s', self.possible_values)
-        response.display_string = 'Sensitivity Analysis!'
- logging.debug('<<<< Out %s of %s', method_name, _file_name) - return response - - @overrides - def decision_point( - self, request: service_pb2.DecisionPointRequest - ) -> service_pb2.DecisionPointResponse: - method_name = 'decision_point' - logging.debug('>>>> In %s of %s', method_name, _file_name) - - next_action = {} - for i, key in enumerate(self.actions): - if key in self.possible_values: - print('selecting from possible values') - next_action[key] = self.possible_values[key][random.randint( - 0, - len(self.possible_values[key]) - 1)] - elif self.actions[key].HasField('continuous_prob_dist'): - rand_val = random.gauss( - self.actions[key].continuous_prob_dist.gaussian.mean, - self.actions[key].continuous_prob_dist.gaussian.stdev) - print( - 'self.actions[key].continuous_prob_dist=%s, rand_val=%s' % - (self.actions[key].continuous_prob_dist, rand_val)) - if rand_val < self.actions[key].min_value: - rand_val = self.actions[key].min_value - elif rand_val > self.actions[key].max_value: - rand_val = self.actions[key].max_value - next_action[key] = rand_val - if self.actions[key].continuous_prob_dist.HasField('gaussian'): - rand_val = random.gauss( - self.actions[key].continuous_prob_dist.gaussian.mean, - self.actions[key].continuous_prob_dist.gaussian.stdev) - print( - 'self.actions[key].continuous_prob_dist=%s, rand_val=%s' - % (self.actions[key].continuous_prob_dist, rand_val)) - if rand_val < self.actions[key].min_value: - rand_val = self.actions[key].min_value - elif rand_val > self.actions[key].max_value: - rand_val = self.actions[key].max_value - next_action[key] = rand_val - elif self.actions[key].continuous_prob_dist.HasField( - 'uniform'): - rand_val = random.uniform( - self.actions[key].continuous_prob_dist.uniform.min_val, - self.actions[key].continuous_prob_dist.uniform.max_val) - print( - 'self.actions[key].continuous_prob_dist=%s, rand_val=%s' - % (self.actions[key].continuous_prob_dist, rand_val)) - next_action[key] = rand_val - else: - raise ValueError( - 'Only support Gaussian continuous distribution.') - elif self.actions[key].HasField('discrete_prob_dist'): - if self.actions[key].discrete_prob_dist.HasField('uniform'): - rand_val = random.randint( - self.actions[key].discrete_prob_dist.uniform.min_val, - self.actions[key].discrete_prob_dist.uniform.max_val) - print( - 'self.actions[key].discrete_prob_dist=%s, rand_val=%s' - % (self.actions[key].discrete_prob_dist, rand_val)) - next_action[key] = rand_val - else: - raise ValueError( - 'Only support Uniform discrete distribution.') - else: - print('selecting from random.uniform') - next_action[key] = random.uniform(self.actions[key].min_value, - self.actions[key].max_value) - - self._lock.acquire() - self.active_samples[request.worker_id] = { - 'action': next_action, - 'sample_num': self.num_samples_issued, + def __init__(self): + super().__init__() + self.num_samples_issued = 0 + self.active_samples = {} + self.complete_samples = {} + self.possible_values = {} + self._lock = threading.RLock() + + @overrides + def launch(self, + request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: + method_name = 'launch' + logging.debug('>>>> In %s of %s', method_name, _file_name) + + response = super(SensitivityAnalysis, self).launch(request) + self.possible_values = {} + for i, key in enumerate(sorted(self.actions.keys())): + if self.actions[key].valid_float_values: + self.possible_values[key] = list(self.actions[key].valid_float_values) + elif self.actions[key].step_size: + self.possible_values[key] = [] + cur = 
self.actions[key].min_value
+      while cur <= self.actions[key].max_value:
+        self.possible_values[key].append(cur)
+        cur += self.actions[key].step_size
+
+    logging.info('possible_values=%s', self.possible_values)
+    response.display_string = 'Sensitivity Analysis!'
+    logging.debug('<<<< Out %s of %s', method_name, _file_name)
+    return response
+
+  @overrides
+  def decision_point(
+      self, request: service_pb2.DecisionPointRequest
+  ) -> service_pb2.DecisionPointResponse:
+    method_name = 'decision_point'
+    logging.debug('>>>> In %s of %s', method_name, _file_name)
+
+    next_action = {}
+    for i, key in enumerate(self.actions):
+      if key in self.possible_values:
+        print('selecting from possible values')
+        next_action[key] = self.possible_values[key][random.randint(
+            0,
+            len(self.possible_values[key]) - 1)]
+      elif self.actions[key].HasField('continuous_prob_dist'):
+        if self.actions[key].continuous_prob_dist.HasField('gaussian'):
+          rand_val = random.gauss(
+              self.actions[key].continuous_prob_dist.gaussian.mean,
+              self.actions[key].continuous_prob_dist.gaussian.stdev)
+          print('self.actions[key].continuous_prob_dist=%s, rand_val=%s' %
+                (self.actions[key].continuous_prob_dist, rand_val))
+          # Clamp the sampled value to the attribute's legal range.
+          if rand_val < self.actions[key].min_value:
+            rand_val = self.actions[key].min_value
+          elif rand_val > self.actions[key].max_value:
+            rand_val = self.actions[key].max_value
+          next_action[key] = rand_val
+        elif self.actions[key].continuous_prob_dist.HasField('uniform'):
+          rand_val = random.uniform(
+              self.actions[key].continuous_prob_dist.uniform.min_val,
+              self.actions[key].continuous_prob_dist.uniform.max_val)
+          print('self.actions[key].continuous_prob_dist=%s, rand_val=%s' %
+                (self.actions[key].continuous_prob_dist, rand_val))
+          next_action[key] = rand_val
+        else:
+          raise ValueError(
+              'Only Gaussian and uniform continuous distributions are supported.')
+      elif self.actions[key].HasField('discrete_prob_dist'):
+        if self.actions[key].discrete_prob_dist.HasField('uniform'):
+          rand_val = random.randint(
+              self.actions[key].discrete_prob_dist.uniform.min_val,
+              self.actions[key].discrete_prob_dist.uniform.max_val)
+          print('self.actions[key].discrete_prob_dist=%s, rand_val=%s' %
+                (self.actions[key].discrete_prob_dist, rand_val))
+          next_action[key] = rand_val
+        else:
+          raise ValueError('Only support Uniform discrete distribution.')
+      else:
+        print('selecting from random.uniform')
+        next_action[key] = random.uniform(self.actions[key].min_value,
+                                          self.actions[key].max_value)
+
+    self._lock.acquire()
+    self.active_samples[request.worker_id] = {
+        'action': next_action,
+        'sample_num': self.num_samples_issued,
+    }
+    self.num_samples_issued += 1
+    self._lock.release()
+
+    logging.info('next_action=%s', next_action)
+    dp_response = service_pb2.DecisionPointResponse()
+    dp_response.action.extend(param_dict_to_proto(next_action))
+    dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT
+    logging.debug('<<<< Out %s of %s', method_name, _file_name)
+    return dp_response
+
+  @overrides
+  def finalize_episode(
+      self, request: service_pb2.FinalizeEpisodeRequest
+  ) ->
service_pb2.FinalizeEpisodeResponse: + method_name = 'finalize_episode' + logging.debug('>>>> In %s of %s', method_name, _file_name) + # logging.info('Running for exhaustive search....') + + self._lock.acquire() + # logging.info('FinalizeEpisode complete_samples=%s' % self.complete_samples) + self.complete_samples[self.active_samples[ + request.worker_id]['sample_num']] = { + 'outcome': request.decision_outcome.outcome_value, + 'action': self.active_samples[request.worker_id]['action'], } - self.num_samples_issued += 1 - self._lock.release() - - logging.info('next_action=%s', next_action) - dp_response = service_pb2.DecisionPointdp_response() - dp_response.action.extend(param_dict_to_proto(next_action)) - dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT - logging.debug('<<<< Out %s of %s', method_name, _file_name) - return dp_response - - @overrides - def finalize_episode( - self, request: service_pb2.FinalizeEpisodeRequest - ) -> service_pb2.FinalizeEpisodeResponse: - method_name = 'finalize_episode' - logging.debug('>>>> In %s of %s', method_name, _file_name) - # logging.info('Running for exhaustive search....') - - self._lock.acquire() - # logging.info('FinalizeEpisode complete_samples=%s' % self.complete_samples) - self.complete_samples[self.active_samples[ - request.worker_id]['sample_num']] = { - 'outcome': request.decision_outcome.outcome_value, - 'action': self.active_samples[request.worker_id]['action'], - } - del self.active_samples[request.worker_id] - self._lock.release() - - # logging.info('FinalizeEpisode active_samples=%s' % self.active_samples) - logging.debug('<<<< Out %s of %s', method_name, _file_name) - return service_pb2.FinalizeEpisodeResponse(response_str='Success!') - - @overrides - def current_status( - self, request: service_pb2.CurrentStatusRequest - ) -> service_pb2.CurrentStatusResponse: - method_name = 'current_status' - logging.debug('>>>> In %s of %s', method_name, _file_name) - response = ('[SensitivityAnalysis:\n') - response += f' #active_samples={len(self.active_samples)}\n' - response += ' completed_samples=\n' - response += 'sample_num, ' + ', '.join(list( - self.actions)) + ', outcome\n' - - cur = [0] * len(self.actions) - keys = sorted(self.actions.keys()) - logging.info('self.complete_samples=%s', self.complete_samples) - for s in sorted(self.complete_samples.items(), - key=lambda x: x[1]['outcome'], - reverse=True): - response += str(s[0]) + ', ' - response += ', '.join([str(s[1]['action'][key]) for key in keys]) - response += ', ' + str(s[1]['outcome']) + '\n' - response += ']' - logging.debug('<<<< Out %s of %s', method_name, _file_name) - return service_pb2.CurrentStatusResponse(response_str=response) - - @overrides - def fetch_optimal_action( - self, request: service_pb2.FetchOptimalActionRequest - ) -> service_pb2.FetchOptimalActionResponse: - method_name = 'fetch_optimal_action' - return service_pb2.CurrentStatusResponse(response_str='') + del self.active_samples[request.worker_id] + self._lock.release() + + # logging.info('FinalizeEpisode active_samples=%s' % self.active_samples) + logging.debug('<<<< Out %s of %s', method_name, _file_name) + return service_pb2.FinalizeEpisodeResponse(response_str='Success!') + + @overrides + def current_status( + self, request: service_pb2.CurrentStatusRequest + ) -> service_pb2.CurrentStatusResponse: + method_name = 'current_status' + logging.debug('>>>> In %s of %s', method_name, _file_name) + response = ('[SensitivityAnalysis:\n') + response += f' 
#active_samples={len(self.active_samples)}\n'
+    response += ' completed_samples=\n'
+    response += 'sample_num, ' + ', '.join(list(self.actions)) + ', outcome\n'
+
+    cur = [0] * len(self.actions)
+    keys = sorted(self.actions.keys())
+    logging.info('self.complete_samples=%s', self.complete_samples)
+    for s in sorted(self.complete_samples.items(),
+                    key=lambda x: x[1]['outcome'],
+                    reverse=True):
+      response += str(s[0]) + ', '
+      response += ', '.join([str(s[1]['action'][key]) for key in keys])
+      response += ', ' + str(s[1]['outcome']) + '\n'
+    response += ']'
+    logging.debug('<<<< Out %s of %s', method_name, _file_name)
+    return service_pb2.CurrentStatusResponse(response_str=response)
+
+  @overrides
+  def fetch_optimal_action(
+      self, request: service_pb2.FetchOptimalActionRequest
+  ) -> service_pb2.FetchOptimalActionResponse:
+    method_name = 'fetch_optimal_action'
+    return service_pb2.FetchOptimalActionResponse()
diff --git a/sight_service/service_root.py b/sight_service/service_root.py
index 37b54da..ab6b3bf 100644
--- a/sight_service/service_root.py
+++ b/sight_service/service_root.py
@@ -15,7 +15,7 @@


 def warn(*args, **kwargs):
-    pass
+  pass


 import warnings

@@ -25,23 +25,23 @@ def warn(*args, **kwargs):
 from concurrent import futures
 # from helpers.logs.logs_handler import logger as logging
 import logging
+
 from absl import app
 from absl import flags
-
-import grpc
 from dotenv import load_dotenv
+import grpc

 load_dotenv()

 import os
-from overrides import overrides
-import uuid
-
-from typing import Any, Dict, List, Tuple
 import time
+from typing import Any, Dict, List, Tuple
+import uuid

-from sight_service import service_utils
+from overrides import overrides
+from readerwriterlock import rwlock
 from sight.proto import sight_pb2
+from sight_service import service_utils
 # from sight_service.acme_optimizer import Acme
 from sight_service.bayesian_opt import BayesianOpt
 from sight_service.exhaustive_search import ExhaustiveSearch
@@ -55,7 +55,6 @@ def warn(*args, **kwargs):
 from sight_service.smc_py import SMCPy
 from sight_service.vizier import Vizier
 from sight_service.worklist_scheduler_opt import WorklistScheduler
-from readerwriterlock import rwlock

 _PORT = flags.DEFINE_integer('port', 8080, 'The port to listen on')
 _file_name = "service_root.py"
@@ -68,294 +67,262 @@ def warn(*args, **kwargs):


 def generate_unique_number() -> int:
-    return uuid.uuid4().int & (1 << 63) - 1
+  return uuid.uuid4().int & (1 << 63) - 1


 def calculate_resolve_time(start_time):
-    method_name = "calculate_resolve_time"
-    logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name)
-    resolve_time = time.time() - start_time
-    _resolve_times.append(resolve_time)
-    avg_resolve_time = sum(_resolve_times) / len(_resolve_times)
-    logging.info(
-        " logging.info : Average Resolve Time From Server: %s seconds",
-        round(avg_resolve_time, 4))
-    logging.info("<<<<<< Out %s method of %s file.", method_name, _file_name)
+  method_name = "calculate_resolve_time"
+  logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name)
+  resolve_time = time.time() - start_time
+  _resolve_times.append(resolve_time)
+  avg_resolve_time = sum(_resolve_times) / len(_resolve_times)
+  logging.info("Average Resolve Time From Server: %s seconds",
+               round(avg_resolve_time, 4))
+  logging.info("<<<<<< Out %s method of %s file.", method_name, _file_name)


 class Optimizers:
-  """
+  """
  Optimizer class that creates a request-specific optimizer and uses the
  methods it provides to serve future requests.
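+
+  Each Sight client_id is mapped to its own OptimizerInstance: launch()
+  creates the instance and get_instance() returns it on later requests.
+
+  Example (hypothetical usage sketch; launch_request and client_id are
+  illustrative placeholders, not part of this module):
+
+    optimizers = Optimizers()
+    optimizers.launch(launch_request)         # creates the instance
+    opt = optimizers.get_instance(client_id)  # reuses it on later RPCs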
""" - def __init__(self): - self.instances: Dict[str, OptimizerInstance] = {} - self.instances_lock = rwlock.RWLockFair() + def __init__(self): + self.instances: Dict[str, OptimizerInstance] = {} + self.instances_lock = rwlock.RWLockFair() - def launch( - self, - request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: - """Creates more specific optimizer and use them while responding to clients accordingly. + def launch(self, + request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: + """Creates more specific optimizer and use them while responding to clients accordingly. """ - method_name = "launch" - logging.info(">>>>>>> In %s method of %s file.", method_name, - "Optimizers") - - optimizer_type = request.decision_config_params.optimizer_type - logging.debug(">>>>>>> In %s method of %s file. optimizer_type=%s", - method_name, _file_name, optimizer_type) - with self.instances_lock.gen_wlock(): - if optimizer_type == sight_pb2.DecisionConfigurationStart.OptimizerType.OT_VIZIER: - self.instances[request.client_id] = Vizier() - return self.instances[request.client_id].launch(request) - elif optimizer_type == sight_pb2.DecisionConfigurationStart.OptimizerType.OT_GENETIC_ALGORITHM: - self.instances[request.client_id] = GeneticAlgorithm() - return self.instances[request.client_id].launch(request) - elif optimizer_type == sight_pb2.DecisionConfigurationStart.OptimizerType.OT_EXHAUSTIVE_SEARCH: - self.instances[request.client_id] = ExhaustiveSearch() - return self.instances[request.client_id].launch(request) - # elif optimizer_type == sight_pb2.DecisionConfigurationStart.OptimizerType.OT_ACME: - # self.instances[request.client_id] = Acme() - # obj = self.instances[request.client_id].launch(request) - # # logging.info("self of optimizers class: %s", str(self.__dict__)) - # return obj - elif optimizer_type == sight_pb2.DecisionConfigurationStart.OptimizerType.OT_LLM: - self.instances[request.client_id] = LLM() - obj = self.instances[request.client_id].launch(request) - return obj - elif optimizer_type == sight_pb2.DecisionConfigurationStart.OptimizerType.OT_BAYESIAN_OPT: - self.instances[request.client_id] = BayesianOpt() - obj = self.instances[request.client_id].launch(request) - return obj - elif optimizer_type == sight_pb2.DecisionConfigurationStart.OptimizerType.OT_SENSITIVITY_ANALYSIS: - self.instances[request.client_id] = SensitivityAnalysis() - obj = self.instances[request.client_id].launch(request) - return obj - elif optimizer_type == sight_pb2.DecisionConfigurationStart.OptimizerType.OT_NEVER_GRAD: - self.instances[request.client_id] = NeverGradOpt() - obj = self.instances[request.client_id].launch(request) - return obj - elif optimizer_type == sight_pb2.DecisionConfigurationStart.OptimizerType.OT_SMC_PY: - self.instances[request.client_id] = SMCPy() - obj = self.instances[request.client_id].launch(request) - return obj - elif optimizer_type == sight_pb2.DecisionConfigurationStart.OptimizerType.OT_WORKLIST_SCHEDULER: - self.instances[request.client_id] = WorklistScheduler() - obj = self.instances[request.client_id].launch(request) - return obj - else: - return service_pb2.LaunchResponse( - display_string=f"OPTIMIZER '{optimizer_type}' NOT VALID!!") - logging.info("<<<<<< Out %s method of %s file.", method_name, - _file_name) - - def get_instance(self, client_id: str) -> OptimizerInstance: - # method_name = "get_instance" - # logging.debug(">>>>>>> In %s method of %s file.", method_name, _file_name) - with self.instances_lock.gen_rlock(): - if (client_id in 
self.instances): - instance_obj = self.instances[client_id] - return instance_obj - else: - #add better mechanism, this require in close rpc for now - return None - # logging.debug("<<<<<< Out %s method of %s file.", method_name, _file_name) + method_name = "launch" + logging.info(">>>>>>> In %s method of %s file.", method_name, "Optimizers") + + optimizer_type = request.decision_config_params.optimizer_type + logging.debug(">>>>>>> In %s method of %s file. optimizer_type=%s", + method_name, _file_name, optimizer_type) + with self.instances_lock.gen_wlock(): + if optimizer_type == sight_pb2.DecisionConfigurationStart.OptimizerType.OT_VIZIER: + self.instances[request.client_id] = Vizier() + return self.instances[request.client_id].launch(request) + elif optimizer_type == sight_pb2.DecisionConfigurationStart.OptimizerType.OT_GENETIC_ALGORITHM: + self.instances[request.client_id] = GeneticAlgorithm() + return self.instances[request.client_id].launch(request) + elif optimizer_type == sight_pb2.DecisionConfigurationStart.OptimizerType.OT_EXHAUSTIVE_SEARCH: + self.instances[request.client_id] = ExhaustiveSearch() + return self.instances[request.client_id].launch(request) + # elif optimizer_type == sight_pb2.DecisionConfigurationStart.OptimizerType.OT_ACME: + # self.instances[request.client_id] = Acme() + # obj = self.instances[request.client_id].launch(request) + # # logging.info("self of optimizers class: %s", str(self.__dict__)) + # return obj + elif optimizer_type == sight_pb2.DecisionConfigurationStart.OptimizerType.OT_LLM: + self.instances[request.client_id] = LLM() + obj = self.instances[request.client_id].launch(request) + return obj + elif optimizer_type == sight_pb2.DecisionConfigurationStart.OptimizerType.OT_BAYESIAN_OPT: + self.instances[request.client_id] = BayesianOpt() + obj = self.instances[request.client_id].launch(request) + return obj + elif optimizer_type == sight_pb2.DecisionConfigurationStart.OptimizerType.OT_SENSITIVITY_ANALYSIS: + self.instances[request.client_id] = SensitivityAnalysis() + obj = self.instances[request.client_id].launch(request) + return obj + elif optimizer_type == sight_pb2.DecisionConfigurationStart.OptimizerType.OT_NEVER_GRAD: + self.instances[request.client_id] = NeverGradOpt() + obj = self.instances[request.client_id].launch(request) + return obj + elif optimizer_type == sight_pb2.DecisionConfigurationStart.OptimizerType.OT_SMC_PY: + self.instances[request.client_id] = SMCPy() + obj = self.instances[request.client_id].launch(request) + return obj + elif optimizer_type == sight_pb2.DecisionConfigurationStart.OptimizerType.OT_WORKLIST_SCHEDULER: + self.instances[request.client_id] = WorklistScheduler() + obj = self.instances[request.client_id].launch(request) + return obj + else: + return service_pb2.LaunchResponse( + display_string=f"OPTIMIZER '{optimizer_type}' NOT VALID!!") + logging.info("<<<<<< Out %s method of %s file.", method_name, _file_name) + + def get_instance(self, client_id: str) -> OptimizerInstance: + # method_name = "get_instance" + # logging.debug(">>>>>>> In %s method of %s file.", method_name, _file_name) + with self.instances_lock.gen_rlock(): + if (client_id in self.instances): + instance_obj = self.instances[client_id] + return instance_obj + else: + #add better mechanism, this require in close rpc for now + return None + # logging.debug("<<<<<< Out %s method of %s file.", method_name, _file_name) class SightService(service_pb2_grpc.SightServiceServicer): - """Service class to handle the grpc request send via sight client. 
+ """Service class to handle the grpc request send via sight client. """ - def __init__(self): - super().__init__() - self.optimizers = Optimizers() - - def Test(self, request, context): - method_name = "Test" - logging.info(">>>>>>> In %s method of %s file.", method_name, - _file_name) - obj = service_pb2.TestResponse(val="222") - logging.info("<<<<<< Out %s method of %s file.", method_name, - _file_name) - return obj + def __init__(self): + super().__init__() + self.optimizers = Optimizers() - # def GetWeights(self, request, context): - # method_name = "GetWeights" - # logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) - # start_time = time.time() - # obj = self.optimizers.get_instance(request.client_id).get_weights(request) - # # calculate_resolve_time(start_time) - # logging.info("<<<<<< Out %s method of %s file.", method_name, _file_name) - # return obj - - def DecisionPoint(self, request, context): - method_name = "DecisionPoint" - logging.info(">>>>>>> In %s method of %s file.", method_name, - _file_name) - start_time = time.time() - obj = self.optimizers.get_instance( - request.client_id).decision_point(request) - # calculate_resolve_time(start_time) - logging.info("<<<<<<< Out %s method of %s file.", method_name, - _file_name) - return obj + def Test(self, request, context): + method_name = "Test" + logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) + obj = service_pb2.TestResponse(val="222") + logging.info("<<<<<< Out %s method of %s file.", method_name, _file_name) + return obj + + # def GetWeights(self, request, context): + # method_name = "GetWeights" + # logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) + # start_time = time.time() + # obj = self.optimizers.get_instance(request.client_id).get_weights(request) + # # calculate_resolve_time(start_time) + # logging.info("<<<<<< Out %s method of %s file.", method_name, _file_name) + # return obj + + def DecisionPoint(self, request, context): + method_name = "DecisionPoint" + logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) + start_time = time.time() + obj = self.optimizers.get_instance( + request.client_id).decision_point(request) + # calculate_resolve_time(start_time) + logging.info("<<<<<<< Out %s method of %s file.", method_name, _file_name) + return obj - def Tell(self, request, context): - method_name = "Tell" - logging.debug(">>>>>>> In %s method of %s file.", method_name, - _file_name) - - return self.optimizers.get_instance(request.client_id).tell(request) - logging.debug("<<<<<<< Out %s method of %s file.", method_name, - _file_name) - - def Listen(self, request, context): - method_name = "Listen" - logging.debug(">>>>>>> In %s method of %s file.", method_name, - _file_name) - - return self.optimizers.get_instance(request.client_id).listen(request) - logging.debug("<<<<<<< Out %s method of %s file.", method_name, - _file_name) - - def CurrentStatus(self, request, context): - method_name = "CurrentStatus" - # logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) - - return self.optimizers.get_instance( - request.client_id).current_status(request) - logging.info("<<<<<<< Out %s method of %s file.", method_name, - _file_name) - - def FetchOptimalAction(self, request, context): - method_name = "FetchOptimalAction" - logging.info(">>>>>>> In %s method of %s file.", method_name, - _file_name) - - obj = self.optimizers.get_instance( - request.client_id).fetch_optimal_action(request) - logging.info("<<<<<<< Out %s 
method of %s file.", method_name, - _file_name) - return obj + def Tell(self, request, context): + method_name = "Tell" + logging.debug(">>>>>>> In %s method of %s file.", method_name, _file_name) - def ProposeAction(self, request, context): - method_name = "ProposeAction" - logging.info(">>>>>>> In %s method of %s file.", method_name, - _file_name) + return self.optimizers.get_instance(request.client_id).tell(request) + logging.debug("<<<<<<< Out %s method of %s file.", method_name, _file_name) - obj = self.optimizers.get_instance( - request.client_id).propose_action(request) - logging.info("<<<<<<< Out %s method of %s file.", method_name, - _file_name) - return obj + def Listen(self, request, context): + method_name = "Listen" + logging.debug(">>>>>>> In %s method of %s file.", method_name, _file_name) - def GetOutcome(self, request, context): - method_name = "GetOutcome" - logging.info(">>>>>>> In %s method of %s file.", method_name, - _file_name) + return self.optimizers.get_instance(request.client_id).listen(request) + logging.debug("<<<<<<< Out %s method of %s file.", method_name, _file_name) - obj = self.optimizers.get_instance( - request.client_id).GetOutcome(request) - logging.info("<<<<<<< Out %s method of %s file.", method_name, - _file_name) - return obj + def CurrentStatus(self, request, context): + method_name = "CurrentStatus" + # logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) - def FinalizeEpisode(self, request, context): - method_name = "FinalizeEpisode" - logging.info(">>>>>>> In %s method of %s file.", method_name, - _file_name) + return self.optimizers.get_instance( + request.client_id).current_status(request) + logging.info("<<<<<<< Out %s method of %s file.", method_name, _file_name) - obj = self.optimizers.get_instance( - request.client_id).finalize_episode(request) - logging.info("<<<<<<< Out %s method of %s file.", method_name, - _file_name) - return obj + def FetchOptimalAction(self, request, context): + method_name = "FetchOptimalAction" + logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) - def Launch(self, request, context): - method_name = "Launch" - logging.info(">>>>>>> In %s method of %s file.", method_name, - _file_name) - # start_time = time.time() - obj = self.optimizers.launch(request) - # calculate_resolve_time(start_time) - logging.info("<<<<<<< Out %s method of %s file.", method_name, - _file_name) - return obj + obj = self.optimizers.get_instance( + request.client_id).fetch_optimal_action(request) + logging.info("<<<<<<< Out %s method of %s file.", method_name, _file_name) + return obj - def Create(self, request, context): - method_name = "Create" - logging.info(">>>>>>> In %s method of %s file.", method_name, - _file_name) - # start_time = time.time() - unique_id = generate_unique_number() - # calculate_resolve_time(start_time) - - logging.info("<<<<<<< Out %s method of %s file.", method_name, - _file_name) - return service_pb2.CreateResponse(id=unique_id, path_prefix="/tmp/") - - def Close(self, request, context): - method_name = "Close" - logging.info(">>>>>>> In %s method of %s file.", method_name, - _file_name) - - # only call if it's launch called, otherwise no entry of opt for that client - if (self.optimizers.get_instance(request.client_id)): - obj = self.optimizers.get_instance( - request.client_id).close(request) - else: - obj = service_pb2.CloseResponse() - - #? do we need to remove entry from optimizer dict, if available?? 
- logging.info("<<<<<<< Out %s method of %s file.", method_name, - _file_name) - return obj + def ProposeAction(self, request, context): + method_name = "ProposeAction" + logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) - def WorkerAlive(self, request, context): - method_name = "WorkerAlive" - logging.info(">>>>>>> In %s method of %s file.", method_name, - _file_name) + obj = self.optimizers.get_instance( + request.client_id).propose_action(request) + logging.info("<<<<<<< Out %s method of %s file.", method_name, _file_name) + return obj - obj = self.optimizers.get_instance( - request.client_id).WorkerAlive(request) - logging.info("<<<<<<< Out %s method of %s file.", method_name, - _file_name) - return obj + def GetOutcome(self, request, context): + method_name = "GetOutcome" + logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) + obj = self.optimizers.get_instance(request.client_id).GetOutcome(request) + logging.info("<<<<<<< Out %s method of %s file.", method_name, _file_name) + return obj -def serve(): - """Main method that listens on port 8080 and handle requests received from client. - """ - method_name = "serve" + def FinalizeEpisode(self, request, context): + method_name = "FinalizeEpisode" logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) - server = grpc.server(futures.ThreadPoolExecutor(max_workers=500), - options=[ - ('grpc.max_receive_message_length', - 512 * 1024 * 1024), - ]) - service_pb2_grpc.add_SightServiceServicer_to_server(SightService(), server) - server.add_insecure_port(f"[::]:{_PORT.value}") - server.start() - logging.info(f"server is up and running on port : {_PORT.value}") - - # flask_app.run(debug=True, host="0.0.0.0", port=_PORT.value) - server.wait_for_termination() + obj = self.optimizers.get_instance( + request.client_id).finalize_episode(request) logging.info("<<<<<<< Out %s method of %s file.", method_name, _file_name) + return obj + def Launch(self, request, context): + method_name = "Launch" + logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) + # start_time = time.time() + obj = self.optimizers.launch(request) + # calculate_resolve_time(start_time) + logging.info("<<<<<<< Out %s method of %s file.", method_name, _file_name) + return obj -def main(argv): - method_name = "__main__" - logging.basicConfig(level=logging.INFO) + def Create(self, request, context): + method_name = "Create" logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) - try: - app.run(serve()) - except BaseException as e: - logging.error("Error occurred : ") - logging.error(e) + # start_time = time.time() + unique_id = generate_unique_number() + # calculate_resolve_time(start_time) + logging.info("<<<<<<< Out %s method of %s file.", method_name, _file_name) + return service_pb2.CreateResponse(id=unique_id, path_prefix="/tmp/") + + def Close(self, request, context): + method_name = "Close" + logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) + + # only call if it's launch called, otherwise no entry of opt for that client + if (self.optimizers.get_instance(request.client_id)): + obj = self.optimizers.get_instance(request.client_id).close(request) + else: + obj = service_pb2.CloseResponse() + + #? do we need to remove entry from optimizer dict, if available?? 
+ logging.info("<<<<<<< Out %s method of %s file.", method_name, _file_name) + return obj + + def WorkerAlive(self, request, context): + method_name = "WorkerAlive" + logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) + + obj = self.optimizers.get_instance(request.client_id).WorkerAlive(request) + logging.info("<<<<<<< Out %s method of %s file.", method_name, _file_name) + return obj + + +def serve(): + """Main method that listens on port 8080 and handle requests received from client. + """ + method_name = "serve" + logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) + + server = grpc.server(futures.ThreadPoolExecutor(max_workers=500), + options=[ + ('grpc.max_receive_message_length', + 512 * 1024 * 1024), + ]) + service_pb2_grpc.add_SightServiceServicer_to_server(SightService(), server) + server.add_insecure_port(f"[::]:{_PORT.value}") + server.start() + logging.info(f"server is up and running on port : {_PORT.value}") + + # flask_app.run(debug=True, host="0.0.0.0", port=_PORT.value) + server.wait_for_termination() + logging.info("<<<<<<< Out %s method of %s file.", method_name, _file_name) + + +def main(argv): + method_name = "__main__" + logging.basicConfig(level=logging.INFO) + logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) + try: + app.run(serve()) + except BaseException as e: + logging.error("Error occurred : ") + logging.error(e) + logging.info("<<<<<<< Out %s method of %s file.", method_name, _file_name) if __name__ == "__main__": - app.run(main) + app.run(main) diff --git a/sight_service/service_utils.py b/sight_service/service_utils.py index ccde91d..7dc2691 100644 --- a/sight_service/service_utils.py +++ b/sight_service/service_utils.py @@ -13,12 +13,13 @@ # limitations under the License. """Common resources used in the gRPC sight_service example.""" -import os import json +import os import time -from helpers.logs.logs_handler import logger as logging + from absl import flags from google.cloud import spanner +from helpers.logs.logs_handler import logger as logging OPERATION_TIMEOUT_SECONDS = 240 _file_name = "server_utils.py" @@ -26,185 +27,182 @@ def write_to_JSON(new_log_entry): - """Writes in the database locally as json file. + """Writes in the database locally as json file. Returns: """ - fname = "service/decision/decision_db.json" + fname = "service/decision/decision_db.json" - with open(fname, "r+") as sight_service_db_file: - # First we load existing data into a dict. - log_data = json.load(sight_service_db_file) - # Join new_data with log_data inside emp_details - log_data["log_details"].append(new_log_entry) - # Sets file's current position at offset. - sight_service_db_file.seek(0) - # convert back to json. - json.dump(log_data, sight_service_db_file, indent=4) + with open(fname, "r+") as sight_service_db_file: + # First we load existing data into a dict. + log_data = json.load(sight_service_db_file) + # Join new_data with log_data inside emp_details + log_data["log_details"].append(new_log_entry) + # Sets file's current position at offset. + sight_service_db_file.seek(0) + # convert back to json. 
+ json.dump(log_data, sight_service_db_file, indent=4) def create_database(instance_id, database_id, log_table_id, study_table_id): - """Creates a database and tables for sample data.""" - - method_name = "create_database" - logging.debug(">>>> In %s of %s", method_name, _file_name) - spanner_client = spanner.Client(project=os.environ['PROJECT_ID']) - - instance = spanner_client.instance(instance_id) - if instance.exists(): - print("Instance with ID {} exists.".format(instance_id)) - else: - config_name = "{}/instanceConfigs/regional-us-central1".format( - spanner_client.project_name) - - instance = spanner_client.instance( - instance_id, - configuration_name=config_name, - display_name="Log Data", - node_count=1, - ) - - operation = instance.create() - - print("Waiting for operation to complete...") - operation.result(OPERATION_TIMEOUT_SECONDS) - - print("Created instance {}".format(instance_id)) - - database = instance.database(database_id) - if database.exists(): - print("Database with ID {} exists.".format(database_id)) - else: - operation = database.create() - print("Waiting for operation to complete...") - operation.result(OPERATION_TIMEOUT_SECONDS) - print("Created database {} on instance {}".format( - database_id, instance_id)) - - operation = database.update_ddl([ - """CREATE TABLE """ + log_table_id + """ ( + """Creates a database and tables for sample data.""" + + method_name = "create_database" + logging.debug(">>>> In %s of %s", method_name, _file_name) + spanner_client = spanner.Client(project=os.environ['PROJECT_ID']) + + instance = spanner_client.instance(instance_id) + if instance.exists(): + print("Instance with ID {} exists.".format(instance_id)) + else: + config_name = "{}/instanceConfigs/regional-us-central1".format( + spanner_client.project_name) + + instance = spanner_client.instance( + instance_id, + configuration_name=config_name, + display_name="Log Data", + node_count=1, + ) + + operation = instance.create() + + print("Waiting for operation to complete...") + operation.result(OPERATION_TIMEOUT_SECONDS) + + print("Created instance {}".format(instance_id)) + + database = instance.database(database_id) + if database.exists(): + print("Database with ID {} exists.".format(database_id)) + else: + operation = database.create() + print("Waiting for operation to complete...") + operation.result(OPERATION_TIMEOUT_SECONDS) + print("Created database {} on instance {}".format(database_id, instance_id)) + + operation = database.update_ddl([ + """CREATE TABLE """ + log_table_id + """ ( Id INT64 NOT NULL, LogFormat INT64, LogPathPrefix STRING(MAX), LogOwner STRING(MAX), LogLabel STRING(MAX) ) PRIMARY KEY (Id)""" - ]) - print("Waiting for operation to complete...") - operation.result(OPERATION_TIMEOUT_SECONDS) - print("Created {} table on database {} on instance {}".format( - log_table_id, database_id, instance_id)) - - operation = database.update_ddl([ - """CREATE TABLE """ + study_table_id + """ ( + ]) + print("Waiting for operation to complete...") + operation.result(OPERATION_TIMEOUT_SECONDS) + print("Created {} table on database {} on instance {}".format( + log_table_id, database_id, instance_id)) + + operation = database.update_ddl([ + """CREATE TABLE """ + study_table_id + """ ( LogId INT64 NOT NULL, StudyName STRING(MAX) ) PRIMARY KEY (LogId)""" - ]) - print("Waiting for operation to complete...") - operation.result(OPERATION_TIMEOUT_SECONDS) - print("Created {} table on database {} on instance {}".format( - study_table_id, database_id, instance_id)) - logging.debug("<<<< 
Out %s of %s", method_name, _file_name) + ]) + print("Waiting for operation to complete...") + operation.result(OPERATION_TIMEOUT_SECONDS) + print("Created {} table on database {} on instance {}".format( + study_table_id, database_id, instance_id)) + logging.debug("<<<< Out %s of %s", method_name, _file_name) def Insert_In_StudyDetails_Table(study_details, instance_id, database_id, study_table_id): - """adds study details to table mapped to unique LogId.""" - method_name = "Insert_In_StudyDetails_Table" - logging.debug(">>>> In %s of %s", method_name, _file_name) + """adds study details to table mapped to unique LogId.""" + method_name = "Insert_In_StudyDetails_Table" + logging.debug(">>>> In %s of %s", method_name, _file_name) - spanner_client = spanner.Client() - instance = spanner_client.instance(instance_id) - database = instance.database(database_id) + spanner_client = spanner.Client() + instance = spanner_client.instance(instance_id) + database = instance.database(database_id) - def insert_StudyDetails(transaction): - query = ( - f"INSERT {study_table_id} (LogId, StudyName) VALUES" - f" ({study_details['LogId']}, '{study_details['StudyName']}')") - # print("StudyDetail query : ", query) + def insert_StudyDetails(transaction): + query = (f"INSERT {study_table_id} (LogId, StudyName) VALUES" + f" ({study_details['LogId']}, '{study_details['StudyName']}')") + # print("StudyDetail query : ", query) - row_ct = transaction.execute_update(query) - print("{} record inserted to spanner table {}".format( - row_ct, study_table_id)) + row_ct = transaction.execute_update(query) + print("{} record inserted to spanner table {}".format( + row_ct, study_table_id)) - database.run_in_transaction(insert_StudyDetails) - logging.debug("<<<< Out %s of %s", method_name, _file_name) + database.run_in_transaction(insert_StudyDetails) + logging.debug("<<<< Out %s of %s", method_name, _file_name) def Fetch_From_StudyDetails_Table(log_id, instance_id, database_id, study_table_id): - """fetch study name from table mapped to unique LogId.""" - method_name = "Fetch_From_StudyDetails_Table" - logging.debug(">>>> In %s of %s", method_name, _file_name) - - spanner_client = spanner.Client() - instance = spanner_client.instance(instance_id) - database = instance.database(database_id) - - with database.snapshot() as snapshot: - query = f"SELECT StudyName FROM {study_table_id} WHERE LogId = {log_id}" - results = snapshot.execute_sql(query) - - # print(results) - for row in results: - # print("For LogId : {} => StudyName: {}".format(log_id ,row[0])) - return row[0] - logging.debug("<<<< Out %s of %s", method_name, _file_name) + """fetch study name from table mapped to unique LogId.""" + method_name = "Fetch_From_StudyDetails_Table" + logging.debug(">>>> In %s of %s", method_name, _file_name) + + spanner_client = spanner.Client() + instance = spanner_client.instance(instance_id) + database = instance.database(database_id) + + with database.snapshot() as snapshot: + query = f"SELECT StudyName FROM {study_table_id} WHERE LogId = {log_id}" + results = snapshot.execute_sql(query) + + # print(results) + for row in results: + # print("For LogId : {} => StudyName: {}".format(log_id ,row[0])) + return row[0] + logging.debug("<<<< Out %s of %s", method_name, _file_name) def Insert_In_LogDetails_Table(new_log_entry, instance_id, database_id, table_id): - """Writes in the sight service database to spanner table. + """Writes in the sight service database to spanner table. 
Returns: """ - method_name = "Insert_In_LogDetails_Table" - logging.debug(">>>> In %s of %s", method_name, _file_name) - - spanner_client = spanner.Client() - instance = spanner_client.instance(instance_id) - database = instance.database(database_id) - - def insert_LogDetails(transaction): - query = ( - f"INSERT {table_id} (Id, LogFormat, LogPathPrefix, LogOwner, LogLabel)" - f" VALUES ({new_log_entry['Id']}, {new_log_entry['LogFormat']}," - f" '{new_log_entry['LogPathPrefix']}', '{new_log_entry['LogOwner']}'," - f" '{new_log_entry['LogLabel']}')") - # print("LogDetail query : ", query) - - row_ct = transaction.execute_update(query) - print("{} record inserted to spanner table {}".format( - row_ct, table_id)) - - database.run_in_transaction(insert_LogDetails) - logging.debug("<<<< Out %s of %s", method_name, _file_name) + method_name = "Insert_In_LogDetails_Table" + logging.debug(">>>> In %s of %s", method_name, _file_name) + spanner_client = spanner.Client() + instance = spanner_client.instance(instance_id) + database = instance.database(database_id) -def Insert_In_ClientData_Table(client_details, instance_id, database_id, - clientdata_table_id): - """adds client details to table.""" + def insert_LogDetails(transaction): + query = ( + f"INSERT {table_id} (Id, LogFormat, LogPathPrefix, LogOwner, LogLabel)" + f" VALUES ({new_log_entry['Id']}, {new_log_entry['LogFormat']}," + f" '{new_log_entry['LogPathPrefix']}', '{new_log_entry['LogOwner']}'," + f" '{new_log_entry['LogLabel']}')") + # print("LogDetail query : ", query) - method_name = "Insert_In_ClientData_Table" - logging.debug(">>>> In %s of %s", method_name, _file_name) + row_ct = transaction.execute_update(query) + print("{} record inserted to spanner table {}".format(row_ct, table_id)) - spanner_client = spanner.Client() - instance = spanner_client.instance(instance_id) - database = instance.database(database_id) + database.run_in_transaction(insert_LogDetails) + logging.debug("<<<< Out %s of %s", method_name, _file_name) - def insert_ClientDetails(transaction): - query = ( - f"INSERT {clientdata_table_id} (sight_id, env, network_path," - f" learner_path, replay_address) VALUES ({client_details['sight_id']}," - f" '{client_details['env']}', '{client_details['network_path']}'," - f" '{client_details['learner_path']}'," - f" '{client_details['replay_address']}')") - # print("StudyDetail query : ", query) - row_ct = transaction.execute_update(query) - print("{} record inserted to spanner table {}".format( - row_ct, clientdata_table_id)) - - database.run_in_transaction(insert_ClientDetails) - logging.debug("<<<< Out %s of %s", method_name, _file_name) +def Insert_In_ClientData_Table(client_details, instance_id, database_id, + clientdata_table_id): + """adds client details to table.""" + + method_name = "Insert_In_ClientData_Table" + logging.debug(">>>> In %s of %s", method_name, _file_name) + + spanner_client = spanner.Client() + instance = spanner_client.instance(instance_id) + database = instance.database(database_id) + + def insert_ClientDetails(transaction): + query = ( + f"INSERT {clientdata_table_id} (sight_id, env, network_path," + f" learner_path, replay_address) VALUES ({client_details['sight_id']}," + f" '{client_details['env']}', '{client_details['network_path']}'," + f" '{client_details['learner_path']}'," + f" '{client_details['replay_address']}')") + # print("StudyDetail query : ", query) + + row_ct = transaction.execute_update(query) + print("{} record inserted to spanner table {}".format( + row_ct, clientdata_table_id)) + 
+  database.run_in_transaction(insert_ClientDetails)
+  logging.debug("<<<< Out %s of %s", method_name, _file_name)
diff --git a/sight_service/single_action_optimizer.py b/sight_service/single_action_optimizer.py
index 8f11bf5..550723d 100644
--- a/sight_service/single_action_optimizer.py
+++ b/sight_service/single_action_optimizer.py
@@ -14,25 +14,26 @@
 """An instance of a Sight optimizer dedicated to a single experiment."""

 from concurrent import futures
+from typing import Any, Dict, List, Sequence, Tuple
+
 from helpers.logs.logs_handler import logger as logging
-from typing import Any, Dict, List, Tuple, Sequence
+from sight.proto import sight_pb2
 from sight_service.optimizer_instance import OptimizerInstance
 from sight_service.proto import service_pb2
-from sight.proto import sight_pb2

 _file_name = "single_action_optimizer.py"


 class SingleActionOptimizer(OptimizerInstance):
-    """An SingleActionOptimizer class that is generic for all optimizers.
+  """A SingleActionOptimizer class that is generic across all optimizers.

   An optimizer containing base methods which specialized optimizers will
   override while communicating with client.
   """

-    def __init__(self):
-        super().__init__()
-        self.unique_id = 1
-        self.pending_samples = {}
-        self.active_samples = {}
-        self.completed_samples = {}
+  def __init__(self):
+    super().__init__()
+    self.unique_id = 1
+    self.pending_samples = {}
+    self.active_samples = {}
+    self.completed_samples = {}
diff --git a/sight_service/smc_py.py b/sight_service/smc_py.py
index bd0bb0e..fc68bca 100644
--- a/sight_service/smc_py.py
+++ b/sight_service/smc_py.py
@@ -13,242 +13,235 @@
 # limitations under the License.
 """LLM-based optimization for driving Sight applications."""

-from helpers.logs.logs_handler import logger as logging
-from overrides import overrides
+import json
+import os
+import queue
+import threading
 from typing import Any, Dict, List, Tuple

+from helpers.logs.logs_handler import logger as logging
+import numpy as np
+from overrides import overrides
 from scipy.stats import uniform
-from sight_service.optimizer_instance import param_dict_to_proto
+from sight.proto import sight_pb2
 from sight_service.optimizer_instance import OptimizerInstance
+from sight_service.optimizer_instance import param_dict_to_proto
 from sight_service.proto import service_pb2
-from sight.proto import sight_pb2
-import json
-import numpy as np
-import os
-import queue
 from smcpy import AdaptiveSampler as Sampler
-from smcpy import VectorMCMC, VectorMCMCKernel
-import threading
+from smcpy import VectorMCMC
+from smcpy import VectorMCMCKernel

 # Initialize model


 class ModelSamplingDriver():
   '''
   Driver for communicating with SMC.
''' - def __init__(self, param_names: List[str], priors: List, std_dev: float): - self._buf_size = 50 - self._model_inputs_meta_q = queue.Queue(1) - self._model_inputs_q = queue.Queue(self._buf_size) - self._model_outputs_meta_q = queue.Queue(1) - self._model_outputs_q = queue.Queue(self._buf_size) - - # Define prior distributions & MCMC kernel - self._vector_mcmc = VectorMCMC(self.evaluate, [0], priors, std_dev) - self._mcmc_kernel = VectorMCMCKernel(self._vector_mcmc, - param_order=param_names) - self._smc = Sampler(self._mcmc_kernel) - self._num_mcmc_samples = 5 - - def sample(self): - step_list, mll_list = self._smc.sample( - num_particles=self._buf_size, - num_mcmc_samples=self._num_mcmc_samples, - target_ess=0.8) - self._model_inputs_meta_q.put(-1) - # print ('step_list=', step_list.__dict__) - # print ('step_list=', step_list.mean()) - # print ('mll_list=', mll_list) - - print(f'phi_sequence={self._smc.phi_sequence}') - print(f'fbf norm index={self._smc.req_phi_index}') - print('marginal log likelihood = {}'.format(mll_list[-1])) - print('parameter means = {}'.format(step_list[-1].compute_mean())) - - def evaluate(self, params): - print('<<< ModelSamplingDriver evaluate() #params=', len(params)) - self._model_inputs_meta_q.put(len(params)) - for i, p in enumerate(params): - self._model_inputs_q.put({'idx': i, 'params': p}) - - results = [None] * len(params) - for i in range(len(params)): - v = self._model_outputs_q.get() - results[v['idx']] = v['result'] - print('>>> ModelSamplingDriver evaluate() #results=', len(results)) - return np.array(results) + def __init__(self, param_names: List[str], priors: List, std_dev: float): + self._buf_size = 50 + self._model_inputs_meta_q = queue.Queue(1) + self._model_inputs_q = queue.Queue(self._buf_size) + self._model_outputs_meta_q = queue.Queue(1) + self._model_outputs_q = queue.Queue(self._buf_size) + + # Define prior distributions & MCMC kernel + self._vector_mcmc = VectorMCMC(self.evaluate, [0], priors, std_dev) + self._mcmc_kernel = VectorMCMCKernel(self._vector_mcmc, + param_order=param_names) + self._smc = Sampler(self._mcmc_kernel) + self._num_mcmc_samples = 5 + + def sample(self): + step_list, mll_list = self._smc.sample( + num_particles=self._buf_size, + num_mcmc_samples=self._num_mcmc_samples, + target_ess=0.8) + self._model_inputs_meta_q.put(-1) + # print ('step_list=', step_list.__dict__) + # print ('step_list=', step_list.mean()) + # print ('mll_list=', mll_list) + + print(f'phi_sequence={self._smc.phi_sequence}') + print(f'fbf norm index={self._smc.req_phi_index}') + print('marginal log likelihood = {}'.format(mll_list[-1])) + print('parameter means = {}'.format(step_list[-1].compute_mean())) + + def evaluate(self, params): + print('<<< ModelSamplingDriver evaluate() #params=', len(params)) + self._model_inputs_meta_q.put(len(params)) + for i, p in enumerate(params): + self._model_inputs_q.put({'idx': i, 'params': p}) + + results = [None] * len(params) + for i in range(len(params)): + v = self._model_outputs_q.get() + results[v['idx']] = v['result'] + print('>>> ModelSamplingDriver evaluate() #results=', len(results)) + return np.array(results) class SMCPy(OptimizerInstance): - """Uses the SMCPy library to choose the parameters of the code. + """Uses the SMCPy library to choose the parameters of the code. Attributes: possible_values: Maps each action attributes to the list of possible values of this attribute. 
""" - def __init__(self): - super(SMCPy, self).__init__() - self.num_samples_issued = 0 - self.active_samples = {} - self.complete_samples = {} - self.possible_values = {} - self._lock = threading.RLock() - self._driver = None - - @overrides - def launch( - self, - request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: - response = super(SMCPy, self).launch(request) - - # self.possible_values = {} - # for i, key in enumerate(sorted(self.actions.keys())): - # if self.actions[key].valid_float_values: - # self.possible_values[key] = list(self.actions[key].valid_float_values) - # elif self.actions[key].step_size: - # self.possible_values[key] = [] - # cur = self.actions[key].min_value - # while cur <= self.actions[key].max_value: - # self.possible_values[key].append(cur) - # cur += self.actions[key].step_size - # print('possible_values=%s' % self.possible_values) - - self._param_names = list(sorted(self.actions.keys())) - self._driver = ModelSamplingDriver(param_names=self._param_names, - priors=[ - uniform( - self.actions[key].min_value, + def __init__(self): + super(SMCPy, self).__init__() + self.num_samples_issued = 0 + self.active_samples = {} + self.complete_samples = {} + self.possible_values = {} + self._lock = threading.RLock() + self._driver = None + + @overrides + def launch(self, + request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: + response = super(SMCPy, self).launch(request) + + # self.possible_values = {} + # for i, key in enumerate(sorted(self.actions.keys())): + # if self.actions[key].valid_float_values: + # self.possible_values[key] = list(self.actions[key].valid_float_values) + # elif self.actions[key].step_size: + # self.possible_values[key] = [] + # cur = self.actions[key].min_value + # while cur <= self.actions[key].max_value: + # self.possible_values[key].append(cur) + # cur += self.actions[key].step_size + # print('possible_values=%s' % self.possible_values) + + self._param_names = list(sorted(self.actions.keys())) + self._driver = ModelSamplingDriver(param_names=self._param_names, + priors=[ + uniform(self.actions[key].min_value, self.actions[key].max_value) - for key in self._param_names - ], - std_dev=0.5) - self._smc_thread = threading.Thread(target=self._driver.sample, - args=()) - self._smc_thread.start() - - self._num_samples_in_cur_batch = 0 - self._sample_idx = 0 - self._num_samples_complete = 0 - self._num_samples_remaining = 0 - - response.display_string = 'SMCPy Start' - print('response=%s' % response) - return response - - def _params_to_dict(self, dp: sight_pb2) -> Dict[str, float]: - """Returns the dict representation of a DecisionParams proto""" - d = {} - for a in dp: - d[a.key] = a.value.double_value - return d - - @overrides - def decision_point( - self, request: service_pb2.DecisionPointRequest - ) -> service_pb2.DecisionPointResponse: - logging.info('DecisionPoint request=%s', request) - logging.info('DecisionPoint self._lock=%s', self._lock) - - self._lock.acquire() - logging.info( - 'decision_point() _sample_idx=%s, self._num_samples_in_cur_batch=%s, self._num_samples_remaining=%s, self._num_samples_complete=%s', - self._sample_idx, self._num_samples_in_cur_batch, - self._num_samples_remaining, self._num_samples_complete) - - dp_response = service_pb2.DecisionPointResponse() - logging.info('dp_response=%s', dp_response) - params = [] - if self._sample_idx == self._num_samples_in_cur_batch and \ - self._num_samples_complete < self._num_samples_remaining: - logging.info('AT_RETRY') - self._lock.release() - 
dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_RETRY - return dp_response - - logging.info('Start new batch') - # Start new batch - if self._sample_idx == self._num_samples_in_cur_batch: - logging.info('Starting new batch') - self._num_samples_in_cur_batch = self._driver._model_inputs_meta_q.get( - ) - self._sample_idx = 0 - self._num_samples_complete = 0 - - logging.info('Getting Params') - - params = self._driver._model_inputs_q.get()['params'] - - self.active_samples[request.worker_id] = { - 'action': params, - 'sample_num': self.num_samples_issued, - 'idx': self._sample_idx, - } - self._sample_idx += 1 - - self.num_samples_issued += 1 - self._lock.release() - - for i, value in enumerate(params): - a = dp_response.action.add() - a.key = self._param_names[i] - a.value.double_value = float(value) - - print('DecisionPoint response=%s' % dp_response) - dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT - return dp_response - - @overrides - def finalize_episode( - self, request: service_pb2.FinalizeEpisodeRequest - ) -> service_pb2.FinalizeEpisodeResponse: - logging.info('FinalizeEpisode request=%s', request) - d = {} - for a in request.decision_point.choice_params: - d[a.key] = a.value.double_value - result = [d[key] for key in self._param_names] - - self._lock.acquire() - self._driver._model_outputs_q.put({ - 'idx': - self.active_samples[request.worker_id]['idx'], - 'result': - result, - }) - self._num_samples_complete += 1 - - logging.info('FinalizeEpisode outcome=%s / %s', - request.decision_outcome.reward, d) - del self.active_samples[request.worker_id] - self._lock.release() - return service_pb2.FinalizeEpisodeResponse(response_str='Success!') - - @overrides - def current_status( - self, request: service_pb2.CurrentStatusRequest - ) -> service_pb2.CurrentStatusResponse: - response = '[SMCPy (num_ask=#%s, num_tell=#%s)\n' % ( - self._optimizer.num_ask, self._optimizer.num_tell) - - self._lock.acquire() - response += 'sample_num, ' + ', '.join(list( - self.actions)) + ', outcome\n' - cur = [0] * len(self.actions) - keys = sorted(self.actions.keys()) - logging.info('self.complete_samples=%s', self.complete_samples) - for s in sorted(self.complete_samples.items(), - key=lambda x: x[1]['outcome'], - reverse=True): - response += str(s[0]) + ', ' - response += ', '.join([str(s[1]['action'][key]) for key in keys]) - response += ', ' + str(s[1]['outcome']) + '\n' - - response += 'pareto_front:\n' - for trial in self._optimizer.pareto_front(): - response += ', '.join([str(trial.args[0][key]) - for key in keys]) + '\n' - response += ']\n' - self._lock.release() - - return service_pb2.CurrentStatusResponse(response_str=response) + for key in self._param_names + ], + std_dev=0.5) + self._smc_thread = threading.Thread(target=self._driver.sample, args=()) + self._smc_thread.start() + + self._num_samples_in_cur_batch = 0 + self._sample_idx = 0 + self._num_samples_complete = 0 + self._num_samples_remaining = 0 + + response.display_string = 'SMCPy Start' + print('response=%s' % response) + return response + + def _params_to_dict(self, dp: sight_pb2) -> Dict[str, float]: + """Returns the dict representation of a DecisionParams proto""" + d = {} + for a in dp: + d[a.key] = a.value.double_value + return d + + @overrides + def decision_point( + self, request: service_pb2.DecisionPointRequest + ) -> service_pb2.DecisionPointResponse: + logging.info('DecisionPoint request=%s', request) + logging.info('DecisionPoint self._lock=%s', self._lock) + + 
self._lock.acquire() + logging.info( + 'decision_point() _sample_idx=%s, self._num_samples_in_cur_batch=%s, self._num_samples_remaining=%s, self._num_samples_complete=%s', + self._sample_idx, self._num_samples_in_cur_batch, + self._num_samples_remaining, self._num_samples_complete) + + dp_response = service_pb2.DecisionPointResponse() + logging.info('dp_response=%s', dp_response) + params = [] + if self._sample_idx == self._num_samples_in_cur_batch and \ + self._num_samples_complete < self._num_samples_remaining: + logging.info('AT_RETRY') + self._lock.release() + dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_RETRY + return dp_response + + logging.info('Start new batch') + # Start new batch + if self._sample_idx == self._num_samples_in_cur_batch: + logging.info('Starting new batch') + self._num_samples_in_cur_batch = self._driver._model_inputs_meta_q.get() + self._sample_idx = 0 + self._num_samples_complete = 0 + + logging.info('Getting Params') + + params = self._driver._model_inputs_q.get()['params'] + + self.active_samples[request.worker_id] = { + 'action': params, + 'sample_num': self.num_samples_issued, + 'idx': self._sample_idx, + } + self._sample_idx += 1 + + self.num_samples_issued += 1 + self._lock.release() + + for i, value in enumerate(params): + a = dp_response.action.add() + a.key = self._param_names[i] + a.value.double_value = float(value) + + print('DecisionPoint response=%s' % dp_response) + dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT + return dp_response + + @overrides + def finalize_episode( + self, request: service_pb2.FinalizeEpisodeRequest + ) -> service_pb2.FinalizeEpisodeResponse: + logging.info('FinalizeEpisode request=%s', request) + d = {} + for a in request.decision_point.choice_params: + d[a.key] = a.value.double_value + result = [d[key] for key in self._param_names] + + self._lock.acquire() + self._driver._model_outputs_q.put({ + 'idx': self.active_samples[request.worker_id]['idx'], + 'result': result, + }) + self._num_samples_complete += 1 + + logging.info('FinalizeEpisode outcome=%s / %s', + request.decision_outcome.reward, d) + del self.active_samples[request.worker_id] + self._lock.release() + return service_pb2.FinalizeEpisodeResponse(response_str='Success!') + + @overrides + def current_status( + self, request: service_pb2.CurrentStatusRequest + ) -> service_pb2.CurrentStatusResponse: + response = '[SMCPy (num_ask=#%s, num_tell=#%s)\n' % ( + self._optimizer.num_ask, self._optimizer.num_tell) + + self._lock.acquire() + response += 'sample_num, ' + ', '.join(list(self.actions)) + ', outcome\n' + cur = [0] * len(self.actions) + keys = sorted(self.actions.keys()) + logging.info('self.complete_samples=%s', self.complete_samples) + for s in sorted(self.complete_samples.items(), + key=lambda x: x[1]['outcome'], + reverse=True): + response += str(s[0]) + ', ' + response += ', '.join([str(s[1]['action'][key]) for key in keys]) + response += ', ' + str(s[1]['outcome']) + '\n' + + response += 'pareto_front:\n' + for trial in self._optimizer.pareto_front(): + response += ', '.join([str(trial.args[0][key]) for key in keys]) + '\n' + response += ']\n' + self._lock.release() + + return service_pb2.CurrentStatusResponse(response_str=response) diff --git a/sight_service/vizier.py b/sight_service/vizier.py index af1fa82..d143789 100644 --- a/sight_service/vizier.py +++ b/sight_service/vizier.py @@ -13,18 +13,18 @@ # limitations under the License. 
"""Vizier Bayesian optimizer for driving Sight applications.""" -from helpers.logs.logs_handler import logger as logging +from datetime import datetime import os -from overrides import overrides from typing import Any, Dict, List, Tuple -from datetime import datetime -from absl import flags -from google.cloud import aiplatform +from absl import flags from dotenv import load_dotenv -from sight_service.proto import service_pb2 -from sight_service.optimizer_instance import param_dict_to_proto +from google.cloud import aiplatform +from helpers.logs.logs_handler import logger as logging +from overrides import overrides from sight_service.optimizer_instance import OptimizerInstance +from sight_service.optimizer_instance import param_dict_to_proto +from sight_service.proto import service_pb2 load_dotenv() PROJECT_ID = os.environ['PROJECT_ID'] @@ -37,175 +37,166 @@ def _get_vizier_study_display_name(client_id: str, label: str) -> str: - return ('Sight_' + label.replace(' ', '_') + '_' + str(client_id) + '_' + - datetime.now().strftime('%Y%m%d_%H%M%S')) + return ('Sight_' + label.replace(' ', '_') + '_' + str(client_id) + '_' + + datetime.now().strftime('%Y%m%d_%H%M%S')) def _get_vizier_study_config(client_id: str, label: str, study_config_param): - """Generate a Vizier StudyConfig from command-line flags.""" - method_name = "_get_vizier_study_config" - logging.debug(">>>> In %s of %s", method_name, _file_name) - study_params = [] - for attr in study_config_param.action_attrs: - study_params.append({ - 'parameter_id': attr, - 'double_value_spec': { - 'min_value': study_config_param.action_attrs[attr].min_value, - 'max_value': study_config_param.action_attrs[attr].max_value, - }, - }) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return { - 'display_name': _get_vizier_study_display_name(client_id, label), - 'study_spec': { - 'algorithm': 'ALGORITHM_UNSPECIFIED', - 'parameters': study_params, - 'metrics': [{ - 'metric_id': 'outcome', - 'goal': 'MAXIMIZE' - }], + """Generate a Vizier StudyConfig from command-line flags.""" + method_name = "_get_vizier_study_config" + logging.debug(">>>> In %s of %s", method_name, _file_name) + study_params = [] + for attr in study_config_param.action_attrs: + study_params.append({ + 'parameter_id': attr, + 'double_value_spec': { + 'min_value': study_config_param.action_attrs[attr].min_value, + 'max_value': study_config_param.action_attrs[attr].max_value, }, - } + }) + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return { + 'display_name': _get_vizier_study_display_name(client_id, label), + 'study_spec': { + 'algorithm': 'ALGORITHM_UNSPECIFIED', + 'parameters': study_params, + 'metrics': [{ + 'metric_id': 'outcome', + 'goal': 'MAXIMIZE' + }], + }, + } class Vizier(OptimizerInstance): - """Vizier specific implementation of OptimizerInstance class. + """Vizier specific implementation of OptimizerInstance class. 
""" - def __init__(self): - super().__init__() - self.vizier_study = '' - self.current_trial: Dict[str, str] = {} - self.vizier_url = '' - self._total_count = 0 - self._completed_count = 0 - - @overrides - def launch( - self, - request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: - method_name = "launch" - logging.debug(">>>> In %s of %s", method_name, _file_name) - launch_response = super(Vizier, self).launch(request) - - self._total_count = request.decision_config_params.num_trials - study_config = _get_vizier_study_config(request.client_id, - request.label, - request.decision_config_params) - vizier_response = _vizier_client.create_study( - parent=f'projects/{PROJECT_ID}/locations/{PROJECT_REGION}', - study=study_config) - vizier_url = ('https://pantheon.corp.google.com/vertex-ai/locations/' + - PROJECT_REGION + '/studies/' + - vizier_response.name.split('/')[-1] + '?project=' + - PROJECT_ID) - self.vizier_url = vizier_url - - self.vizier_study = vizier_response.name - logging.info('updated self : %s', str(self.__dict__)) - - launch_response.display_string = vizier_url - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return launch_response - - @overrides - def decision_point( - self, request: service_pb2.DecisionPointRequest - ) -> service_pb2.DecisionPointResponse: - method_name = "decision_point" - logging.debug(">>>> In %s of %s", method_name, _file_name) - response = (_vizier_client.suggest_trials({ - 'parent': - self.vizier_study, - 'suggestion_count': - 1, - 'client_id': - request.worker_id, - }).result().trials) - - self.current_trial[request.worker_id] = response[0].name - - dp_response = service_pb2.DecisionPointResponse() - dp_response.action.extend( - param_dict_to_proto({ - param.parameter_id: param.value - for param in response[0].parameters - })) - dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return dp_response - - @overrides - def finalize_episode( - self, request: service_pb2.FinalizeEpisodeRequest - ) -> service_pb2.FinalizeEpisodeResponse: - method_name = "finalize_episode" - logging.debug(">>>> In %s of %s", method_name, _file_name) - metrics = [] - metrics_obj = {} - metrics_obj['metric_id'] = request.decision_outcome.outcome_label - metrics_obj['value'] = request.decision_outcome.reward - metrics.append(metrics_obj) - - if request.worker_id not in self.current_trial: - logging.info('Given worker not found......') - logging.info('current key(worker) is = %s', request.worker_id) - logging.info('current instance = %s', str(self)) - return service_pb2.FinalizeEpisodeResponse( - response_str=f'Worker {request.worker_id} has no known trial!') - - logging.info('FinalizeEpisode metrics=%s', metrics) - _vizier_client.complete_trial({ - 'name': - self.current_trial[request.worker_id], - 'final_measurement': { - 'metrics': metrics - }, - }) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return service_pb2.FinalizeEpisodeResponse(response_str='Success!') - - @overrides - def current_status( - self, request: service_pb2.CurrentStatusRequest - ) -> service_pb2.CurrentStatusResponse: - method_name = "current_status" - logging.debug(">>>> In %s of %s", method_name, _file_name) - # optimal = _vizier_client.list_optimal_trials({ - # 'parent': self.vizier_study, - # }) - print('user can check status of vizier study here : ', self.vizier_url) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return 
service_pb2.CurrentStatusResponse( - response_str=str(self.vizier_url)) - - @overrides - def fetch_optimal_action( - self, request: service_pb2.FetchOptimalActionRequest - ) -> service_pb2.FetchOptimalActionResponse: - method_name = "fetch_optimal_action" - logging.debug(">>>> In %s of %s", method_name, _file_name) - optimal = _vizier_client.list_optimal_trials({ - 'parent': - self.vizier_study, - }) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return service_pb2.CurrentStatusResponse(response_str=str(optimal)) - - @overrides - def WorkerAlive( - self, request: service_pb2.WorkerAliveRequest - ) -> service_pb2.WorkerAliveResponse: - method_name = "WorkerAlive" - logging.debug(">>>> In %s of %s", method_name, _file_name) - if (self._completed_count == self._total_count): - worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_DONE - # elif(not self.pending_samples): - # worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_RETRY - else: - # Increasing count here so that multiple workers can't enter the dp call for same sample at last - self._completed_count += 1 - worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_ACT - logging.info("worker_alive_status is %s", worker_alive_status) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return service_pb2.WorkerAliveResponse(status_type=worker_alive_status) + def __init__(self): + super().__init__() + self.vizier_study = '' + self.current_trial: Dict[str, str] = {} + self.vizier_url = '' + self._total_count = 0 + self._completed_count = 0 + + @overrides + def launch(self, + request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: + method_name = "launch" + logging.debug(">>>> In %s of %s", method_name, _file_name) + launch_response = super(Vizier, self).launch(request) + + self._total_count = request.decision_config_params.num_trials + study_config = _get_vizier_study_config(request.client_id, request.label, + request.decision_config_params) + vizier_response = _vizier_client.create_study( + parent=f'projects/{PROJECT_ID}/locations/{PROJECT_REGION}', + study=study_config) + vizier_url = ('https://pantheon.corp.google.com/vertex-ai/locations/' + + PROJECT_REGION + '/studies/' + + vizier_response.name.split('/')[-1] + '?project=' + + PROJECT_ID) + self.vizier_url = vizier_url + + self.vizier_study = vizier_response.name + logging.info('updated self : %s', str(self.__dict__)) + + launch_response.display_string = vizier_url + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return launch_response + + @overrides + def decision_point( + self, request: service_pb2.DecisionPointRequest + ) -> service_pb2.DecisionPointResponse: + method_name = "decision_point" + logging.debug(">>>> In %s of %s", method_name, _file_name) + response = (_vizier_client.suggest_trials({ + 'parent': self.vizier_study, + 'suggestion_count': 1, + 'client_id': request.worker_id, + }).result().trials) + + self.current_trial[request.worker_id] = response[0].name + + dp_response = service_pb2.DecisionPointResponse() + dp_response.action.extend( + param_dict_to_proto({ + param.parameter_id: param.value for param in response[0].parameters + })) + dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return dp_response + + @overrides + def finalize_episode( + self, request: service_pb2.FinalizeEpisodeRequest + ) -> service_pb2.FinalizeEpisodeResponse: + method_name = "finalize_episode" + 
logging.debug(">>>> In %s of %s", method_name, _file_name) + metrics = [] + metrics_obj = {} + metrics_obj['metric_id'] = request.decision_outcome.outcome_label + metrics_obj['value'] = request.decision_outcome.reward + metrics.append(metrics_obj) + + if request.worker_id not in self.current_trial: + logging.info('Given worker not found......') + logging.info('current key(worker) is = %s', request.worker_id) + logging.info('current instance = %s', str(self)) + return service_pb2.FinalizeEpisodeResponse( + response_str=f'Worker {request.worker_id} has no known trial!') + + logging.info('FinalizeEpisode metrics=%s', metrics) + _vizier_client.complete_trial({ + 'name': self.current_trial[request.worker_id], + 'final_measurement': { + 'metrics': metrics + }, + }) + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return service_pb2.FinalizeEpisodeResponse(response_str='Success!') + + @overrides + def current_status( + self, request: service_pb2.CurrentStatusRequest + ) -> service_pb2.CurrentStatusResponse: + method_name = "current_status" + logging.debug(">>>> In %s of %s", method_name, _file_name) + # optimal = _vizier_client.list_optimal_trials({ + # 'parent': self.vizier_study, + # }) + print('user can check status of vizier study here : ', self.vizier_url) + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return service_pb2.CurrentStatusResponse(response_str=str(self.vizier_url)) + + @overrides + def fetch_optimal_action( + self, request: service_pb2.FetchOptimalActionRequest + ) -> service_pb2.FetchOptimalActionResponse: + method_name = "fetch_optimal_action" + logging.debug(">>>> In %s of %s", method_name, _file_name) + optimal = _vizier_client.list_optimal_trials({ + 'parent': self.vizier_study, + }) + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return service_pb2.CurrentStatusResponse(response_str=str(optimal)) + + @overrides + def WorkerAlive( + self, request: service_pb2.WorkerAliveRequest + ) -> service_pb2.WorkerAliveResponse: + method_name = "WorkerAlive" + logging.debug(">>>> In %s of %s", method_name, _file_name) + if (self._completed_count == self._total_count): + worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_DONE + # elif(not self.pending_samples): + # worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_RETRY + else: + # Increasing count here so that multiple workers can't enter the dp call for same sample at last + self._completed_count += 1 + worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_ACT + logging.info("worker_alive_status is %s", worker_alive_status) + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return service_pb2.WorkerAliveResponse(status_type=worker_alive_status) diff --git a/sight_service/worklist_scheduler_opt.py b/sight_service/worklist_scheduler_opt.py index 6af02af..7462389 100644 --- a/sight_service/worklist_scheduler_opt.py +++ b/sight_service/worklist_scheduler_opt.py @@ -13,288 +13,283 @@ # limitations under the License. 
"""Exhaustive search for driving Sight applications.""" -from helpers.logs.logs_handler import logger as logging -from readerwriterlock import rwlock -from overrides import overrides +import threading from typing import Any, Dict, List, Tuple +from helpers.logs.logs_handler import logger as logging +from overrides import overrides +from readerwriterlock import rwlock from sight.proto import sight_pb2 -from sight_service.proto import service_pb2 -from sight_service.single_action_optimizer import SingleActionOptimizer -from sight_service.optimizer_instance import param_dict_to_proto +from sight.widgets.decision import utils # from sight_service.optimizer_instance import OptimizerInstance +from sight_service.optimizer_instance import param_dict_to_proto from sight_service.optimizer_instance import param_proto_to_dict -import threading -from sight.widgets.decision import utils +from sight_service.proto import service_pb2 +from sight_service.single_action_optimizer import SingleActionOptimizer _file_name = "exhaustive_search.py" class WorklistScheduler(SingleActionOptimizer): - """Exhaustively searches over all the possible values of the action attributes. + """Exhaustively searches over all the possible values of the action attributes. Attributes: possible_values: Maps each action attributes to the list of possible values of this attribute. """ - def __init__(self): - super().__init__() - self.next_sample_to_issue = [] - self.last_sample = False - self.exp_completed = False - self.possible_values = {} - self.max_reward_sample = {} - self.pending_lock = rwlock.RWLockFair() - self.active_lock = rwlock.RWLockFair() - self.completed_lock = rwlock.RWLockFair() - - @overrides - def launch( - self, - request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: - method_name = "launch" - logging.debug(">>>> In %s of %s", method_name, _file_name) - response = super(WorklistScheduler, self).launch(request) - response.display_string = 'Worklist Scheduler SUCCESS!' 
- logging.debug("<<<< Out %s of %s", method_name, _file_name) - return response - - @overrides - def propose_action( - self, request: service_pb2.ProposeActionRequest - ) -> service_pb2.ProposeActionResponse: - # print('request in propose actions: ', request) - - attributes = param_proto_to_dict(request.attributes) - action_attrs = param_proto_to_dict(request.action_attrs) - - with self.pending_lock.gen_wlock(): - self.pending_samples[self.unique_id] = [action_attrs, attributes] - - # print('self.pending_samples : ', - # self.pending_samples) - # print('self.active_samples : ', - # self.active_samples) - # print('self.completed_samples : ', - # self.completed_samples) - print('self.unique_id : ', self.unique_id) - - # Create response - response = service_pb2.ProposeActionResponse(action_id=self.unique_id) - self.unique_id += 1 - return response - - @overrides - def GetOutcome( - self, request: service_pb2.GetOutcomeRequest - ) -> service_pb2.GetOutcomeResponse: - # print('self.pending_samples : ', - # self.pending_samples) - # print('self.active_samples : ', - # self.active_samples) - # print('self.completed_samples : ', - # self.completed_samples) - with self.completed_lock.gen_rlock(): - completed_samples = self.completed_samples - with self.pending_lock.gen_rlock(): - pending_samples = self.pending_samples - with self.active_lock.gen_rlock(): - active_samples = self.active_samples - - response = service_pb2.GetOutcomeResponse() - if (request.unique_ids): - required_samples = list(request.unique_ids) - for sample_id in required_samples: - outcome = response.outcome.add() - outcome.action_id = sample_id - if (sample_id in completed_samples): - sample_details = self.completed_samples[sample_id] - outcome.status = service_pb2.GetOutcomeResponse.Outcome.Status.COMPLETED - outcome.reward = sample_details['reward'] - outcome.action_attrs.extend( - param_dict_to_proto(sample_details['action'])) - outcome.outcome_attrs.extend( - param_dict_to_proto(sample_details['outcome'])) - outcome.attributes.extend( - param_dict_to_proto(sample_details['attribute'])) - elif (sample_id in pending_samples): - outcome.status = service_pb2.GetOutcomeResponse.Outcome.Status.PENDING - outcome.response_str = '!! requested sample not yet assigned to any worker !!' - elif any(value['id'] == sample_id - for value in active_samples.values()): - outcome.status = service_pb2.GetOutcomeResponse.Outcome.Status.ACTIVE - outcome.response_str = '!! requested sample not completed yet !!' - else: - outcome.status = service_pb2.GetOutcomeResponse.Outcome.Status.NOT_EXIST - outcome.response_str = f'!! requested sample Id {sample_id} does not exist !!' - - print("!! 
NOT EXIST !!") - with self.active_lock.gen_rlock(): - print(self.active_samples) - with self.pending_lock.gen_rlock(): - print(self.pending_samples) - with self.completed_lock.gen_rlock(): - print(self.completed_samples) - else: - for sample_id in completed_samples.keys(): - sample_details = completed_samples[sample_id] - outcome = response.outcome.add() - outcome.action_id = sample_id - outcome.status = service_pb2.GetOutcomeResponse.Outcome.Status.COMPLETED - outcome.reward = sample_details['reward'] - - outcome.action_attrs.extend( - param_dict_to_proto(sample_details['action'])) - - outcome.outcome_attrs.extend( - param_dict_to_proto(sample_details['outcome'])) - - outcome.attributes.extend( - param_dict_to_proto(sample_details['attribute'])) - - # print('response here: ', response) - return response - - @overrides - def decision_point( - self, request: service_pb2.DecisionPointRequest - ) -> service_pb2.DecisionPointResponse: - method_name = "decision_point" - logging.debug(">>>> In %s of %s", method_name, _file_name) - - # print('self.pending_samples : ', - # self.pending_samples) - # print('self.active_samples : ', - # self.active_samples) - # print('self.completed_samples : ', - # self.completed_samples) - # print('self.unique_id : ', self.unique_id) - - dp_response = service_pb2.DecisionPointResponse() - # if(self.exp_completed): - # logging.info("sight experiment completed, killing the worker") - # dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_DONE - # else: - # if self.pending_samples: - - # todo : meetashah : add logic to fetch action stored from propose actions and send it as repsonse - # key, sample = self.pending_samples.popitem() - # fetching the key in FIFO manner - - #? this part now handled by worker alive rpc - # with self.pending_lock.gen_wlock(): - # key = next(iter(self.pending_samples)) - # sample = self.pending_samples.pop(key) - - # with self.active_lock.gen_wlock(): - # self.active_samples[request.worker_id] = {'id': key, 'sample': sample} - - with self.active_lock.gen_rlock(): - if (request.worker_id in self.active_samples): - sample = self.active_samples[request.worker_id]['sample'] - else: - raise ValueError("key not foung in active_samples") - next_action = sample[0] - logging.info('next_action=%s', next_action) - # raise SystemExit - dp_response.action.extend(param_dict_to_proto(next_action)) - # print('self.active_samples : ', self.active_samples) - # print('self.pending_samples : ', self.pending_samples) - # print('self.completed_samples : ', self.completed_samples) - dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT - # else: - # dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_RETRY - - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return dp_response - - @overrides - def finalize_episode( - self, request: service_pb2.FinalizeEpisodeRequest - ) -> service_pb2.FinalizeEpisodeResponse: - method_name = "finalize_episode" - logging.debug(">>>> In %s of %s", method_name, _file_name) - - # logging.info("req in finalize episode of dummy.py : %s", request) - - with self.active_lock.gen_rlock(): - sample_dict = self.active_samples[request.worker_id] - - with self.completed_lock.gen_wlock(): - self.completed_samples[sample_dict['id']] = { - # 'action': self.pending_samples[unique_action_id], - 'action': - param_proto_to_dict(request.decision_point.choice_params), - 'attribute': - sample_dict['sample'][1], - 'reward': - request.decision_outcome.reward, - 'outcome': - 
param_proto_to_dict(request.decision_outcome.outcome_params) - } - - with self.active_lock.gen_wlock(): - del self.active_samples[request.worker_id] - - # print('self.active_samples : ', self.active_samples) - # print('self.pending_samples : ', self.pending_samples) - # print('self.completed_samples : ', self.completed_samples) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return service_pb2.FinalizeEpisodeResponse(response_str='Success!') - - @overrides - def current_status( - self, request: service_pb2.CurrentStatusRequest - ) -> service_pb2.CurrentStatusResponse: - method_name = "current_status" - logging.debug(">>>> In %s of %s", method_name, _file_name) - # add logic to check status - ref from exhaustive search - - @overrides - def fetch_optimal_action( - self, request: service_pb2.FetchOptimalActionRequest - ) -> service_pb2.FetchOptimalActionResponse: - method_name = "fetch_optimal_action" - logging.debug(">>>> In %s of %s", method_name, _file_name) - # add logic to check status - ref from exhaustive search - logging.debug("<<<< Out %s of %s", method_name, _file_name) - - @overrides - def close(self, - request: service_pb2.CloseRequest) -> service_pb2.CloseResponse: - method_name = "close" - logging.debug(">>>> In %s of %s", method_name, _file_name) - self.exp_completed = True - logging.info( - "sight experiment completed...., changed exp_completed to True") - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return service_pb2.CloseResponse(response_str="success") - - @overrides - def WorkerAlive( - self, request: service_pb2.WorkerAliveRequest - ) -> service_pb2.WorkerAliveResponse: - method_name = "WorkerAlive" - logging.debug(">>>> In %s of %s", method_name, _file_name) - if (self.exp_completed): - worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_DONE - elif (not self.pending_samples): - worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_RETRY + def __init__(self): + super().__init__() + self.next_sample_to_issue = [] + self.last_sample = False + self.exp_completed = False + self.possible_values = {} + self.max_reward_sample = {} + self.pending_lock = rwlock.RWLockFair() + self.active_lock = rwlock.RWLockFair() + self.completed_lock = rwlock.RWLockFair() + + @overrides + def launch(self, + request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: + method_name = "launch" + logging.debug(">>>> In %s of %s", method_name, _file_name) + response = super(WorklistScheduler, self).launch(request) + response.display_string = 'Worklist Scheduler SUCCESS!' 
+ logging.debug("<<<< Out %s of %s", method_name, _file_name) + return response + + @overrides + def propose_action( + self, request: service_pb2.ProposeActionRequest + ) -> service_pb2.ProposeActionResponse: + # print('request in propose actions: ', request) + + attributes = param_proto_to_dict(request.attributes) + action_attrs = param_proto_to_dict(request.action_attrs) + + with self.pending_lock.gen_wlock(): + self.pending_samples[self.unique_id] = [action_attrs, attributes] + + # print('self.pending_samples : ', + # self.pending_samples) + # print('self.active_samples : ', + # self.active_samples) + # print('self.completed_samples : ', + # self.completed_samples) + print('self.unique_id : ', self.unique_id) + + # Create response + response = service_pb2.ProposeActionResponse(action_id=self.unique_id) + self.unique_id += 1 + return response + + @overrides + def GetOutcome( + self, + request: service_pb2.GetOutcomeRequest) -> service_pb2.GetOutcomeResponse: + # print('self.pending_samples : ', + # self.pending_samples) + # print('self.active_samples : ', + # self.active_samples) + # print('self.completed_samples : ', + # self.completed_samples) + with self.completed_lock.gen_rlock(): + completed_samples = self.completed_samples + with self.pending_lock.gen_rlock(): + pending_samples = self.pending_samples + with self.active_lock.gen_rlock(): + active_samples = self.active_samples + + response = service_pb2.GetOutcomeResponse() + if (request.unique_ids): + required_samples = list(request.unique_ids) + for sample_id in required_samples: + outcome = response.outcome.add() + outcome.action_id = sample_id + if (sample_id in completed_samples): + sample_details = self.completed_samples[sample_id] + outcome.status = service_pb2.GetOutcomeResponse.Outcome.Status.COMPLETED + outcome.reward = sample_details['reward'] + outcome.action_attrs.extend( + param_dict_to_proto(sample_details['action'])) + outcome.outcome_attrs.extend( + param_dict_to_proto(sample_details['outcome'])) + outcome.attributes.extend( + param_dict_to_proto(sample_details['attribute'])) + elif (sample_id in pending_samples): + outcome.status = service_pb2.GetOutcomeResponse.Outcome.Status.PENDING + outcome.response_str = '!! requested sample not yet assigned to any worker !!' + elif any(value['id'] == sample_id for value in active_samples.values()): + outcome.status = service_pb2.GetOutcomeResponse.Outcome.Status.ACTIVE + outcome.response_str = '!! requested sample not completed yet !!' else: - worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_ACT - # put sample in active sample list?? - with self.pending_lock.gen_wlock(): - key = next(iter(self.pending_samples)) - sample = self.pending_samples.pop(key) - - with self.active_lock.gen_wlock(): - self.active_samples[request.worker_id] = { - 'id': key, - 'sample': sample - } - print("self.active_samples : ", self.active_samples) - - logging.info("worker_alive_status is %s", worker_alive_status) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return service_pb2.WorkerAliveResponse(status_type=worker_alive_status) + outcome.status = service_pb2.GetOutcomeResponse.Outcome.Status.NOT_EXIST + outcome.response_str = f'!! requested sample Id {sample_id} does not exist !!' + + print("!! 
NOT EXIST !!")
+        with self.active_lock.gen_rlock():
+          print(self.active_samples)
+        with self.pending_lock.gen_rlock():
+          print(self.pending_samples)
+        with self.completed_lock.gen_rlock():
+          print(self.completed_samples)
+    else:
+      for sample_id in completed_samples.keys():
+        sample_details = completed_samples[sample_id]
+        outcome = response.outcome.add()
+        outcome.action_id = sample_id
+        outcome.status = service_pb2.GetOutcomeResponse.Outcome.Status.COMPLETED
+        outcome.reward = sample_details['reward']
+
+        outcome.action_attrs.extend(
+            param_dict_to_proto(sample_details['action']))
+
+        outcome.outcome_attrs.extend(
+            param_dict_to_proto(sample_details['outcome']))
+
+        outcome.attributes.extend(
+            param_dict_to_proto(sample_details['attribute']))
+
+    # print('response here: ', response)
+    return response
+
+  @overrides
+  def decision_point(
+      self, request: service_pb2.DecisionPointRequest
+  ) -> service_pb2.DecisionPointResponse:
+    method_name = "decision_point"
+    logging.debug(">>>> In %s of %s", method_name, _file_name)
+
+    # print('self.pending_samples : ',
+    #       self.pending_samples)
+    # print('self.active_samples : ',
+    #       self.active_samples)
+    # print('self.completed_samples : ',
+    #       self.completed_samples)
+    # print('self.unique_id : ', self.unique_id)
+
+    dp_response = service_pb2.DecisionPointResponse()
+    # if(self.exp_completed):
+    #   logging.info("sight experiment completed, killing the worker")
+    #   dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_DONE
+    # else:
+    # if self.pending_samples:
+
+    # todo : meetashah : add logic to fetch action stored from propose actions and send it as response
+    # key, sample = self.pending_samples.popitem()
+    # fetching the key in FIFO manner
+
+    #? this part now handled by worker alive rpc
+    # with self.pending_lock.gen_wlock():
+    #   key = next(iter(self.pending_samples))
+    #   sample = self.pending_samples.pop(key)
+
+    # with self.active_lock.gen_wlock():
+    #   self.active_samples[request.worker_id] = {'id': key, 'sample': sample}
+
+    with self.active_lock.gen_rlock():
+      if (request.worker_id in self.active_samples):
+        sample = self.active_samples[request.worker_id]['sample']
+      else:
+        raise ValueError("key not found in active_samples")
+    next_action = sample[0]
+    logging.info('next_action=%s', next_action)
+    # raise SystemExit
+    dp_response.action.extend(param_dict_to_proto(next_action))
+    # print('self.active_samples : ', self.active_samples)
+    # print('self.pending_samples : ', self.pending_samples)
+    # print('self.completed_samples : ', self.completed_samples)
+    dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT
+    # else:
+    #   dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_RETRY
+
+    logging.debug("<<<< Out %s of %s", method_name, _file_name)
+    return dp_response
+
+  @overrides
+  def finalize_episode(
+      self, request: service_pb2.FinalizeEpisodeRequest
+  ) -> service_pb2.FinalizeEpisodeResponse:
+    method_name = "finalize_episode"
+    logging.debug(">>>> In %s of %s", method_name, _file_name)
+
+    # logging.info("req in finalize episode of dummy.py : %s", request)
+
+    with self.active_lock.gen_rlock():
+      sample_dict = self.active_samples[request.worker_id]
+
+    with self.completed_lock.gen_wlock():
+      self.completed_samples[sample_dict['id']] = {
+          # 'action': self.pending_samples[unique_action_id],
+          'action':
+              param_proto_to_dict(request.decision_point.choice_params),
+          'attribute':
+              sample_dict['sample'][1],
+          'reward':
+              request.decision_outcome.reward,
+          'outcome':
param_proto_to_dict(request.decision_outcome.outcome_params) + } + + with self.active_lock.gen_wlock(): + del self.active_samples[request.worker_id] + + # print('self.active_samples : ', self.active_samples) + # print('self.pending_samples : ', self.pending_samples) + # print('self.completed_samples : ', self.completed_samples) + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return service_pb2.FinalizeEpisodeResponse(response_str='Success!') + + @overrides + def current_status( + self, request: service_pb2.CurrentStatusRequest + ) -> service_pb2.CurrentStatusResponse: + method_name = "current_status" + logging.debug(">>>> In %s of %s", method_name, _file_name) + # add logic to check status - ref from exhaustive search + + @overrides + def fetch_optimal_action( + self, request: service_pb2.FetchOptimalActionRequest + ) -> service_pb2.FetchOptimalActionResponse: + method_name = "fetch_optimal_action" + logging.debug(">>>> In %s of %s", method_name, _file_name) + # add logic to check status - ref from exhaustive search + logging.debug("<<<< Out %s of %s", method_name, _file_name) + + @overrides + def close(self, + request: service_pb2.CloseRequest) -> service_pb2.CloseResponse: + method_name = "close" + logging.debug(">>>> In %s of %s", method_name, _file_name) + self.exp_completed = True + logging.info( + "sight experiment completed...., changed exp_completed to True") + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return service_pb2.CloseResponse(response_str="success") + + @overrides + def WorkerAlive( + self, request: service_pb2.WorkerAliveRequest + ) -> service_pb2.WorkerAliveResponse: + method_name = "WorkerAlive" + logging.debug(">>>> In %s of %s", method_name, _file_name) + if (self.exp_completed): + worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_DONE + elif (not self.pending_samples): + worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_RETRY + else: + worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_ACT + # put sample in active sample list?? + with self.pending_lock.gen_wlock(): + key = next(iter(self.pending_samples)) + sample = self.pending_samples.pop(key) + + with self.active_lock.gen_wlock(): + self.active_samples[request.worker_id] = {'id': key, 'sample': sample} + print("self.active_samples : ", self.active_samples) + + logging.info("worker_alive_status is %s", worker_alive_status) + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return service_pb2.WorkerAliveResponse(status_type=worker_alive_status) diff --git a/streamlit/streamlit_app.py b/streamlit/streamlit_app.py index e6a5fea..8edfe76 100644 --- a/streamlit/streamlit_app.py +++ b/streamlit/streamlit_app.py @@ -1,38 +1,44 @@ -import streamlit as st +import time + from google.cloud import bigquery import pandas as pd -import time import plotly.graph_objects as go +import streamlit as st client = bigquery.Client() + @st.cache_data(ttl=600) def run_child_details_query(query): - query_job = client.query(query) - rows_raw = query_job.result() - rows = [{row['optimizer']: row['sight_id']} for row in rows_raw] - return rows + query_job = client.query(query) + rows_raw = query_job.result() + rows = [{row['optimizer']: row['sight_id']} for row in rows_raw] + return rows + # Perform query. 
@st.cache_data(ttl=600) def run_query(query): - # print('query : ', query) - query_job = client.query(query) - rows_raw = query_job.result() - rows = [row["outcome_value"] for row in rows_raw] - return rows + # print('query : ', query) + query_job = client.query(query) + rows_raw = query_job.result() + rows = [row["outcome_value"] for row in rows_raw] + return rows + def get_child_details_query(super_id): - return ('SELECT ' - '(SELECT av.value FROM UNNEST(attribute) av WHERE av.key = "optimizer") AS optimizer, ' - 'link.linked_sight_id AS sight_id ' - 'FROM ' - f'sight_logs.{super_id}_log ' - 'WHERE ' - 'sub_type = "ST_LINK";') + return ( + 'SELECT ' + '(SELECT av.value FROM UNNEST(attribute) av WHERE av.key = "optimizer") AS optimizer, ' + 'link.linked_sight_id AS sight_id ' + 'FROM ' + f'sight_logs.{super_id}_log ' + 'WHERE ' + 'sub_type = "ST_LINK";') + def get_exp_details_query(sight_log_id): - return f'SELECT decision_outcome.outcome_value FROM `cameltrain.sight_logs.{sight_log_id}_log` WHERE decision_outcome.outcome_value IS NOT NULL ORDER BY `order`.timestamp_ns ASC;' + return f'SELECT decision_outcome.outcome_value FROM `cameltrain.sight_logs.{sight_log_id}_log` WHERE decision_outcome.outcome_value IS NOT NULL ORDER BY `order`.timestamp_ns ASC;' st.set_page_config(layout="wide", initial_sidebar_state="expanded") @@ -43,54 +49,57 @@ def get_exp_details_query(sight_log_id): st.title("Comparison study of all the optimizers") if log_id: - super_id = st.text_input("Enter Parent experiment sight ID:", value=log_id, disabled=True) + super_id = st.text_input("Enter Parent experiment sight ID:", + value=log_id, + disabled=True) else: - super_id = st.text_input("Enter Parent experiment sight ID:") + super_id = st.text_input("Enter Parent experiment sight ID:") if super_id: - experiment_ids = {} - query = get_child_details_query(super_id) - rows = run_child_details_query(query) - for row in rows: - experiment_ids.update(row) - - progress_text = "Operation in progress. Please wait." - my_bar = st.progress(0, text=progress_text) - all_data = {} - # Collect data for all experiments - total_optimizers = len(experiment_ids) - progress_increment = 100 / total_optimizers - count = 1 - # with st.spinner("Fetching data..."): - for opt, experiment_id in experiment_ids.items(): - query = get_exp_details_query(experiment_id) - # with st.spinner(f"Fetching data for optimizer: {opt}"): - rows = run_query(query) - all_data[opt] = rows - my_bar.progress(int(count * progress_increment), text=progress_text) - count += 1 - time.sleep(1) - my_bar.empty() - - # Combine data into a DataFrame - df = pd.DataFrame(all_data) - - # Display the DataFrame - if st.checkbox('Show raw data'): - st.subheader('Optimizer wise generated rewards over each iteration') - st.write(df) - - st.subheader('Comparing Optimizer performance') - fig = go.Figure() - for column in df.columns: - fig.add_trace(go.Scatter(x=df.index, y=df[column], mode='lines', name=column)) - - fig.update_layout( - title="Rewards vs Iterations (All Experiments)", - xaxis_title="Iterations", - yaxis_title="Rewards", - width=1500, # Adjust width as needed - height=600 # Adjust height as needed - ) - - st.plotly_chart(fig) + experiment_ids = {} + query = get_child_details_query(super_id) + rows = run_child_details_query(query) + for row in rows: + experiment_ids.update(row) + + progress_text = "Operation in progress. Please wait." 
+ my_bar = st.progress(0, text=progress_text) + all_data = {} + # Collect data for all experiments + total_optimizers = len(experiment_ids) + progress_increment = 100 / total_optimizers + count = 1 + # with st.spinner("Fetching data..."): + for opt, experiment_id in experiment_ids.items(): + query = get_exp_details_query(experiment_id) + # with st.spinner(f"Fetching data for optimizer: {opt}"): + rows = run_query(query) + all_data[opt] = rows + my_bar.progress(int(count * progress_increment), text=progress_text) + count += 1 + time.sleep(1) + my_bar.empty() + + # Combine data into a DataFrame + df = pd.DataFrame(all_data) + + # Display the DataFrame + if st.checkbox('Show raw data'): + st.subheader('Optimizer wise generated rewards over each iteration') + st.write(df) + + st.subheader('Comparing Optimizer performance') + fig = go.Figure() + for column in df.columns: + fig.add_trace( + go.Scatter(x=df.index, y=df[column], mode='lines', name=column)) + + fig.update_layout( + title="Rewards vs Iterations (All Experiments)", + xaxis_title="Iterations", + yaxis_title="Rewards", + width=1500, # Adjust width as needed + height=600 # Adjust height as needed + ) + + st.plotly_chart(fig) From 9145c19870ff88d5e920e694953826c698b9bcda Mon Sep 17 00:00:00 2001 From: Abhinav Rao Date: Tue, 8 Oct 2024 22:11:31 +0000 Subject: [PATCH 10/25] added setuptools explicit version --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index dea0cb0..97e4443 100644 --- a/README.md +++ b/README.md @@ -141,7 +141,8 @@ cd venvs virtualenv sight_env --python=python3.10 source ~/venvs/sight_env/bin/activate -# Install necessary dependencies from requirement.txt file +# Install setup.py compatible setuptools and necessary dependencies from requirement.txt file +pip install setuptools==58.2.0 pip install -r ~/x-sight/py/sight/requirements.txt ``` Note : if error ```ModuleNotFoundError: No module named 'virtualenv'``` occurs, try installing virtualenv using pip, From fe834e4e145869220fc03c9a1db8a0bfe74e13d4 Mon Sep 17 00:00:00 2001 From: Meet-Shah Date: Wed, 16 Oct 2024 11:40:35 +0000 Subject: [PATCH 11/25] added instructions for formatter and setting up VM server --- README.md | 103 ++++++++++++++++++++++++++++-------------------------- 1 file changed, 53 insertions(+), 50 deletions(-) diff --git a/README.md b/README.md index f9ff3df..ecda808 100644 --- a/README.md +++ b/README.md @@ -16,9 +16,7 @@ This document details the key steps needed to start using the Sight system. ## Prerequisites -### Fetching Sight code: - -#### Install packages: +#### Installation of Basic Packages : ```bash # Install basic libraries @@ -48,7 +46,9 @@ sudo apt install docker-ce # Sudoless Docker sudo addgroup docker sudo usermod -aG docker $USER -# In order for the above to take effect, logout and log back in (or reboot if that does not work), or use newgrp docker to change your primary group within a terminal. +# In order for the above to take effect, logout and log back in +# (or reboot if that does not work), or use newgrp docker to +# change your primary group within a terminal. 
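+# Optional sanity check (an illustrative addition, not a required step):
+# after logging back in, docker should now work without sudo, e.g.:
+#   docker run hello-world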
```

##### for GCP (on VM)
@@ -82,7 +82,7 @@ sudo usermod -aG docker $USER
 #### Get code from github:

 ```bash
-#clone and fetch latest sigh code from gerrit
+# clone and fetch latest sight code from github
 cd ~/
 git clone https://github.com/theteamatx/x-sight.git
 ```
@@ -141,28 +141,32 @@ cd venvs
 virtualenv sight_env --python=python3.10
 source ~/venvs/sight_env/bin/activate

-# Install setup.py compatible setuptools and necessary dependencies from requirement.txt file
+# Install setup.py compatible setuptools
 pip install setuptools==58.2.0
+# Install necessary dependencies from requirement.txt file
 pip install -r ~/x-sight/py/sight/requirements.txt
 ```
 Note : if error ```ModuleNotFoundError: No module named 'virtualenv'``` occurs, try installing virtualenv using pip,
 ```sudo pip install virtualenv```

+#### Activate virtual env:
+
 ```bash
-# Set python path to x-sight directory and reload the bashrc file
+# Set python path and reload the bashrc file
 echo 'export PYTHONPATH="$HOME/x-sight/py:$HOME/x-sight:$PYTHONPATH"' >> ~/.bashrc
 source ~/.bashrc
+# Activate virtual env
 source ~/venvs/sight_env/bin/activate
 cd ~/x-sight
 ```

-#### Setup yapf in vscode
+#### Setup yapf formatter in vscode

-add extention eeyore.yapf in vscode.
-add .vscode folder in the repo and create settings.json file which will override the defauls settings of the vscode
-add following code snippet there or change accordingly if you already have custom setting
+- add the extension eeyore.yapf in vscode.
+- add a .vscode folder in the repo and create a settings.json file which will override the default settings of vscode
+- add the following code snippet there, or change accordingly if you already have custom settings

-```
+```bash
 {
   "[python]": {
     "editor.formatOnSaveMode": "file",
@@ -176,54 +180,27 @@ add following code snippet there or change accordingly if you already have custo
 ```
 you might need to restart vscode to see the changes

-If you are setting up this first time and want to apply this style to existing repo
-create .config folder in the repo
-
-1. Create .style.yapf file with following content in .config folder
-```
-[style]
-based_on_style = google
-indent_width = 2
-column_limit = 80
-```
-
-2. Create .isort.cfg file with following content in .config folder
-```
-[settings]
-profile = google
-use_parentheses = true
-line_length = 80
-multi_line_output = 3
-```
-
-run the following commands from root folder of the repo to apply those style changes
-```
-yapf -ir -vv --style .config/.style.yapf .
-isort . --settings-path .config/ -v
-```
-#### setup pre-commit hook
+### User Permissions:

-this pre-commit hook checks for the same formatting style we just setup locally
+Set up your gcloud CLI configurations using

-```
-pip install pre-commit
+```
+gcloud auth login
+gcloud auth application-default login
+gcloud auth configure-docker gcr.io
 ```

-- make sure you created .style.yapf and .isort.cfg file in .config folder from the previous step.
-- make sure your repo contains .pre-commit-config.yaml file in root directory of repo
-
-
-
-### User Permissions:
-
-Note : all the follow up commands using $PROJECT_ID assumes you have it already set to your gcp project id. If not, set it via
+Note : all the following commands using $PROJECT_ID assume you have it already set to your gcp project id.
If not, set it via

```bash
export PROJECT_ID=YOUR_ACTUAL_PROJECT_ID
```

-For completing rest of the task from prerequisites, one needs either owner role
-and directly continue to [this](#heading=h.gmwxj9f1df9f) section or one can
+For completing rest of the task from prerequisites,
+- one needs owner role and directly continue to the [next](#custom-rolesservice-account) section
+
+or
+- one can
 create Sight Manager role as following and assign that role to any user and
 delegate the remaining tasks from prerequisites.
@@ -820,3 +797,29 @@ python sight_service/service_root.py

 And from another terminal session, User can run any valid command from [this](#example-training-invocation-commands) section and change the flag ```--deployment_mode=local``` to indicate that sight_service is running locally.

+### VM server
+
+As cloud-run periodically restarts the container, if you want to run the experiment for a longer duration (24 hours), you can deploy the server on a VM; the client and worker can hit that server using the internal IP of the VM. Here's the setup you'll need:
+
+- Create a VM with the sight-service-account
+- SSH into the VM and pull the server docker image on the VM
+```
+docker pull gcr.io/cameltrain/sight-dev-service:15oct
+```
+- Bind your cloudtop's 8080 port to the VM's 8080 port so we can send requests to the server on the VM using localhost from the cloudtop (as this VM and our cloudtop are not in the same network, we can't connect to its internal/external IP)
+```bash
+gcloud compute ssh $INSTANCE_NAME --project $PROJECT_ID --zone $INSTANCE_ZONE -- -o ProxyCommand='corp-ssh-helper %h %p' -L localhost:8080:localhost:8080
+```
+- once you've SSH'ed into the VM using the above command, start the server container on the VM
+```
+docker run -it -p 8080:8080 gcr.io/cameltrain/sight-dev-service:15oct
+```
+- this will bind the container's 8080 port to the VM's 8080 port, which eventually points to the cloudtop's 8080 port, so the client can directly ping localhost from the cloudtop while actually using the server deployed on the VM
+
+Note :
+
+while spawning multiple workers, we may hit the limit on the maximum number of IP addresses allowed in the given region, so we spawn workers with only an internal IP. This creates another issue: such a VM can't connect to any external address, but what if our server is deployed on Cloud Run?
+
+for that, we have to enable Private Google Access on the default subnet of the VPC network to allow all the VMs in that region to reach the external IP addresses of Google APIs and services, in our case Cloud Run.
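+One way to enable it is shown below (a minimal sketch; `default` and `us-central1` are placeholder subnet and region values, substitute your own):
+
+```bash
+# Illustrative only: enable Private Google Access on the subnet the workers use.
+gcloud compute networks subnets update default \
+  --region=us-central1 \
+  --enable-private-ip-google-access
+```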
+ +So, make sure the subnet of the vpc network for region you specified while spawning workers has this Private Google Access enabled From 49d97d95bde553fc14d4d77730c0f8a8c9dc94b5 Mon Sep 17 00:00:00 2001 From: Hrushikesh Makode <152846252+hrushikeshm-g@users.noreply.github.com> Date: Fri, 18 Oct 2024 04:25:49 +0000 Subject: [PATCH 12/25] message queue and testing refactoring added (#61) * Restructured simulation analysis code made log extraction a single-binary operation * Small fixes to make remote trials work well with simulations and VM * fix * added metadata in tag of worker image * changed to cloud logger and some auto-format changes * more logging changes * Refinements * dsub local changes * proposal propose action outcome JSONified * sight.silent change and upgraded dsub * cloud log fix * incremental * formatter changes * more formatter changes * added setuptools explicit version * added changes in requirements.txt for different usecases * updated client dependencies with instruction of formatter * more * remove prints * Merged more * more merging * msg queue added for single point optimizer * test-case pre-commit added * github action added * folder placed correctly * dependecies added * sight_service req added * fix for dsub_local after stable merge * local setting for yapf formatter * handle ports related logic * Dev changes (#56) (#62) * Restructured simulation analysis code made log extraction a single-binary operation * Small fixes to make remote trials work well with simulations and VM * fix * added metadata in tag of worker image * changed to cloud logger and some auto-format changes * more logging changes * Refinements * dsub local changes * proposal propose action outcome JSONified * sight.silent change and upgraded dsub * cloud log fix * incremental * formatter changes * more formatter changes * added setuptools explicit version * added changes in requirements.txt for different usecases * updated client dependencies with instruction of formatter * more * remove prints * Merged more * more merging * fix for dsub_local after stable merge * local setting for yapf formatter * handle ports related logic --------- Co-authored-by: Greg Bronevetsky Co-authored-by: hrushikeshm-g Co-authored-by: Abhinav Rao Co-authored-by: bronevet-abc <75458629+bronevet-abc@users.noreply.github.com> * phase2 msg queue * phase 3 --------- Co-authored-by: Greg Bronevetsky Co-authored-by: Meet-Shah Co-authored-by: Abhinav Rao Co-authored-by: bronevet-abc <75458629+bronevet-abc@users.noreply.github.com> --- .github/super-linter.env | 8 + .github/workflows/functional-tests.yml | 44 ++ .pre-commit-config.yaml | 16 +- README.md | 339 +++++++++++---- py/tests/discover_and_run_tests.py | 83 ++++ sight_service/message_queue.py | 400 ++++++++++++++++++ sight_service/single_action_optimizer.py | 47 +- sight_service/tests/colorful_tests.py | 27 ++ .../tests/functional/test_message_queue.py | 193 +++++++++ sight_service/worklist_scheduler_opt.py | 217 ++++------ 10 files changed, 1158 insertions(+), 216 deletions(-) create mode 100644 .github/super-linter.env create mode 100644 .github/workflows/functional-tests.yml create mode 100644 py/tests/discover_and_run_tests.py create mode 100644 sight_service/message_queue.py create mode 100644 sight_service/tests/colorful_tests.py create mode 100644 sight_service/tests/functional/test_message_queue.py diff --git a/.github/super-linter.env b/.github/super-linter.env new file mode 100644 index 0000000..d8b9533 --- /dev/null +++ b/.github/super-linter.env @@ -0,0 +1,8 
@@ +VALIDATE_ALL_CODEBASE=false +VALIDATE_PYTHON_MYPY=false +VALIDATE_PYTHON_BLACK=false +VALIDATE_PYTHON_FLAKE8=false +VALIDATE_DOCKERFILE_HADOLINT=false +VALIDATE_CSS=false +DEFAULT_BRANCH='main' +FILTER_REGEX_EXCLUDE='.*\.vscode/.*\.json|.*/third_party/.*' diff --git a/.github/workflows/functional-tests.yml b/.github/workflows/functional-tests.yml new file mode 100644 index 0000000..9270d5d --- /dev/null +++ b/.github/workflows/functional-tests.yml @@ -0,0 +1,44 @@ +name: Run test-cases + +on: + push: + +jobs: + test: + runs-on: ubuntu-latest + + env: + PYTHONPATH: ${{ github.workspace }} # Add the tests directory to PYTHONPATH + + strategy: + matrix: + test-type: [functional] # Test types + + steps: + # Checkout the repository to the runner + - name: Checkout code + uses: actions/checkout@v2 + + - name: Print Python Path + run: | + which python + python -c "import sys; print(sys.executable)" + python -c "import sys; print(sys.path)" + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r sight_service/requirements.txt + pip install absl-py==1.4.0 + pip install colorama==0.4.6 + + - name: Set and print python path + run: | + echo "PYTHONPATH=$PYTHONPATH:$(pwd)/py" >> $GITHUB_ENV + which python + python -c "import sys; print(sys.executable)" + python -c "import sys; print(sys.path)" + + - name: Run ${{ matrix.test-type }} test-cases + run: | + python py/tests/discover_and_run_tests.py --type ${{ matrix.test-type }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1258dd9..cee2895 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,10 +3,10 @@ repos: rev: v4.0.1 hooks: - id: trailing-whitespace - exclude: ^.*\.patch$ + exclude: ^.*\.patch$ - id: end-of-file-fixer - exclude: ^.*\.patch$ + exclude: ^.*\.patch$ - id: check-yaml @@ -14,10 +14,18 @@ repos: rev: v0.32.0 hooks: - id: yapf - args: ['--style', '.config/.style.yapf'] + args: ["--style", ".config/.style.yapf"] - repo: https://github.com/pre-commit/mirrors-isort rev: v5.10.1 hooks: - id: isort - args: ['--settings-path', '.config/.isort.cfg'] + args: ["--settings-path", ".config/.isort.cfg"] + + - repo: local + hooks: + - id: run-functional-tests + name: Run all functional unit-tests + entry: python py/tests/discover_and_run_tests.py --type functional + language: system + always_run: true diff --git a/README.md b/README.md index ecda808..0a1a194 100644 --- a/README.md +++ b/README.md @@ -2,21 +2,21 @@ This document details the key steps needed to start using the Sight system. -1. The [Prerequisites](#prerequisites) section details all the steps +1. The [Prerequisites](#prerequisites) section details all the steps needed to configure Sight to work within a given GCP project, including accounts, accessing the Sight codebase and launching the Sight server. -2. The [Logging API](#logging-api) section summarizes Sight's APIs +2. The [Logging API](#logging-api) section summarizes Sight's APIs for logging the application's execution so that it can be visualized, searched or analyzed by external tools. -3. Section [Decision API](#decision-api) describes Sight's APIs for +3. Section [Decision API](#decision-api) describes Sight's APIs for using external ML libraries to control or optimize the application's execution to achieve the user's objectives. -4. Section [Sight Utilities](#sight-utilities) details useful supporting +4. Section [Sight Utilities](#sight-utilities) details useful supporting tools that make it easier to use Sight. 
## Prerequisites

-#### Installation of Basic Packages :
+### Installation of Basic Packages

 ```bash
 # Install basic libraries
@@ -26,7 +26,7 @@ sudo apt install virtualenv
 sudo apt-get install python3-pip
 ```

-#### Install docker:
+#### Install docker

 ##### for Google-internal user (on cloudtop)

@@ -79,7 +79,7 @@ sudo docker run hello-world
 sudo usermod -aG docker $USER
 ```

-#### Get code from github:
+#### Get code from github

 ```bash
 # clone and fetch latest sight code from github
@@ -130,7 +130,7 @@ pyenv global
 After installing the supported Python version (3.9 or 3.10), you can proceed
 with creating the virtualenv and installing the required dependencies.

-#### Create virtual env:
+#### Create virtual env

 ```bash
 # Create and set-up new virtual environment
@@ -146,10 +146,11 @@ pip install setuptools==58.2.0
 # Install necessary dependencies from requirement.txt file
 pip install -r ~/x-sight/py/sight/requirements.txt
 ```
+
 Note : if error ```ModuleNotFoundError: No module named 'virtualenv'``` occurs, try installing virtualenv using pip,
 ```sudo pip install virtualenv```

-#### Activate virtual env:
+#### Activate virtual env

 ```bash
 # Set python path and reload the bashrc file
@@ -160,13 +161,13 @@ source ~/venvs/sight_env/bin/activate
 cd ~/x-sight
 ```

-#### Setup yapf formatter in vscode
+#### Setup yapf in vscode

-- add the extension eeyore.yapf in vscode.
-- add a .vscode folder in the repo and create a settings.json file which will override the default settings of vscode
-- add the following code snippet there, or change accordingly if you already have custom settings
+add the extension eeyore.yapf in vscode.
+add a .vscode folder in the repo and create a settings.json file which will override the default settings of vscode
+add the following code snippet there, or change accordingly if you already have custom settings

-```bash
+```json
 {
   "[python]": {
     "editor.formatOnSaveMode": "file",
@@ -178,39 +179,209 @@ cd ~/x-sight
   "yapf.args": ["--style", "{based_on_style: Google, indent_width: 2, column_limit: 80}"],
 }
 ```
+
 you might need to restart vscode to see the changes

-### User Permissions:
+If you are setting this up for the first time and want to apply this style to an existing repo,
+create a .config folder in the repo

-Set up your gcloud CLI configurations using
+1. Create a .style.yapf file with the following content in the .config folder

+```text
+[style]
+based_on_style = google
+indent_width = 2
+column_limit = 80
 ```
-gcloud auth login
-gcloud auth application-default login
-gcloud auth configure-docker gcr.io
+
+1. Create a .isort.cfg file with the following content in the .config folder
+
+```text
+[settings]
+profile = google
+use_parentheses = true
+line_length = 80
+multi_line_output = 3
 ```

-Note : all the following commands using $PROJECT_ID assumes you have it already set to your gcp project id. If not, set it via
+run the following commands from the root folder of the repo to apply those style changes
+
 ```bash
-export PROJECT_ID=YOUR_ACTUAL_PROJECT_ID
+yapf -ir -vv --style .config/.style.yapf .
+isort . --settings-path .config/ -v
+```
+
+#### setup pre-commit hook
+
+this pre-commit hook checks for the same formatting style we just set up locally
+
+```bash
+pip install pre-commit
+```
+
+- make sure you created the .style.yapf and .isort.cfg files in the .config folder from the previous step.
+- make sure your repo contains a .pre-commit-config.yaml file in the root directory of the repo
+
+## Test Cases
+
+### 🧪 Test Suite Structure and Automation Overview
+
+Welcome to the test automation setup of this repository!
Our test suite is designed for clarity, scalability, and seamless automation using **pre-commit hooks** and **GitHub Actions**. This ensures that all code changes are validated through a robust testing pipeline before integration. + +### 📂 Test Suite Structure + +The test cases are organized in a **folder-based hierarchy** to provide clear separation between different features and test types. This organization makes it easy to locate, manage, and run specific tests. Here's an overview of the structure: + +```text +root/ + ├── feature1/ + │ └── tests/ + │ ├── functional/ + │ ├── integration/ + │ └── performance/ + ├── feature2/ + │ └── tests/ + │ ├── functional/ + │ ├── integration/ + │ └── performance/ + └── tests/ + └── discover_and_run_tests.py + +``` + +### 🔍 Test Types Explained + +- **Functional Tests**: Located under each feature’s `tests/functional/` folder. These tests validate that each feature works according to the requirements. +- **Integration Tests**: Found in `tests/integration/`, these tests verify the interactions between different modules and components. +- **Performance Tests**: Stored in `tests/performance/`, these tests assess the performance and responsiveness of different features. + +### 🛠 Utility Scripts + +- **`discover_and_run_tests.py`**: This script is responsible for discovering and running tests based on the type and pattern specified. It automates test discovery and execution using `unittest` and a custom test runner for enhanced output. + +### ⚙️ Automation with Pre-commit Hooks + +We use **pre-commit hooks** to ensure that tests are run before any commit is made. This prevents breaking changes from being committed to the repository. The configuration is set up in `.pre-commit-config.yml`: + +```yaml +.pre-commit-config.yml + +- repo: local + hooks: + - id: run-functional-tests + name: Run all functional unit-tests + entry: python tests/discover_and_run_tests.py --type functional + language: system + always_run: +``` + +#### 💡 How It Works + +- **Hook ID**: `run-functional-tests` – This hook is configured to run all functional tests before a commit is made, ensuring no breaking changes are introduced. +- **Entry Point**: The hook uses the command `python tests/discover_and_run_tests.py --type functional` to execute the `discover_and_run_tests.py` script. The `--type functional` argument specifies that only functional tests are to be executed. +- **Always Run**: The `always_run` option ensures that the hook executes every time a commit attempt is made, making it a reliable safeguard against introducing untested changes. + +### 🚀 Continuous Integration with GitHub Actions + +GitHub Actions automate the execution of tests whenever changes are pushed to the repository. 
The configuration is defined in `.github/workflows/functional-test.yml`: + +```yaml +## .github/workflows/functional-test.yml + +name: Run test-cases + +on: + push: + +jobs: + test: + runs-on: ubuntu-latest + + env: + PYTHONPATH: ${{ github.workspace }} # Set the PYTHONPATH + + strategy: + matrix: + test-type: [functional] # Test types + + steps: + - name: Checkout code + uses: actions/checkout@v2 + + - name: Print Python Path + run: | + which python + python -c "import sys; print(sys.executable)" + python -c "import sys; print(sys.path)" + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + pip install -r fvs/requirements.txt + pip install absl-py==1.4.0 + pip install colorama==0.4.6 + + - name: Run ${{ matrix.test-type }} test-cases + run: | + python tests/discover_and_run_tests.py --type ${{ matrix.test-type }} ``` +#### 🔧 Key Details + +- **Job Trigger**: The GitHub Action is triggered on every push event to the repository, ensuring that tests are continuously run and code changes are validated automatically. +- **Test Strategy**: A matrix strategy is used to specify different types of tests (e.g., functional). This makes it easy to expand the testing setup by adding new types of tests in the future. +- **Dependencies**: The action installs dependencies listed in the `requirements.txt` files, ensuring the testing environment is properly set up before tests are executed. This guarantees that the correct versions and dependencies are used during the test run. + +### 🎯 Running Tests Locally + +To manually run tests, you can use the `discover_and_run_tests.py` script. This allows developers to run specific test types directly from their local environment. Example usage: + +```bash +python tests/discover_and_run_tests.py --type functional +``` + +- `--type`: Specify the type of tests to run (e.g., `functional`, `integration`, `performance`). + +- `--pattern`: Optionally use --pattern test_*.py to specify the naming pattern for test files (default is test_*.py) + +This flexible setup enables developers to execute tests based on specific criteria, ensuring efficient and targeted test runs. + +### 📢 Summary + +By structuring the tests and integrating **pre-commit hooks** and **GitHub Actions**, we achieve the following: + +- **Code Quality**: Consistent testing ensures that code changes are validated before they are integrated, maintaining high code quality. +- **Immediate Feedback**: Developers receive immediate feedback on code changes, preventing errors and issues from being introduced into the codebase. +- **Continuous Integration**: Automated testing with GitHub Actions ensures that the repository remains stable and reliable as new changes are pushed. + +This setup enhances the development experience, ensuring robust, high-quality software delivery and minimizing the risk of integration issues. 🚀 + +### User Permissions + +Note : all the follow up commands using $PROJECT_ID assumes you have it already set to your gcp project id. If not, set it via + +```bash +export PROJECT_ID=YOUR_ACTUAL_PROJECT_ID +``` For completing rest of the task from prerequisites, + - one needs owner role and directly continue to the [next](#custom-rolesservice-account) section or + - one can create Sight Manager role as following and assign that role to any user and delegate the remaining tasks from prerequisites. 
-#### Creating Sight Manager role: +#### Creating Sight Manager role ```bash gcloud iam roles create sight_manager --project=$PROJECT_ID --file=infra/sight-manager-role.yaml ``` -#### Assigning role to User: +#### Assigning role to User ```bash gcloud projects add-iam-policy-binding $PROJECT_ID \ @@ -224,28 +395,30 @@ permissions to run the Sight. Make sure following APIs are enabled in your gcp project -- aiplatform.googleapis.com -- lifesciences.googleapis.com -- run.googleapis.com -- cloudresourcemanager.googleapis.com +- aiplatform.googleapis.com +- lifesciences.googleapis.com +- run.googleapis.com +- cloudresourcemanager.googleapis.com -### Custom Roles/Service-account: +### Custom Roles/Service-account -#### Sight User: +#### Sight User + +1) Create a custom role for User working with Sight from the sight-user-role.yaml file available in the root directory of repo. -1) Create a custom role for User working with Sight from the sight-user-role.yaml file available in the root directory of repo. ```bash gcloud iam roles create sight_user --project=$PROJECT_ID --file=infra/sight-user-role.yaml ``` -2) Assign the custom role to user. +2) Assign the custom role to user. ```bash gcloud projects add-iam-policy-binding $PROJECT_ID \ --member="user:$USER_ACCOUNT" \ --role="projects/$PROJECT_ID/roles/sight_user" ``` -#### Sight Service Account: + +#### Sight Service Account 1) Create the service account for sight related work @@ -271,7 +444,7 @@ account can have required permissions. --role="projects/$PROJECT_ID/roles/sight_service_account" ``` -### Launching Default-service: +### Launching Default-service 1) Add your project details to .env file in the code @@ -294,7 +467,7 @@ account can have required permissions. gcloud run deploy sight-default --image=gcr.io/$PROJECT_ID/sight-default:latest --allow-unauthenticated --service-account=sight-service-account@$PROJECT_ID.iam.gserviceaccount.com --concurrency=default --cpu=2 --memory=8Gi --min-instances=1 --max-instances=1 --no-cpu-throttling --region=us-central1 --project=$PROJECT_ID ``` -### Hosting worker image: +### Hosting worker image Host the worker image in a cloud which will be used as default image by the workers spawned using sight unless specified otherwise. @@ -414,7 +587,7 @@ log may be viewed: ![image](images/image4.png) -## Decision API: +## Decision API ### Overview @@ -441,17 +614,17 @@ application into a form that allows Sight to run it many times as it trains the model and then start execution via Sight's `decision.run()` function. Currently Sight supports the following package types: -1. If the application logic is implemented via a dm_env-type environment (e.g. +1. If the application logic is implemented via a dm_env-type environment (e.g. the OpenAI gym) users can provide this environment with no modification and Sight will call the gym's step and reward functions. -1. Alternatively, application logic can be encapsulated in a call-back driver +1. Alternatively, application logic can be encapsulated in a call-back driver function that calls Sight's Decision API directly. Sight will then call this driver function one or more times during the model's training process, as well as during normal program execution. ### Decision API using an Environment -- To start the program users must create their environment object and pass it +- To start the program users must create their environment object and pass it to the `decision.run()` function. This environment may be implemented by the application developer (e.g. 
ShowerEnv) or may be a ready-made environment from AI gym: @@ -463,7 +636,7 @@ Sight supports the following package types: ) ``` -- The code below provides an example implementation of a gym, which must +- The code below provides an example implementation of a gym, which must implement methods reset(), step(), action_spec() and observation_spec(). ```python @@ -528,13 +701,13 @@ identify the questions that need to be asked, the format of the answers that need to be given and add explicit calls where the application asks for guidance and reports back on the outcomes of the ML algorithm's suggestions. -- The first step is to implement the application's entry point as a driver +- The first step is to implement the application's entry point as a driver function that is passed as a parameter to the `decision.run()`call. -- Next developers must document the properties of the application's dynamic +- Next developers must document the properties of the application's dynamic execution that will be communicated to the ML algorithm as context for the decisions it makes. These are passed to the `state_attrs`parameter and document the range of possible values of each state parameter. -- Finally, developers must document the format of the guidance it needs back +- Finally, developers must document the format of the guidance it needs back from the ML algorithm as a set of named attributes and their allowed value ranges. These are passed to the `action_attrs`parameter. @@ -559,27 +732,27 @@ and reports back on the outcomes of the ML algorithm's suggestions. ) ``` -- The driver function performs all the logic of the application and calls the +- The driver function performs all the logic of the application and calls the following functions to interact with the ML algorithm: - - `data_structures.log_var():`When the application logs a variable + - `data_structures.log_var():`When the application logs a variable explicitly named as a state variable, it is communicated to the ML - algorithm as part of the application's current state. ` ` - - `decision.decision_point()`: Asks the ML algorithm for suggestions about + algorithm as part of the application's current state. `` + - `decision.decision_point()`: Asks the ML algorithm for suggestions about how the application should execute. The response a dict with same keys as the `action_attrs`parameter in the `decision.run()`call and values in the allowed range for each such parameter. - ```python - action = decision_point("label", sight) - ``` + ```python + action = decision_point("label", sight) + ``` - - `decision.decision_outcome()`: Communicates to the ML algorithm + - `decision.decision_outcome()`: Communicates to the ML algorithm the effectiveness of the preceding suggestion, with higher values of the `reward` parameter indicating that the - ```python - decision_outcome("label",updated_timestep.reward,sight) - ``` + ```python + decision_outcome("label",updated_timestep.reward,sight) + ``` - Driver function should looks something like: @@ -615,75 +788,75 @@ and reports back on the outcomes of the ML algorithm's suggestions. ) ``` -- In the case of the dm_env type RL environment provided, the default driver +- In the case of the dm_env type RL environment provided, the default driver function gets run multiple times while running the simulation, if the custom function is not provided. 
The usage of the decision_point and decision_outcome calls in this driver function is shown below:

-### Running Decision API-enabled applications on the command line:
+### Running Decision API-enabled applications on the command line

Applications that use the Decision API may be run in two modes:

-- Training: the ML algorithm observes many runs of the application to learn
+- Training: the ML algorithm observes many runs of the application to learn
   how to make high-quality decisions, and
-- Run: the ML algorithm has been trained and the application is run normally,
+- Run: the ML algorithm has been trained and the application is run normally,
   guided by the trained ML algorithm.

To run the application in training mode users must run the application's
binary while setting the command line flag ```--decision_mode``` as ```train```
and must use the following flags to control the training process:

-- ```deployment_mode```: The procedure to use when training a model to drive
+- ```deployment_mode```: The procedure to use when training a model to drive
   applications that use the Decision API.
-  - distributed: The application is executed in parallel on the GCP cloud.
-  - docker_local: The application is executed locally in a docker container
-  - local: The application is executed locally in the current OS environment
+  - distributed: The application is executed in parallel on the GCP cloud.
+  - docker_local: The application is executed locally in a docker container
+  - local: The application is executed locally in the current OS environment

-- ```optimizer_type```: The optimizer to be used while training (vizier, dm-acme,
+- ```optimizer_type```: The optimizer to be used while training (vizier, dm-acme,
   exhaustive_search)

-- num_train_workers: Number of workers to use on the GCP cloud in a training
+- num_train_workers: Number of workers to use on the GCP cloud in a training
   run in distributed mode.

-- num_trials: Total number of training trials to perform across all the local
+- num_trials: Total number of training trials to perform across all the local
   or distributed worker nodes.

-- docker_image: docker image to used by worker nodes while running the taks
+- docker_image: docker image to be used by worker nodes while running the tasks

-- log_path: path to store the logs of workers
+- log_path: path to store the logs of workers

Once the ML model has been trained users can use this model to guide ordinary
application runs by executing the application's binary while setting the
command line flag: ```--decision_mode``` as ```run``` and
```--trained_model_log_id``` as the ```$log_id``` of the sight run generated
while training.

-### Example demo applications:
+### Example demo applications

To make it easier to experiment with the Decision API the sight/demo directory
contains the following demo applications that use the Decision API in the
different ways described above:

-1. shower_demo_without_env.py (dm_acme): driver function that uses the Decision
+1. shower_demo_without_env.py (dm_acme): driver function that uses the Decision
   API explicitly.
-1. shower_demo_with_env.py (dm_acme): uses the Decision API implicitly via an
+1. shower_demo_with_env.py (dm_acme): uses the Decision API implicitly via an
   RL environment that is driven from Sight.
-1. gym_demo_env.py (dm_acme): uses the Decision API implicitly to drive an AI
+1. gym_demo_env.py (dm_acme): uses the Decision API implicitly to drive an AI
   gym environment that is specified via the ```--env_name``` command line flag.
-1.
sweetness.py: Simple program that tries to learn the level of sweetness a
+1. sweetness.py: Simple program that tries to learn the level of sweetness a
   user likes and uses the explicit Decision API to describe the state and
   action attributes, as well as the decision point and outcome. Used most
   effectively with the following optimizers: vizier, exhaustive_search.
-1. volterra_lotka.py: Simulation of the Volterra-Lotka predator-prey model
+1. volterra_lotka.py: Simulation of the Volterra-Lotka predator-prey model
   using the explicit Decision API. Used most effectively with the following
   optimizers: vizier, exhaustive_search.

-#### Example Training Invocation Commands:
+#### Example Training Invocation Commands

To make it easier to start experimenting with Sight, below are some example
commands for running demo files with different optimizers.

-##### Without any environment:
+##### Without any environment

To use sight for training the optimizer without any environment, run the following
command with all the mandatory flags mentioned
[here](#running-decision-api-enabled-applications-on-the-command-line):

@@ -699,7 +872,7 @@ python py/sight/demo/shower_demo_without_env.py \
--docker_image=gcr.io/$PROJECT_ID/sight-worker
```

-##### With environment:
+##### With environment

To use sight for training the optimizer with a gym environment, add the env_name flag
in addition to all the mandatory flags for any other dm_env type environment, no
@@ -717,7 +890,7 @@ python py/sight/demo/gym_demo_env.py \
--env_name=CartPole-v1
```

-#### Vizier:
+#### Vizier

To use sight with vertex AI vizier for hyperparameter tuning one can use the
following

@@ -732,7 +905,7 @@ python py/sight/demo/sweetness.py \
--docker_image=gcr.io/$PROJECT_ID/sight-worker
```

-#### Exhaustive Search:
+#### Exhaustive Search

```python
python py/sight/demo/sweetness.py \
--decision_mode=train \
@@ -794,8 +967,8 @@ For this, User can run service_root script from one terminal session
 cd ~/x-sight
 python sight_service/service_root.py
```
-And from another terminal session, User can run any valid command from [this](#example-training-invocation-commands) section and change the flag ```--deployment_mode=local``` to indicate that sight_service is running locally.
+
+And from another terminal session, the user can run any valid command from [this](#example-training-invocation-commands) section and change the flag ```--deployment_mode=local``` to indicate that sight_service is running locally.
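+For example, taking the sweetness demo above and pointing it at the locally running service (a sketch; use whichever demo and optimizer flags fit your experiment):
+
+```bash
+python py/sight/demo/sweetness.py \
+--decision_mode=train \
+--deployment_mode=local \
+--optimizer_type=exhaustive_search \
+--num_trials=5
+```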
### VM server

@@ -803,17 +976,23 @@ As cloud-run periodically restarts the container, if you want to run the experim
 - Create VM with sight-service-account
 - SSH into the VM and pull the server docker image onto the VM
-```
+
+```bash
 docker pull gcr.io/cameltrain/sight-dev-service:15oct
 ```
+
 - Bind your cloudtop's 8080 port to the VM's 8080 port so we can send requests to the server on the VM using localhost from the cloudtop (as the VM and our cloudtop are not in the same network, we can't connect to its internal/external IP)
+
 ```bash
 gcloud compute ssh $INSTANCE_NAME --project $PROJECT_ID --zone $INSTANCE_ZONE -- -o ProxyCommand='corp-ssh-helper %h %p' -L localhost:8080:localhost:8080
 ```
+
 - Once you have SSH'ed into the VM using the above command, start the server container on the VM
-```
+
+```bash
 docker run -it -p 8080:8080 gcr.io/cameltrain/sight-dev-service:15oct
 ```
+
 - This binds the container's 8080 port to the VM's 8080 port, which eventually points to the cloudtop's 8080 port, so the client can directly ping localhost from the cloudtop while using the server deployed on the VM

 Note :
diff --git a/py/tests/discover_and_run_tests.py b/py/tests/discover_and_run_tests.py
new file mode 100644
index 0000000..6d2ce50
--- /dev/null
+++ b/py/tests/discover_and_run_tests.py
@@ -0,0 +1,83 @@
+"""Discovering and running tests."""
+
+import os
+import sys
+import unittest
+
+from absl import app
+from absl import flags
+from tests.colorful_tests import ColorfulTestRunner
+
+# Define command-line flags using absl
+FLAGS = flags.FLAGS
+_TEST_TYPE = flags.DEFINE_string(
+    "type",
+    None,
+    "Specify the type of tests to run (e.g., 'functional', 'integration',"
+    " 'performance',..).",
+)
+_FILE_PATTERN = flags.DEFINE_string(
+    "pattern",
+    "test_*.py",
+    "Specify the file pattern to match (default is 'test_*.py').",
+)
+
+
+def discover_and_run_tests(test_type=None, pattern="test_*.py"):
+  """Discover and run tests.
+
+  Args:
+    test_type: The type of tests to run (e.g., 'functional', 'integration',
+      'performance').
+    pattern: The file pattern to match for test files (default is
+      'test_*.py').
+  """
+
+  ls_paths = []
+
+  # Walk through all directories and subdirectories starting from the current
+  # directory.
+  for path, _, _ in os.walk("."):
+    # Filter out paths that contain 'pycache' or virtual environment
+    # directories. Also, ensure that the path contains a 'tests/' directory and
+    # optionally match the 'test_type' if specified.
+    if (
+        "pycache" not in path
+        and ".venv" not in path
+        and "tests/" in path
+        and (test_type in path if test_type else True)
+    ):
+      # Add the path to the list of discovered test directories.
+      ls_paths.append(path)
+
+  # Print the list of discovered test paths (for debugging purposes).
+  print(f"ls_paths : {ls_paths}")
+
+  # Iterate through each discovered path containing test cases.
+  for path in ls_paths:
+    # Convert the relative path to an absolute path for clarity and reliability.
+    absolute_path = os.path.abspath(path)
+    print(f"abs_path => {absolute_path}")
+
+    # Create a test loader to find and load test cases based on the specified
+    # pattern.
+    loader = unittest.TestLoader()
+    discovered = loader.discover(absolute_path, pattern=pattern)
+
+    # Run the discovered test cases using the custom ColorfulTestRunner.
+    runner = ColorfulTestRunner(verbosity=2)
+    result = runner.run(discovered)
+
+    # If any test fails, exit with a status code of 1 to indicate failure.
+ if not result.wasSuccessful(): + sys.exit(1) + + +def main(argv): + del argv # Unused + # Call the function with values obtained from the command-line flags. + discover_and_run_tests(test_type=_TEST_TYPE.value, pattern=_FILE_PATTERN.value) + + +if __name__ == "__main__": + app.run(main) diff --git a/sight_service/message_queue.py b/sight_service/message_queue.py new file mode 100644 index 0000000..df8c187 --- /dev/null +++ b/sight_service/message_queue.py @@ -0,0 +1,400 @@ +"""A message queue implementation using reader-writer locks.""" + +import abc +import copy +import enum +from typing import Any, Callable, Dict, Generic, Optional, Protocol, TypeVar +import uuid + +from helpers.logs.logs_handler import logger as logging +from overrides import overrides +from readerwriterlock import rwlock + +# Alias for message ID type +ID = int + + +class MessageState(enum.Enum): + """The state of a message in the message queue. + """ + PENDING = 'pending' + ACTIVE = 'active' + COMPLETED = 'completed' + NOT_FOUND = 'not found' + + def __str__(self): + return str(self.value) + + +class UUIDStrategy(abc.ABC): + """An abstract base class for generating unique IDs. + + This defines a strategy interface for generating unique IDs. Subclasses + should implement the `generate_id` method to provide different ways of + creating unique identifiers. + """ + + @abc.abstractmethod + def generate_id(self) -> ID: + pass + + +class IncrementalUUID(UUIDStrategy): + """A strategy for generating unique IDs incrementally. + + This strategy generates sequential unique IDs starting from 1 and + incrementing by 1 for each new ID. + """ + + def __init__(self): + self.current_id = 1 + + @overrides + def generate_id(self) -> int: + unique_id = self.current_id + self.current_id += 1 + return unique_id + + +class RandomUUID(UUIDStrategy): + """A strategy for generating unique IDs using UUIDs. + + This strategy generates unique IDs using a random UUID converted to its + integer representation. It provides more randomness compared to the + incremental strategy. + """ + + @overrides + def generate_id(self) -> int: + return uuid.uuid4().int # Using the integer representation of UUID + + +# Define a generic type variable for messages +T = TypeVar('T') + + +class IMessageQueue(Protocol, Generic[T]): + """A message queue is a data structure that stores messages. + + #### State machine for each message: + ##### 1. NotInQueue -> The message does not exist in the queue. + ##### 2. Pending -> The message is added to the queue but not yet processed. + ##### 3. Active -> The message is assigned to a worker for processing. + ##### 4. Completed -> The message is processed and moved to the completed + state. + """ + + def push_message(self, message: T) -> ID: + """Pushes a message to the queue. + + Args: + message: The message to push. + """ + ... + + def create_active_batch( + self, worker_id: str, new_batch_size: Optional[int] = None + ) -> Dict[ID, T]: + """Move a batch of messages for a given worker into active list. + + Args: + worker_id: The ID of the worker that will process the messages. + new_batch_size: The size of the batch to process. If not provided, the + default batch size will be used. + + Returns: + A dictionary of messages that were processed, keyed by message ID. + """ + ... + + def complete_message( + self, message_id: ID, worker_id: str, update_fn: Callable[[T], T] = None + ) -> None: + """Completes a message of the given message ID of the given worker it moves it to the completed queue. 
+ + Args: + message_id: The ID of the message to complete. + worker_id: The ID of the worker that completed the message. + update_fn: A function that takes the current message and returns the updated message. + + Note: + The use of `extra_details` is unusual and specific to this + implementation. + It allows additional data to be recorded for a completed message if the + message + is of type `dict`. This can be useful for tracking metadata or other + information + relevant to the message's processing history. + """ + ... + + def get_status(self) -> Dict[str, int]: + """Returns the status of the message queue.""" + ... + + def get_all_messages(self) -> Dict[str, Dict[ID, T]]: + """Returns all messages in the message queue.""" + ... + + def get_pending(self) -> Dict[ID, T]: + """Returns all pending messages in the queue.""" + ... + + def get_active(self) -> Dict[str, Dict[ID, T]]: + """Returns all active messages in the queue.""" + ... + + def get_completed(self) -> Dict[ID, T]: + """Returns all completed messages in the queue.""" + ... + + def find_message_location(self, message_id: ID) -> MessageState: + """Returns the location of the message in the message queue.""" + ... + + def is_message_in_pending(self, message_id: ID) -> bool: + """Checks if the message is in the pending state.""" + ... + + def is_message_in_active(self, message_id: ID) -> bool: + """Checks if the message is in the active state.""" + ... + + def is_message_in_completed(self, message_id: ID) -> bool: + """Checks if the message is in the completed state.""" + ... + +class MessageQueue(IMessageQueue[T]): + """A message queue is a data structure that stores messages. + + ##### State machine for each message: + ##### 1. NotInQueue -> The message does not exist in the queue. + ##### 2. Pending -> The message is added to the queue but not yet processed. + ##### 3. Active -> The message is assigned to a worker for processing. + ##### 4. Completed -> The message is processed and moved to the completed + state. + + + Attributes: + id_generator: The ID generator used to generate unique IDs for messages. + pending: A dictionary of pending messages, keyed by message ID. + active: A dictionary of active messages, keyed by worker ID and message ID. + completed: A dictionary of completed messages, keyed by message ID. + batch_size: The size of the batch to process. (default: 1) + pending_lock: The lock used to synchronize access to the pending messages. + active_lock: The lock used to synchronize access to the active messages. + completed_lock: The lock used to synchronize access to the completed + messages. + """ + + # Locking Procedure: + # ----------------- + # This class uses a Reader-Writer locking mechanism to protect access to + # shared resources (the message queues). + # - The reader-writer locks allow concurrent reads but ensure exclusive access + # for writes, improving efficiency when multiple reads are performed + # simultaneously. + # - There are three distinct locks used: + # 1. `pending_lock` - Protects the `pending` messages dictionary. + # 2. `active_lock` - Protects the `active` messages dictionary. + # 3. `completed_lock` - Protects the `completed` messages dictionary. + # - Each lock is associated with a specific queue state to ensure that + # operations on each state are thread-safe and do not interfere with each + # other. 
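+  #
+  # A minimal sketch of how these locks are used in the methods below (the
+  # variable names here are illustrative only, not part of the queue's API):
+  #
+  #   with self.pending_lock.gen_rlock():   # shared: many readers at once
+  #     snapshot = dict(self.pending)
+  #   with self.pending_lock.gen_wlock():   # exclusive: a single writer
+  #     self.pending[msg_id] = message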
+ + def __init__( + self, + id_generator: UUIDStrategy, + batch_size: int = 1, + lock_factory: Callable[[], rwlock.RWLockFairD] = rwlock.RWLockFairD, + ): + self.id_generator = id_generator + self.pending: Dict[ID, T] = {} + self.active: Dict[str, Dict[ID, T]] = {} + self.completed: Dict[ID, T] = {} + + self.batch_size = batch_size + self.pending_lock = lock_factory() + self.active_lock = lock_factory() + self.completed_lock = lock_factory() + + def __str__(self) -> str: + all_messages = self.get_all_messages() + + result = ['MessageQueue:'] + result.append(' Pending Messages:') + for msg_id, message in all_messages['pending'].items(): + result.append(f' ID: {msg_id}, Message: {message}') + + result.append(' Active Messages:') + for msg_id, message in all_messages['active'].items(): + result.append(f' ID: {msg_id}, Message: {message}') + + result.append(' Completed Messages:') + for msg_id, message in all_messages['completed'].items(): + result.append(f' ID: {msg_id}, Message: {message}') + + return '\n'.join(result) + + @overrides + def push_message(self, message: T) -> ID: + """Pushes a message to the queue. + + Args: + message: The message to push. + + Returns: + The unique ID of the pushed message. + """ + unique_id = self.id_generator.generate_id() + with self.pending_lock.gen_wlock(): + self.pending[unique_id] = message + return unique_id + + @overrides + def create_active_batch( + self, worker_id: str, new_batch_size: Optional[int] = None + ) -> Dict[ID, T]: + """Move a batch of messages for a given worker into active list. + + Args: + worker_id: The ID of the worker that will process the messages. + new_batch_size: The size of the batch to process. If not provided, the + default batch size will be used. + + Returns: + A dictionary of messages that were processed, keyed by message ID. + """ + batch_size = ( + new_batch_size if new_batch_size is not None else self.batch_size + ) + batch: Dict[ID, T] = {} + + with self.pending_lock.gen_wlock(): + for _ in range(min(batch_size, len(self.pending))): + message_id = next(iter(self.pending)) + message = self.pending.pop(message_id) + batch[message_id] = message + + with self.active_lock.gen_wlock(): + if worker_id not in self.active: + self.active[worker_id] = {} + self.active[worker_id].update(batch) + + return batch + + @overrides + def complete_message( + self, message_id: ID, worker_id: str, update_fn: Callable[[T], T] = None + ) -> None: + """Completes a message of the given message ID of the given worker it moves it to the completed queue. + + Args: + message_id: The ID of the message to complete. + worker_id: The ID of the worker that completed the message. + update_fn: A function that takes the current message and returns the updated message. 
+ """ + with self.active_lock.gen_wlock(): + if message_id in self.active.get(worker_id, {}): + message = self.active[worker_id][message_id] + del self.active[worker_id][message_id] + + if update_fn is not None: + logging.info('Before update_fn msg: %s', message) + message = update_fn(message) # Apply the lambda to update the message + logging.info('After update_fn msg: %s', message) + + with self.completed_lock.gen_wlock(): + self.completed[message_id] = message + else: + raise ValueError( + f'Failed while completing the msg ,as Message ID {message_id} not found for worker {worker_id}' + ) + + @overrides + def get_status(self) -> Dict[str, int]: + """Returns the status of the message queue.""" + with self.pending_lock.gen_rlock(): + pending_len = len(self.pending) + with self.active_lock.gen_rlock(): + active_len = sum(len(batch) for batch in self.active.values()) + with self.completed_lock.gen_rlock(): + completed_len = len(self.completed) + + return { + 'pending': pending_len, + 'active': active_len, + 'completed': completed_len, + } + + @overrides + def get_all_messages(self) -> Dict[str, Any]: + """Returns all messages in the message queue.""" + with self.pending_lock.gen_rlock(): + pending_copy = copy.deepcopy(self.pending) + with self.active_lock.gen_rlock(): + active_copy = copy.deepcopy(self.active) + with self.completed_lock.gen_rlock(): + completed_copy = copy.deepcopy(self.completed) + + return { + 'pending': pending_copy, + 'active': active_copy, + 'completed': completed_copy, + } + + @overrides + def get_pending(self) -> Dict[ID, T]: + """Returns all pending messages in the queue.""" + with self.pending_lock.gen_rlock(): + return copy.deepcopy(self.pending) + + @overrides + def get_active(self) -> Dict[str, Dict[ID, T]]: + """Returns all active messages in the queue.""" + with self.active_lock.gen_rlock(): + return copy.deepcopy(self.active) + + @overrides + def get_completed(self) -> Dict[ID, T]: + """Returns all completed messages in the queue.""" + with self.completed_lock.gen_rlock(): + return copy.deepcopy(self.completed) + + + @overrides + def is_message_in_pending(self,message_id: ID) -> bool: + """Returns the true if the message in the pending queue.""" + with self.pending_lock.gen_rlock(): + return message_id in self.pending + + @overrides + def is_message_in_active(self,message_id: ID) -> bool: + """Returns the true if the message in the active queue.""" + with self.active_lock.gen_rlock(): + for _, messages in self.active.items(): + return message_id in messages + + @overrides + def is_message_in_completed(self,message_id: ID) -> bool: + """Returns the true if the message in the completed queue.""" + with self.completed_lock.gen_rlock(): + return message_id in self.completed + + @overrides + def find_message_location(self, message_id: ID) -> MessageState: + """Returns the location of the message in the message queue.""" + with self.pending_lock.gen_rlock(): + if message_id in self.pending: + return MessageState.PENDING + + with self.active_lock.gen_rlock(): + for _, messages in self.active.items(): + if message_id in messages: + return MessageState.ACTIVE + + with self.completed_lock.gen_rlock(): + if message_id in self.completed: + return MessageState.COMPLETED + + return MessageState.NOT_FOUND diff --git a/sight_service/single_action_optimizer.py b/sight_service/single_action_optimizer.py index 550723d..8097646 100644 --- a/sight_service/single_action_optimizer.py +++ b/sight_service/single_action_optimizer.py @@ -14,16 +14,58 @@ """An instance of a Sight 
optimizer dedicated to a single experiment.""" from concurrent import futures +import dataclasses from typing import Any, Dict, List, Sequence, Tuple from helpers.logs.logs_handler import logger as logging from sight.proto import sight_pb2 +from sight_service.message_queue import IMessageQueue +from sight_service.message_queue import IncrementalUUID +from sight_service.message_queue import MessageQueue from sight_service.optimizer_instance import OptimizerInstance from sight_service.proto import service_pb2 _file_name = "single_action_optimizer.py" +@dataclasses.dataclass() +class MessageDetails: + """Message details for a single message. + + Attributes: + reward: The reward for the action. + outcome: The outcome of the action. + action: The action taken. + attributes: The attributes of the action. + """ + + action: Dict[str, str] + attributes: Dict[str, str] + reward: float + outcome: Dict[str, str] + + @classmethod + def create(cls, action, attributes, reward=None, outcome=None): + return cls(action, attributes, reward, outcome) + + def update(self, reward=None, outcome=None, action=None, attributes=None): + if reward is not None: + self.reward = reward + if outcome is not None: + self.outcome = outcome + if action is not None: + self.action = action + if attributes is not None: + self.attributes = attributes + return self + + def __str__(self): + return (f"MessageDetails(\n" + f"action: {self.action},\n" + f"attributes: {self.attributes},\n" + f"reward: {self.reward},\n" + f"outcome: {self.outcome}\n)") + class SingleActionOptimizer(OptimizerInstance): """An SingleActionOptimizer class that is generic for all optimizers. @@ -33,7 +75,4 @@ class SingleActionOptimizer(OptimizerInstance): def __init__(self): super().__init__() - self.unique_id = 1 - self.pending_samples = {} - self.active_samples = {} - self.completed_samples = {} + self.queue: IMessageQueue = MessageQueue[MessageDetails](id_generator=IncrementalUUID()) diff --git a/sight_service/tests/colorful_tests.py b/sight_service/tests/colorful_tests.py new file mode 100644 index 0000000..4870e47 --- /dev/null +++ b/sight_service/tests/colorful_tests.py @@ -0,0 +1,27 @@ +import unittest + +from colorama import Fore +from colorama import init +from colorama import Style + +# Initialize colorama +init(autoreset=True) + + +class ColorfulTestResult(unittest.TextTestResult): + + def addSuccess(self, test): + super().addSuccess(test) + self.stream.write('\n' + Fore.GREEN + 'PASS' + Style.RESET_ALL + '\n') + + def addFailure(self, test, err): + super().addFailure(test, err) + self.stream.write('\n' + Fore.RED + 'FAIL' + Style.RESET_ALL + '\n') + + def addError(self, test, err): + super().addError(test, err) + self.stream.write('\n' + Fore.YELLOW + 'ERROR' + Style.RESET_ALL + '\n') + + +class ColorfulTestRunner(unittest.TextTestRunner): + resultclass = ColorfulTestResult diff --git a/sight_service/tests/functional/test_message_queue.py b/sight_service/tests/functional/test_message_queue.py new file mode 100644 index 0000000..34cf62c --- /dev/null +++ b/sight_service/tests/functional/test_message_queue.py @@ -0,0 +1,193 @@ +"""Tests for the MessageQueue class.""" + +import unittest + +import sight_service.message_queue as mq +from sight_service.tests import colorful_tests + + +class TestMessageQueue(unittest.TestCase): + """Tests for MessageQueue class. 
+ + Attributes: + incremental_id_generator: IncrementalUUID() + queue: MessageQueue[int] + """ + + def setUp(self): + """Set up the MessageQueue and IncrementalUUID for testing.""" + super().setUp() + # Use IncrementalUUID for most tests to have predictable IDs + self.incremental_id_generator = mq.IncrementalUUID() + self.queue = mq.MessageQueue[int]( + id_generator=self.incremental_id_generator, batch_size=2 + ) + + def test_add_message_with_incremental_id(self): + message_id = self.queue.push_message(100) + self.assertEqual(message_id, 1) + status = self.queue.get_status() + self.assertEqual(status['pending'], 1) + self.assertEqual(status['active'], 0) + self.assertEqual(status['completed'], 0) + + def test_process_messages(self): + """Test process_messages() with a batch size of 2 and 3 messages in the queue.""" + self.queue.push_message(100) + self.queue.push_message(200) + self.queue.push_message(300) + + batch = self.queue.create_active_batch(worker_id='worker1') + self.assertEqual(len(batch), 2) + self.assertIn(1, batch) + self.assertIn(2, batch) + + status = self.queue.get_status() + self.assertEqual(status['pending'], 1) + self.assertEqual(status['active'], 2) + self.assertEqual(status['completed'], 0) + + def test_complete_message(self): + """Test complete_message() with a batch size of 2 and 2 messages in the queue.""" + self.queue.push_message(100) + self.queue.push_message(200) + + batch = self.queue.create_active_batch(worker_id='worker1') + self.assertEqual(len(batch), 2) + self.queue.complete_message(1, 'worker1') + + status = self.queue.get_status() + self.assertEqual(status['pending'], 0) + self.assertEqual(status['active'], 1) + self.assertEqual(status['completed'], 1) + self.assertIn(1, self.queue.get_completed()) + + def test_complete_message_with_lambda_update(self): + self.queue.push_message(500) + batch = self.queue.create_active_batch(worker_id='worker2') + self.assertEqual(len(batch), 1) + + # Apply a lambda function that doubles the message value + update_fn = lambda msg: msg * 2 + self.queue.complete_message(1, 'worker2', update_fn) + + # Verify that the message was updated using the lambda function + completed_msg = self.queue.get_completed()[1] + self.assertEqual(completed_msg, 1000) # 500 * 2 + + def test_get_pending_messages(self): + self.queue.push_message(100) + self.queue.push_message(200) + pending_messages = self.queue.get_pending() + self.assertEqual(len(pending_messages), 2) + + def test_get_active_messages(self): + self.queue.push_message(100) + self.queue.push_message(200) + self.queue.create_active_batch(worker_id='worker1') + active_messages = self.queue.get_active() + self.assertIn('worker1', active_messages) + self.assertEqual(len(active_messages['worker1']), 2) + + def test_get_completed_messages(self): + self.queue.push_message(100) + self.queue.create_active_batch(worker_id='worker1') + self.queue.complete_message(1, 'worker1') + completed_messages = self.queue.get_completed() + self.assertEqual(len(completed_messages), 1) + + def test_process_and_complete_message(self): + """Test process_messages() and complete_message() with a batch size of 1.""" + self.queue.push_message(100) + self.queue.push_message(200) + self.queue.push_message(300) + + batch = self.queue.create_active_batch( + worker_id='worker1', new_batch_size=1 + ) + self.assertEqual(len(batch), 1) + self.assertIn(1, batch) + + self.queue.complete_message(1, 'worker1') + status = self.queue.get_status() + self.assertEqual(status['pending'], 2) + self.assertEqual(status['active'], 
0) + self.assertEqual(status['completed'], 1) + + def test_complete_message_not_found(self): + self.queue.push_message(100) + self.queue.push_message(200) + + batch = self.queue.create_active_batch(worker_id='worker1') + self.assertEqual(len(batch), 2) + with self.assertRaises(ValueError): + self.queue.complete_message(999, 'worker1') + + def test_empty_process_messages(self): + batch = self.queue.create_active_batch(worker_id='worker1') + self.assertEqual(len(batch), 0) + status = self.queue.get_status() + self.assertEqual(status['pending'], 0) + self.assertEqual(status['active'], 0) + self.assertEqual(status['completed'], 0) + + def test_find_message_location(self): + self.queue.push_message(100) + self.queue.push_message(200) + self.queue.create_active_batch(worker_id='worker1') + location = self.queue.find_message_location(1) + self.assertEqual(location, mq.MessageState.ACTIVE) + + def test_get_all_messages(self): + """Test get_all_messages() with 2 pending messages and 1 completed message.""" + + self.queue.push_message(100) + self.queue.push_message(200) + + all_messages = self.queue.get_all_messages() + self.assertEqual(len(all_messages['pending']), 2) + self.assertEqual( + len(all_messages['active']), 0 + ) # No messages should be in active yet + self.assertEqual(len(all_messages['completed']), 0) + + # Process the messages, which should move them + # to 'active' under a specific worker_id + self.queue.create_active_batch(worker_id='worker1') + all_messages = self.queue.get_all_messages() + + # After processing, 'pending' should be empty, 'active' + # should have 2 messages under 'worker1' + self.assertEqual(len(all_messages['pending']), 0) + self.assertIn('worker1', all_messages['active']) + self.assertEqual(len(all_messages['active']['worker1']), 2) + self.assertEqual(len(all_messages['completed']), 0) + + self.queue.complete_message(1, 'worker1') + all_messages = self.queue.get_all_messages() + + # 'pending' should still be empty, 'active' should have + # 1 message under 'worker1', and 'completed' should have 1 message + self.assertEqual(len(all_messages['pending']), 0) + self.assertIn('worker1', all_messages['active']) + self.assertEqual(len(all_messages['active']['worker1']), 1) + self.assertEqual(len(all_messages['completed']), 1) + + def test_add_message_with_uuid(self): + """Test add_message() with a UUID ID generator.""" + uuid_id_generator = mq.RandomUUID() + queue_with_uuid = mq.MessageQueue[str]( + id_generator=uuid_id_generator, batch_size=2 + ) + + message_id1 = queue_with_uuid.push_message('Task A') + message_id2 = queue_with_uuid.push_message('Task B') + + # Check that the UUIDs are unique and have been assigned correctly + self.assertNotEqual(message_id1, message_id2) + self.assertIn(message_id1, queue_with_uuid.get_pending()) + self.assertIn(message_id2, queue_with_uuid.get_pending()) + + +if __name__ == '__main__': + unittest.main(testRunner=colorful_tests.ColorfulTestRunner()) diff --git a/sight_service/worklist_scheduler_opt.py b/sight_service/worklist_scheduler_opt.py index 5955047..96ead12 100644 --- a/sight_service/worklist_scheduler_opt.py +++ b/sight_service/worklist_scheduler_opt.py @@ -24,6 +24,7 @@ from sight_service.optimizer_instance import param_dict_to_proto from sight_service.optimizer_instance import param_proto_to_dict from sight_service.proto import service_pb2 +from sight_service.single_action_optimizer import MessageDetails from sight_service.single_action_optimizer import SingleActionOptimizer _file_name = "exhaustive_search.py" @@ -44,9 
+45,16 @@ def __init__(self):
     self.exp_completed = False
     self.possible_values = {}
     self.max_reward_sample = {}
-    self.pending_lock = rwlock.RWLockFair()
-    self.active_lock = rwlock.RWLockFair()
-    self.completed_lock = rwlock.RWLockFair()
+
+
+  def add_outcome_to_outcome_response(self, msg_details: MessageDetails, sample_id, outcome: service_pb2.GetOutcomeResponse.Outcome):
+    outcome.action_id = sample_id
+    outcome.status = service_pb2.GetOutcomeResponse.Outcome.Status.COMPLETED
+    outcome.reward = msg_details.reward
+    outcome.action_attrs.extend(param_dict_to_proto(msg_details.action))
+    outcome.outcome_attrs.extend(param_dict_to_proto(msg_details.outcome))
+    outcome.attributes.extend(param_dict_to_proto(msg_details.attributes))
+
 
   @overrides
   def launch(self,
@@ -61,114 +69,85 @@ def launch(self,
   @overrides
   def propose_action(
       self, request: service_pb2.ProposeActionRequest
-  ) -> service_pb2.ProposeActionResponse:
+  ) -> service_pb2.ProposeActionResponse:
     # print('request in propose actions: ', request)
     attributes = param_proto_to_dict(request.attributes)
     action_attrs = param_proto_to_dict(request.action_attrs)
 
-    with self.pending_lock.gen_wlock():
-      self.pending_samples[self.unique_id] = [action_attrs, attributes]
-
-    # print('self.pending_samples : ',
-    #       self.pending_samples)
-    # print('self.active_samples : ',
-    #       self.active_samples)
-    # print('self.completed_samples : ',
-    #       self.completed_samples)
-    print('self.unique_id : ', self.unique_id)
+    message = MessageDetails.create(action=action_attrs,attributes=attributes)
 
     # Create response
     response = service_pb2.ProposeActionResponse(action_id=self.unique_id)
     self.unique_id += 1
     return response
 
+    unique_id = self.queue.push_message(message)
+
+    logging.info("self.queue => %s", self.queue)
+
+    response = service_pb2.ProposeActionResponse(action_id=unique_id)
+    return response
+
   @overrides
   def GetOutcome(
      self,
      request: service_pb2.GetOutcomeRequest) -> service_pb2.GetOutcomeResponse:
-    # print('self.pending_samples : ',
-    #       self.pending_samples)
-    # print('self.active_samples : ',
-    #       self.active_samples)
-    # print('self.completed_samples : ',
-    #       self.completed_samples)
-    with self.completed_lock.gen_rlock():
-      completed_samples = self.completed_samples
-    with self.pending_lock.gen_rlock():
-      pending_samples = self.pending_samples
-    with self.active_lock.gen_rlock():
-      active_samples = self.active_samples
+
+    logging.info('self.queue => %s', self.queue)
+
+    all_completed_messages = self.queue.get_completed()
 
     response = service_pb2.GetOutcomeResponse()
-    if (request.unique_ids):
+    if not request.unique_ids:
+      for sample_id in all_completed_messages:
+        outcome = response.outcome.add()
+        given_msg_details = all_completed_messages[sample_id]
+        self.add_outcome_to_outcome_response(msg_details=given_msg_details, sample_id=sample_id, outcome=outcome)
+    else:
       required_samples = list(request.unique_ids)
 
       for sample_id in required_samples:
         outcome = response.outcome.add()
         outcome.action_id = sample_id
-        if (sample_id in completed_samples):
-          sample_details = self.completed_samples[sample_id]
-          outcome.status = service_pb2.GetOutcomeResponse.Outcome.Status.COMPLETED
-          outcome.reward = sample_details['reward']
-          outcome.action_attrs.extend(
-              param_dict_to_proto(sample_details['action']))
-          outcome.outcome_attrs.extend(
-              param_dict_to_proto(sample_details['outcome']))
-          outcome.attributes.extend(
-              param_dict_to_proto(sample_details['attribute']))
-        elif (sample_id in pending_samples):
+        if sample_id in all_completed_messages:
+          given_msg_details = 
all_completed_messages[sample_id] + self.add_outcome_to_outcome_response(msg_details=given_msg_details,sample_id=sample_id,outcome=outcome) + elif self.queue.is_message_in_pending(sample_id): outcome.status = service_pb2.GetOutcomeResponse.Outcome.Status.PENDING outcome.response_str = '!! requested sample not yet assigned to any worker !!' - elif any(value['id'] == sample_id for value in active_samples.values()): + elif self.queue.is_message_in_active(sample_id): outcome.status = service_pb2.GetOutcomeResponse.Outcome.Status.ACTIVE outcome.response_str = '!! requested sample not completed yet !!' else: outcome.status = service_pb2.GetOutcomeResponse.Outcome.Status.NOT_EXIST outcome.response_str = f'!! requested sample Id {sample_id} does not exist !!' - - print("!! NOT EXIST !!") - with self.active_lock.gen_rlock(): - print(self.active_samples) - with self.pending_lock.gen_rlock(): - print(self.pending_samples) - with self.completed_lock.gen_rlock(): - print(self.completed_samples) - else: - for sample_id in completed_samples.keys(): - sample_details = completed_samples[sample_id] - outcome = response.outcome.add() - outcome.action_id = sample_id - outcome.status = service_pb2.GetOutcomeResponse.Outcome.Status.COMPLETED - outcome.reward = sample_details['reward'] - - outcome.action_attrs.extend( - param_dict_to_proto(sample_details['action'])) - - outcome.outcome_attrs.extend( - param_dict_to_proto(sample_details['outcome'])) - - outcome.attributes.extend( - param_dict_to_proto(sample_details['attribute'])) - - # print('response here: ', response) return response @overrides def decision_point( self, request: service_pb2.DecisionPointRequest - ) -> service_pb2.DecisionPointResponse: + ) -> service_pb2.DecisionPointResponse: method_name = "decision_point" logging.debug(">>>> In %s of %s", method_name, _file_name) + logging.info('self.queue ==> %s', self.queue) - # print('self.pending_samples : ', - # self.pending_samples) - # print('self.active_samples : ', - # self.active_samples) - # print('self.completed_samples : ', - # self.completed_samples) - # print('self.unique_id : ', self.unique_id) + all_active_messages = self.queue.get_active() - dp_response = service_pb2.DecisionPointResponse() + response = service_pb2.DecisionPointResponse() + if request.worker_id in all_active_messages: + samples = all_active_messages[request.worker_id] + else: + raise ValueError("Key not found in active_samples") + next_action = list(samples.values())[0].action + logging.info('next_action=%s', next_action) + response.action.extend(param_dict_to_proto(next_action)) + response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return response + + # --- end + + # dp_response = service_pb2.DecisionPointResponse() # if(self.exp_completed): # logging.info("sight experiment completed, killing the worker") # dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_DONE @@ -187,63 +166,47 @@ def decision_point( # with self.active_lock.gen_wlock(): # self.active_samples[request.worker_id] = {'id': key, 'sample': sample} - with self.active_lock.gen_rlock(): - if (request.worker_id in self.active_samples): - sample = self.active_samples[request.worker_id]['sample'] - else: - raise ValueError("key not foung in active_samples") - next_action = sample[0] - logging.info('next_action=%s', next_action) - # raise SystemExit - dp_response.action.extend(param_dict_to_proto(next_action)) - # print('self.active_samples : ', 
self.active_samples) - # print('self.pending_samples : ', self.pending_samples) - # print('self.completed_samples : ', self.completed_samples) - dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT - # else: - # dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_RETRY - - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return dp_response + # with self.active_lock.gen_rlock(): + # if (request.worker_id in self.active_samples): + # sample = self.active_samples[request.worker_id]['sample'] + # else: + # raise ValueError("key not foung in active_samples") + # next_action = sample[0] + # logging.info('next_action=%s', next_action) + # # raise SystemExit + # dp_response.action.extend(param_dict_to_proto(next_action)) + # dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT + # logging.debug("<<<< Out %s of %s", method_name, _file_name) + # return dp_response @overrides def finalize_episode( self, request: service_pb2.FinalizeEpisodeRequest - ) -> service_pb2.FinalizeEpisodeResponse: + ) -> service_pb2.FinalizeEpisodeResponse: method_name = "finalize_episode" logging.debug(">>>> In %s of %s", method_name, _file_name) - # logging.info("req in finalize episode of dummy.py : %s", request) - - with self.active_lock.gen_rlock(): - sample_dict = self.active_samples[request.worker_id] - - with self.completed_lock.gen_wlock(): - self.completed_samples[sample_dict['id']] = { - # 'action': self.pending_samples[unique_action_id], - 'action': - param_proto_to_dict(request.decision_point.choice_params), - 'attribute': - sample_dict['sample'][1], - 'reward': - request.decision_outcome.reward, - 'outcome': - param_proto_to_dict(request.decision_outcome.outcome_params) - } - - with self.active_lock.gen_wlock(): - del self.active_samples[request.worker_id] - - # print('self.active_samples : ', self.active_samples) - # print('self.pending_samples : ', self.pending_samples) - # print('self.completed_samples : ', self.completed_samples) + logging.info("self.queue => %s", self.queue) + + all_active_messages = self.queue.get_active() + + active_messages : Dict[str,MessageDetails] = all_active_messages[request.worker_id] + + for action_id, message in active_messages.items(): + self.queue.complete_message( + message_id=action_id, + worker_id=request.worker_id, + update_fn = lambda msg: msg.update(reward = request.decision_outcome.reward, outcome = param_proto_to_dict(request.decision_outcome.outcome_params), action = param_proto_to_dict(request.decision_point.choice_params)) + ) + logging.info("self.queue => %s", self.queue) + logging.debug("<<<< Out %s of %s", method_name, _file_name) return service_pb2.FinalizeEpisodeResponse(response_str='Success!') @overrides def current_status( self, request: service_pb2.CurrentStatusRequest - ) -> service_pb2.CurrentStatusResponse: + ) -> service_pb2.CurrentStatusResponse: method_name = "current_status" logging.debug(">>>> In %s of %s", method_name, _file_name) # add logic to check status - ref from exhaustive search @@ -251,7 +214,7 @@ def current_status( @overrides def fetch_optimal_action( self, request: service_pb2.FetchOptimalActionRequest - ) -> service_pb2.FetchOptimalActionResponse: + ) -> service_pb2.FetchOptimalActionResponse: method_name = "fetch_optimal_action" logging.debug(">>>> In %s of %s", method_name, _file_name) # add logic to check status - ref from exhaustive search @@ -271,24 +234,22 @@ def close(self, @overrides def WorkerAlive( self, request: 
service_pb2.WorkerAliveRequest - ) -> service_pb2.WorkerAliveResponse: + ) -> service_pb2.WorkerAliveResponse: method_name = "WorkerAlive" logging.debug(">>>> In %s of %s", method_name, _file_name) + + logging.info("self.queue => %s", self.queue) + if (self.exp_completed): worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_DONE - elif (not self.pending_samples): + elif (not self.queue.get_status()["pending"]): worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_RETRY else: worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_ACT - # put sample in active sample list?? - with self.pending_lock.gen_wlock(): - key = next(iter(self.pending_samples)) - sample = self.pending_samples.pop(key) - with self.active_lock.gen_wlock(): - self.active_samples[request.worker_id] = {'id': key, 'sample': sample} - print("self.active_samples : ", self.active_samples) + self.queue.create_active_batch(worker_id=request.worker_id) + logging.info("self.queue => %s", self.queue) logging.info("worker_alive_status is %s", worker_alive_status) logging.debug("<<<< Out %s of %s", method_name, _file_name) return service_pb2.WorkerAliveResponse(status_type=worker_alive_status) From f0cedf6077b2d8d115bde80f81de2724226391da Mon Sep 17 00:00:00 2001 From: hrushikeshm-g Date: Fri, 18 Oct 2024 05:09:42 +0000 Subject: [PATCH 13/25] merge-issue resovled --- sight_service/single_action_optimizer.py | 11 ++++++----- sight_service/worklist_scheduler_opt.py | 5 ----- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/sight_service/single_action_optimizer.py b/sight_service/single_action_optimizer.py index 8097646..e356771 100644 --- a/sight_service/single_action_optimizer.py +++ b/sight_service/single_action_optimizer.py @@ -60,11 +60,12 @@ def update(self, reward=None, outcome=None, action=None, attributes=None): return self def __str__(self): - return (f"MessageDetails(\n" - f"action: {self.action},\n" - f"attributes: {self.attributes},\n" - f"reward: {self.reward},\n" - f"outcome: {self.outcome}\n)") + return (f"[X]") + # (f"MessageDetails(\n" + # f"action: {self.action},\n" + # f"attributes: {self.attributes},\n" + # f"reward: {self.reward},\n" + # f"outcome: {self.outcome}\n)") class SingleActionOptimizer(OptimizerInstance): """An SingleActionOptimizer class that is generic for all optimizers. 
diff --git a/sight_service/worklist_scheduler_opt.py b/sight_service/worklist_scheduler_opt.py index 96ead12..c3826ef 100644 --- a/sight_service/worklist_scheduler_opt.py +++ b/sight_service/worklist_scheduler_opt.py @@ -77,11 +77,6 @@ def propose_action( message = MessageDetails.create(action=action_attrs,attributes=attributes) - # Create response - response = service_pb2.ProposeActionResponse(action_id=self.unique_id) - self.unique_id += 1 - return response - unique_id = self.queue.push_message(message) logging.info("self.queue => %s", self.queue) From 10f85ae903cb1db0270901afcdf036d2073cbf79 Mon Sep 17 00:00:00 2001 From: hrushikeshm-g Date: Fri, 18 Oct 2024 08:14:16 +0000 Subject: [PATCH 14/25] deep copy replace with shallow one --- sight_service/message_queue.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/sight_service/message_queue.py b/sight_service/message_queue.py index df8c187..a64ea02 100644 --- a/sight_service/message_queue.py +++ b/sight_service/message_queue.py @@ -331,11 +331,11 @@ def get_status(self) -> Dict[str, int]: def get_all_messages(self) -> Dict[str, Any]: """Returns all messages in the message queue.""" with self.pending_lock.gen_rlock(): - pending_copy = copy.deepcopy(self.pending) + pending_copy = copy.copy(self.pending) with self.active_lock.gen_rlock(): - active_copy = copy.deepcopy(self.active) + active_copy = copy.copy(self.active) with self.completed_lock.gen_rlock(): - completed_copy = copy.deepcopy(self.completed) + completed_copy = copy.copy(self.completed) return { 'pending': pending_copy, @@ -347,19 +347,19 @@ def get_all_messages(self) -> Dict[str, Any]: def get_pending(self) -> Dict[ID, T]: """Returns all pending messages in the queue.""" with self.pending_lock.gen_rlock(): - return copy.deepcopy(self.pending) + return copy.copy(self.pending) @overrides def get_active(self) -> Dict[str, Dict[ID, T]]: """Returns all active messages in the queue.""" with self.active_lock.gen_rlock(): - return copy.deepcopy(self.active) + return copy.copy(self.active) @overrides def get_completed(self) -> Dict[ID, T]: """Returns all completed messages in the queue.""" with self.completed_lock.gen_rlock(): - return copy.deepcopy(self.completed) + return copy.copy(self.completed) @overrides From 04990f268fb8a58e6a337aa8f212ad984e706bb8 Mon Sep 17 00:00:00 2001 From: hrushikeshm-g Date: Fri, 18 Oct 2024 09:18:00 +0000 Subject: [PATCH 15/25] updated the shallow copy --- sight_service/message_queue.py | 8 -------- sight_service/worklist_scheduler_opt.py | 2 +- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/sight_service/message_queue.py b/sight_service/message_queue.py index a64ea02..6ed9397 100644 --- a/sight_service/message_queue.py +++ b/sight_service/message_queue.py @@ -117,14 +117,6 @@ def complete_message( worker_id: The ID of the worker that completed the message. update_fn: A function that takes the current message and returns the updated message. - Note: - The use of `extra_details` is unusual and specific to this - implementation. - It allows additional data to be recorded for a completed message if the - message - is of type `dict`. This can be useful for tracking metadata or other - information - relevant to the message's processing history. """ ... 
diff --git a/sight_service/worklist_scheduler_opt.py b/sight_service/worklist_scheduler_opt.py index c3826ef..a1a3c25 100644 --- a/sight_service/worklist_scheduler_opt.py +++ b/sight_service/worklist_scheduler_opt.py @@ -187,7 +187,7 @@ def finalize_episode( active_messages : Dict[str,MessageDetails] = all_active_messages[request.worker_id] - for action_id, message in active_messages.items(): + for action_id, message in list(active_messages.items()): self.queue.complete_message( message_id=action_id, worker_id=request.worker_id, From f87e24417a7173e29134d0ebf9902e79ca4d771d Mon Sep 17 00:00:00 2001 From: hrushikeshm-g Date: Fri, 18 Oct 2024 11:50:14 +0000 Subject: [PATCH 16/25] yapf pre-commit flag was missing --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index cee2895..683840e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -14,7 +14,7 @@ repos: rev: v0.32.0 hooks: - id: yapf - args: ["--style", ".config/.style.yapf"] + args: ["-ir", "--style", ".config/.style.yapf"] - repo: https://github.com/pre-commit/mirrors-isort rev: v5.10.1 From 7af3345514b7c00ed24b9baacff47660bd15b07f Mon Sep 17 00:00:00 2001 From: hrushikeshm-g Date: Wed, 23 Oct 2024 07:13:36 +0000 Subject: [PATCH 17/25] yapf and isort were conflicting --- .config/.style.yapf | 3 +++ .pre-commit-config.yaml | 27 ++++++++++++++++++--------- 2 files changed, 21 insertions(+), 9 deletions(-) diff --git a/.config/.style.yapf b/.config/.style.yapf index a72817d..d12d364 100644 --- a/.config/.style.yapf +++ b/.config/.style.yapf @@ -2,3 +2,6 @@ based_on_style = google indent_width = 2 column_limit = 80 +split_before_logical_operator = False +blank_lines_between_top_level_imports_and_variables = 1 +dedent_closing_brackets = True diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 683840e..0cf0e06 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -10,17 +10,26 @@ repos: - id: check-yaml - - repo: https://github.com/pre-commit/mirrors-yapf - rev: v0.32.0 - hooks: - - id: yapf - args: ["-ir", "--style", ".config/.style.yapf"] + # - repo: https://github.com/google/yapf + # rev: v0.31.0 + # hooks: + # - id: yapf + # args: ["-ir", "--style", ".config/.style.yapf"] + + # - repo: https://github.com/pre-commit/mirrors-isort + # rev: v5.10.1 + # hooks: + # - id: isort + # args: ["--settings-path", ".config/.isort.cfg"] - - repo: https://github.com/pre-commit/mirrors-isort - rev: v5.10.1 + - repo: https://github.com/google/yapf + rev: v0.31.0 hooks: - - id: isort - args: ["--settings-path", ".config/.isort.cfg"] + - id: yapf + name: yapf and isort + entry: bash -c "yapf --style .config/.style.yapf -i $@ && isort --settings-path .config/.isort.cfg $@" + language: system + types: [python] - repo: local hooks: From 0de45b88c22c9f3941715545491feecccc1b4574 Mon Sep 17 00:00:00 2001 From: Greg Bronevetsky Date: Thu, 31 Oct 2024 14:48:53 +0000 Subject: [PATCH 18/25] Adding ability to track elapsed block time --- py/avrofile-schema.avsc | 5 + py/sight/proto/sight.proto | 1 + py/sight/proto/sight_pb2.py | 181 +++++++++++++------------- py/sight/sight.py | 19 +++ py/sight/widgets/decision/decision.py | 5 +- 5 files changed, 116 insertions(+), 95 deletions(-) diff --git a/py/avrofile-schema.avsc b/py/avrofile-schema.avsc index 3fdae87..ee32e62 100644 --- a/py/avrofile-schema.avsc +++ b/py/avrofile-schema.avsc @@ -734,6 +734,11 @@ "name": "elapsed_time_ns", "type": ["long", "null"], "doc": "" + }, + { + 
"name": "exclusive_elapsed_time_ns", + "type": ["long", "null"], + "doc": "" } ] }], diff --git a/py/sight/proto/sight.proto b/py/sight/proto/sight.proto index d68c4fd..e32b2e6 100644 --- a/py/sight/proto/sight.proto +++ b/py/sight/proto/sight.proto @@ -402,6 +402,7 @@ message BlockEnd { // at or near the time of the logged event. message Metrics { int64 elapsed_time_ns = 1; + int64 exclusive_elapsed_time_ns = 2; } Metrics metrics = 12; } diff --git a/py/sight/proto/sight_pb2.py b/py/sight/proto/sight_pb2.py index 98b30a4..123dd8f 100644 --- a/py/sight/proto/sight_pb2.py +++ b/py/sight/proto/sight_pb2.py @@ -7,19 +7,16 @@ from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder - # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() + from sight.proto import example_pb2 as sight_dot_proto_dot_example__pb2 -from sight.proto.widgets.pipeline.flume import ( - flume_pb2 as sight_dot_proto_dot_widgets_dot_pipeline_dot_flume_dot_flume__pb2 -) +from sight.proto.widgets.pipeline.flume import flume_pb2 as sight_dot_proto_dot_widgets_dot_pipeline_dot_flume_dot_flume__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17sight/proto/sight.proto\x12\rsight.x.proto\x1a\x19sight/proto/example.proto\x1a.sight/proto/widgets/pipeline/flume/flume.proto\"\'\n\tAttribute\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xb1\x0c\n\x06Object\x12\x10\n\x08location\x18\x01 \x01(\t\x12\r\n\x05index\x18\x02 \x01(\x03\x12\x0f\n\x07log_uid\x18\x18 \x01(\t\x12+\n\tattribute\x18\x03 \x03(\x0b\x32\x18.sight.x.proto.Attribute\x12/\n\x08sub_type\x18\x04 \x01(\x0e\x32\x1d.sight.x.proto.Object.SubType\x12#\n\x04text\x18\x05 \x01(\x0b\x32\x13.sight.x.proto.TextH\x00\x12\x30\n\x0b\x62lock_start\x18\x06 \x01(\x0b\x32\x19.sight.x.proto.BlockStartH\x00\x12,\n\tblock_end\x18\x07 \x01(\x0b\x32\x17.sight.x.proto.BlockEndH\x00\x12\x38\n\x0f\x61ttribute_start\x18\x08 \x01(\x0b\x32\x1d.sight.x.proto.AttributeStartH\x00\x12\x34\n\rattribute_end\x18\t \x01(\x0b\x32\x1b.sight.x.proto.AttributeEndH\x00\x12\x46\n\x10\x66lume_do_fn_emit\x18\x0e \x01(\x0b\x32*.sight.x.widgets.flume.proto.FlumeDoFnEmitH\x00\x12@\n\x0c\x66lume_depend\x18\x0f \x01(\x0b\x32(.sight.x.widgets.flume.proto.FlumeDependH\x00\x12%\n\x05value\x18\x10 \x01(\x0b\x32\x14.sight.x.proto.ValueH\x00\x12-\n\texception\x18\x11 \x01(\x0b\x32\x18.sight.x.proto.ExceptionH\x00\x12\'\n\x06tensor\x18\x14 \x01(\x0b\x32\x15.sight.x.proto.TensorH\x00\x12?\n\x13tensor_flow_example\x18\x15 \x01(\x0b\x32 .sight.x.proto.TensorFlowExampleH\x00\x12\x36\n\x0e\x64\x65\x63ision_point\x18\x16 \x01(\x0b\x32\x1c.sight.x.proto.DecisionPointH\x00\x12:\n\x10\x64\x65\x63ision_outcome\x18\x17 \x01(\x0b\x32\x1e.sight.x.proto.DecisionOutcomeH\x00\x12#\n\x04link\x18\x19 \x01(\x0b\x32\x13.sight.x.proto.LinkH\x00\x12\x36\n\x0epropose_action\x18\x1a \x01(\x0b\x32\x1c.sight.x.proto.ProposeActionH\x00\x12\x0c\n\x04\x66ile\x18\n \x01(\t\x12\x0c\n\x04line\x18\x0b \x01(\x05\x12\x0c\n\x04\x66unc\x18\x0c \x01(\t\x12\x1f\n\x17\x61ncestor_start_location\x18\r \x03(\t\x12.\n\x07metrics\x18\x12 \x01(\x0b\x32\x1d.sight.x.proto.Object.Metrics\x12*\n\x05order\x18\x13 \x01(\x0b\x32\x1b.sight.x.proto.Object.Order\x1aX\n\x07Metrics\x12%\n\x1dprocess_free_swap_space_bytes\x18\x01 \x01(\x03\x12&\n\x1eprocess_total_swap_space_bytes\x18\x02 \x01(\x03\x1a\x1d\n\x05Order\x12\x14\n\x0ctimestamp_ns\x18\x01 
\x01(\x03\"\xd2\x02\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x0b\n\x07ST_TEXT\x10\x01\x12\x12\n\x0eST_BLOCK_START\x10\x02\x12\x10\n\x0cST_BLOCK_END\x10\x03\x12\x16\n\x12ST_ATTRIBUTE_START\x10\x04\x12\x14\n\x10ST_ATTRIBUTE_END\x10\x05\x12\x17\n\x13ST_FLUME_DO_FN_EMIT\x10\x06\x12\x13\n\x0fST_FLUME_DEPEND\x10\x07\x12\x0c\n\x08ST_VALUE\x10\x08\x12\x10\n\x0cST_EXCEPTION\x10\t\x12\r\n\tST_TENSOR\x10\n\x12\x19\n\x15ST_TENSORFLOW_EXAMPLE\x10\x0c\x12\x15\n\x11ST_DECISION_POINT\x10\r\x12\x17\n\x13ST_DECISION_OUTCOME\x10\x0e\x12\n\n\x06ST_GAP\x10\x0b\x12\x0b\n\x07ST_LINK\x10\x0f\x12\x15\n\x11ST_PROPOSE_ACTION\x10\x10\x42\x12\n\x10sub_type_message\"\x88\x01\n\rProposeAction\x12\x32\n\x0c\x61\x63tion_attrs\x18\x01 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x30\n\nattributes\x18\x02 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x11\n\taction_id\x18\x03 \x01(\t\"\xec\x01\n\x12\x43onfigurationStart\x12;\n\x08sub_type\x18\x01 \x01(\x0e\x32).sight.x.proto.ConfigurationStart.SubType\x12K\n\x16\x64\x65\x63ision_configuration\x18\x02 \x01(\x0b\x32).sight.x.proto.DecisionConfigurationStartH\x00\"8\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x1d\n\x19ST_DECISION_CONFIGURATION\x10\x01\x42\x12\n\x10sub_type_message\";\n\tException\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\x12\x11\n\ttraceback\x18\x03 \x01(\t\"\xd7\x05\n\x06Tensor\x12/\n\x08sub_type\x18\x01 \x01(\x0e\x32\x1d.sight.x.proto.Tensor.SubType\x12\r\n\x05label\x18\x02 \x01(\t\x12\r\n\x05shape\x18\x03 \x03(\x03\x12\x11\n\tdim_label\x18\t \x03(\t\x12;\n\x0f\x64im_axis_values\x18\n \x03(\x0b\x32\".sight.x.proto.Tensor.StringValues\x12;\n\rstring_values\x18\x04 \x01(\x0b\x32\".sight.x.proto.Tensor.StringValuesH\x00\x12\x39\n\x0c\x62ytes_values\x18\x05 \x01(\x0b\x32!.sight.x.proto.Tensor.BytesValuesH\x00\x12\x39\n\x0cint64_values\x18\x06 \x01(\x0b\x32!.sight.x.proto.Tensor.Int64ValuesH\x00\x12;\n\rdouble_values\x18\x07 \x01(\x0b\x32\".sight.x.proto.Tensor.DoubleValuesH\x00\x12\x37\n\x0b\x62ool_values\x18\x08 \x01(\x0b\x32 .sight.x.proto.Tensor.BoolValuesH\x00\x1a\x1d\n\x0cStringValues\x12\r\n\x05value\x18\x01 \x03(\t\x1a\x1c\n\x0b\x42ytesValues\x12\r\n\x05value\x18\x01 \x03(\x0c\x1a\x1c\n\x0bInt64Values\x12\r\n\x05value\x18\x01 \x03(\x03\x1a\x1d\n\x0c\x44oubleValues\x12\r\n\x05value\x18\x01 \x03(\x01\x1a\x1b\n\nBoolValues\x12\r\n\x05value\x18\x01 \x03(\x08\"`\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\r\n\tST_STRING\x10\x01\x12\x0c\n\x08ST_BYTES\x10\x02\x12\x0c\n\x08ST_INT64\x10\x03\x12\r\n\tST_DOUBLE\x10\x04\x12\x0b\n\x07ST_BOOL\x10\x05\x42\x0c\n\nvalue_type\"\x9c\x01\n\x04Link\x12\x17\n\x0flinked_sight_id\x18\x01 \x01(\t\x12/\n\tlink_type\x18\x02 \x01(\x0e\x32\x1c.sight.x.proto.Link.LinkType\"J\n\x08LinkType\x12\x0e\n\nLT_UNKNOWN\x10\x00\x12\x16\n\x12LT_PARENT_TO_CHILD\x10\x01\x12\x16\n\x12LT_CHILD_TO_PARENT\x10\x02\"\x8e\x02\n\x11TensorFlowExample\x12/\n\rinput_example\x18\x01 \x01(\x0b\x32\x16.sight.x.proto.ExampleH\x00\x12@\n\x16input_sequence_example\x18\x02 \x01(\x0b\x32\x1e.sight.x.proto.SequenceExampleH\x00\x12\x30\n\x0eoutput_example\x18\x03 \x01(\x0b\x32\x16.sight.x.proto.ExampleH\x01\x12\x41\n\x17output_sequence_example\x18\x04 \x01(\x0b\x32\x1e.sight.x.proto.SequenceExampleH\x01\x42\x07\n\x05inputB\x08\n\x06output\")\n\x03Log\x12\"\n\x03obj\x18\x01 \x03(\x0b\x32\x15.sight.x.proto.Object\"x\n\x04Text\x12\x0c\n\x04text\x18\x01 \x01(\t\x12-\n\x08sub_type\x18\x02 
\x01(\x0e\x32\x1b.sight.x.proto.Text.SubType\"3\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x0b\n\x07ST_TEXT\x10\x01\x12\x0b\n\x07ST_HTML\x10\x02\"\xf4\x02\n\x05Value\x12.\n\x08sub_type\x18\x01 \x01(\x0e\x32\x1c.sight.x.proto.Value.SubType\x12\x16\n\x0cstring_value\x18\x02 \x01(\tH\x00\x12\x15\n\x0b\x62ytes_value\x18\x03 \x01(\x0cH\x00\x12\x15\n\x0bint64_value\x18\x04 \x01(\x03H\x00\x12\x16\n\x0c\x64ouble_value\x18\x05 \x01(\x01H\x00\x12\x14\n\nbool_value\x18\x06 \x01(\x08H\x00\x12\x14\n\nnone_value\x18\x07 \x01(\x08H\x00\x12\x14\n\njson_value\x18\x08 \x01(\tH\x00\x12\x11\n\tmime_type\x18\t \x01(\t\"z\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\r\n\tST_STRING\x10\x01\x12\x0c\n\x08ST_BYTES\x10\x02\x12\x0c\n\x08ST_INT64\x10\x03\x12\r\n\tST_DOUBLE\x10\x04\x12\x0b\n\x07ST_BOOL\x10\x05\x12\x0b\n\x07ST_NONE\x10\x06\x12\x0b\n\x07ST_JSON\x10\x07\x42\x0c\n\nvalue_type\"\xeb\n\n\nBlockStart\x12\r\n\x05label\x18\x01 \x01(\t\x12\x33\n\x08sub_type\x18\x02 \x01(\x0e\x32!.sight.x.proto.BlockStart.SubType\x12J\n\x12\x66lume_do_fn_create\x18\x03 \x01(\x0b\x32,.sight.x.widgets.flume.proto.FlumeDoFnCreateH\x00\x12M\n\x14\x66lume_do_fn_start_do\x18\x04 \x01(\x0b\x32-.sight.x.widgets.flume.proto.FlumeDoFnStartDoH\x00\x12T\n\x17\x66lume_compare_fn_create\x18\x05 \x01(\x0b\x32\x31.sight.x.widgets.flume.proto.FlumeCompareFnCreateH\x00\x12\x61\n\x1e\x66lume_compare_fn_start_compare\x18\x06 \x01(\x0b\x32\x37.sight.x.widgets.flume.proto.FlumeCompareFnStartCompareH\x00\x12(\n\x04list\x18\x07 \x01(\x0b\x32\x18.sight.x.proto.ListStartH\x00\x12\\\n tensor_flow_model_training_epoch\x18\x08 \x01(\x0b\x32\x30.sight.x.proto.TensorFlowModelTrainingEpochStartH\x00\x12:\n\x10simulation_start\x18\t \x01(\x0b\x32\x1e.sight.x.proto.SimulationStartH\x00\x12O\n\x1bsimulation_parameters_start\x18\n \x01(\x0b\x32(.sight.x.proto.SimulationParametersStartH\x00\x12L\n\x1asimulation_time_step_start\x18\x0b \x01(\x0b\x32&.sight.x.proto.SimulationTimeStepStartH\x00\x12:\n\rconfiguration\x18\x0c \x01(\x0b\x32!.sight.x.proto.ConfigurationStartH\x00\"\x91\x04\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x19\n\x15ST_FLUME_DO_FN_CREATE\x10\x01\x12\x1b\n\x17ST_FLUME_DO_FN_START_DO\x10\x02\x12\x1e\n\x1aST_FLUME_COMPARE_FN_CREATE\x10\x03\x12%\n!ST_FLUME_COMPARE_FN_START_COMPARE\x10\x04\x12\x12\n\x0eST_NAMED_VALUE\x10\x05\x12\x0b\n\x07ST_LIST\x10\x06\x12\x0c\n\x08ST_TABLE\x10\x07\x12#\n\x1fST_TENSORFLOW_MODEL_APPLICATION\x10\x08\x12&\n\"ST_TENSORFLOW_MODEL_TRAINING_EPOCH\x10\t\x12 \n\x1cST_TENSORFLOW_MODEL_TRAINING\x10\n\x12\x11\n\rST_SIMULATION\x10\x0b\x12\x1c\n\x18ST_SIMULATION_PARAMETERS\x10\x0c\x12\x17\n\x13ST_SIMULATION_STATE\x10\r\x12\x1b\n\x17ST_SIMULATION_TIME_STEP\x10\x0e\x12\x1f\n\x1bST_SIMULATION_INITIAL_STATE\x10\x11\x12 \n\x1cST_SIMULATION_BOUNDARY_STATE\x10\x12\x12\x19\n\x15ST_CLUSTER_ASSIGNMENT\x10\x0f\x12\x14\n\x10ST_CONFIGURATION\x10\x10\x42\x12\n\x10sub_type_message\"\xe7\x08\n\x08\x42lockEnd\x12\r\n\x05label\x18\x01 \x01(\t\x12\x31\n\x08sub_type\x18\x06 \x01(\x0e\x32\x1f.sight.x.proto.BlockEnd.SubType\x12\x1f\n\x17location_of_block_start\x18\x02 \x01(\t\x12\x1b\n\x13num_direct_contents\x18\x03 \x01(\x03\x12\x1f\n\x17num_transitive_contents\x18\x04 \x01(\x03\x12N\n\x14\x66lume_do_fn_complete\x18\x07 \x01(\x0b\x32..sight.x.widgets.flume.proto.FlumeDoFnCompleteH\x00\x12I\n\x12\x66lume_do_fn_end_do\x18\x08 \x01(\x0b\x32+.sight.x.widgets.flume.proto.FlumeDoFnEndDoH\x00\x12X\n\x19\x66lume_compare_fn_complete\x18\t 
\x01(\x0b\x32\x33.sight.x.widgets.flume.proto.FlumeCompareFnCompleteH\x00\x12]\n\x1c\x66lume_compare_fn_end_compare\x18\n \x01(\x0b\x32\x35.sight.x.widgets.flume.proto.FlumeCompareFnEndCompareH\x00\x12\x30\n\x07metrics\x18\x0c \x01(\x0b\x32\x1f.sight.x.proto.BlockEnd.Metrics\x1a\"\n\x07Metrics\x12\x17\n\x0f\x65lapsed_time_ns\x18\x01 \x01(\x03\"\xfb\x03\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x1b\n\x17ST_FLUME_DO_FN_COMPLETE\x10\x01\x12\x19\n\x15ST_FLUME_DO_FN_END_DO\x10\x02\x12 \n\x1cST_FLUME_COMPARE_FN_COMPLETE\x10\x03\x12#\n\x1fST_FLUME_COMPARE_FN_END_COMPARE\x10\x04\x12\x12\n\x0eST_NAMED_VALUE\x10\x05\x12\x0b\n\x07ST_LIST\x10\x06\x12\x0c\n\x08ST_TABLE\x10\x07\x12#\n\x1fST_TENSORFLOW_MODEL_APPLICATION\x10\x08\x12&\n\"ST_TENSORFLOW_MODEL_TRAINING_EPOCH\x10\t\x12 \n\x1cST_TENSORFLOW_MODEL_TRAINING\x10\n\x12\x11\n\rST_SIMULATION\x10\x0b\x12\x1c\n\x18ST_SIMULATION_PARAMETERS\x10\x0c\x12\x17\n\x13ST_SIMULATION_STATE\x10\r\x12\x1b\n\x17ST_SIMULATION_TIME_STEP\x10\x0e\x12\x1f\n\x1bST_SIMULATION_INITIAL_STATE\x10\x11\x12 \n\x1cST_SIMULATION_BOUNDARY_STATE\x10\x12\x12\x19\n\x15ST_CLUSTER_ASSIGNMENT\x10\x0f\x42\x12\n\x10sub_type_message\"\xaf\x01\n\tListStart\x12\x32\n\x08sub_type\x18\x01 \x01(\x0e\x32 .sight.x.proto.ListStart.SubType\"n\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x12\n\x0eST_HOMOGENEOUS\x10\x01\x12\x14\n\x10ST_HETEROGENEOUS\x10\x02\x12\n\n\x06ST_MAP\x10\x03\x12\x10\n\x0cST_MAP_ENTRY\x10\x04\x12\x0b\n\x07ST_DICT\x10\x05\"J\n!TensorFlowModelTrainingEpochStart\x12\x11\n\tepoch_num\x18\x01 \x01(\x03\x12\x12\n\nbatch_size\x18\x02 \x01(\x03\"=\n\x0e\x41ttributeStart\x12+\n\tattribute\x18\x01 \x01(\x0b\x32\x18.sight.x.proto.Attribute\"\x1b\n\x0c\x41ttributeEnd\x12\x0b\n\x03key\x18\x01 \x01(\t\"\xb2\x03\n\x06Params\x12\r\n\x05local\x18\x01 \x01(\x08\x12\x14\n\x0clog_dir_path\x18\x02 \x01(\t\x12\r\n\x05label\x18\x03 \x01(\t\x12\x13\n\x0btext_output\x18\x04 \x01(\x08\x12\x17\n\x0f\x63olumnio_output\x18\x05 \x01(\x08\x12\x18\n\x10\x63\x61pacitor_output\x18\x06 \x01(\x08\x12\x11\n\tlog_owner\x18\x07 \x01(\t\x12\x13\n\x0bpath_prefix\x18\x08 \x01(\t\x12\x1a\n\x12\x63ontainer_location\x18\t \x01(\t\x12\n\n\x02id\x18\n \x01(\x03\x12\x15\n\rsilent_logger\x18\x0b \x01(\x08\x12\x11\n\tin_memory\x18\x0c \x01(\x08\x12\x13\n\x0b\x61vro_output\x18\r \x01(\x08\x12\x12\n\nproject_id\x18\x0e \x01(\t\x12\x13\n\x0b\x62ucket_name\x18\x0f \x01(\t\x12\x10\n\x08gcp_path\x18\x10 \x01(\t\x12\x13\n\x0b\x66ile_format\x18\x11 \x01(\t\x12\x14\n\x0c\x64\x61taset_name\x18\x12 \x01(\t\x12\x1c\n\x14\x65xternal_file_format\x18\x13 \x01(\t\x12\x19\n\x11\x65xternal_file_uri\x18\x14 \x01(\t\"\x11\n\x0fSimulationStart\"\x1b\n\x19SimulationParametersStart\"\xb8\x02\n\x17SimulationTimeStepStart\x12\x17\n\x0ftime_step_index\x18\x01 \x03(\x03\x12\x11\n\ttime_step\x18\x02 \x01(\x02\x12\x16\n\x0etime_step_size\x18\x03 \x01(\x02\x12M\n\x0ftime_step_units\x18\x04 \x01(\x0e\x32\x34.sight.x.proto.SimulationTimeStepStart.TimeStepUnits\"\x89\x01\n\rTimeStepUnits\x12\x0f\n\x0bTSU_UNKNOWN\x10\x00\x12\x0e\n\nTSU_SECOND\x10\x01\x12\x0e\n\nTSU_MINUTE\x10\x02\x12\x0c\n\x08TSU_HOUR\x10\x03\x12\x0b\n\x07TSU_DAY\x10\x04\x12\r\n\tTSU_MONTH\x10\x05\x12\x0f\n\x0bTSU_QUARTER\x10\x06\x12\x0c\n\x08TSU_YEAR\x10\x07\"\xf0\x01\n\x12\x43ontinuousProbDist\x12>\n\x08gaussian\x18\x01 \x01(\x0b\x32*.sight.x.proto.ContinuousProbDist.GaussianH\x00\x12<\n\x07uniform\x18\x02 \x01(\x0b\x32).sight.x.proto.ContinuousProbDist.UniformH\x00\x1a\'\n\x08Gaussian\x12\x0c\n\x04mean\x18\x01 \x01(\x02\x12\r\n\x05stdev\x18\x02 
\x01(\x02\x1a+\n\x07Uniform\x12\x0f\n\x07min_val\x18\x01 \x01(\x02\x12\x0f\n\x07max_val\x18\x02 \x01(\x02\x42\x06\n\x04\x64ist\"\x83\x01\n\x10\x44iscreteProbDist\x12:\n\x07uniform\x18\x01 \x01(\x0b\x32\'.sight.x.proto.DiscreteProbDist.UniformH\x00\x1a+\n\x07Uniform\x12\x0f\n\x07min_val\x18\x01 \x01(\x03\x12\x0f\n\x07max_val\x18\x02 \x01(\x03\x42\x06\n\x04\x64ist\"\xa6\x1c\n\x1a\x44\x65\x63isionConfigurationStart\x12O\n\x0eoptimizer_type\x18\x01 \x01(\x0e\x32\x37.sight.x.proto.DecisionConfigurationStart.OptimizerType\x12R\n\rchoice_config\x18\x02 \x03(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.ChoiceConfigEntry\x12N\n\x0bstate_attrs\x18\x03 \x03(\x0b\x32\x39.sight.x.proto.DecisionConfigurationStart.StateAttrsEntry\x12P\n\x0c\x61\x63tion_attrs\x18\x04 \x03(\x0b\x32:.sight.x.proto.DecisionConfigurationStart.ActionAttrsEntry\x12R\n\routcome_attrs\x18\x05 \x03(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.OutcomeAttrsEntry\x12\x12\n\nnum_trials\x18\x06 \x01(\x03\x1a\x0e\n\x0cVizierConfig\x1a\xf1\x01\n\nAcmeConfig\x12R\n\nacme_agent\x18\x01 \x01(\x0e\x32>.sight.x.proto.DecisionConfigurationStart.AcmeConfig.AcmeAgent\"\x8e\x01\n\tAcmeAgent\x12\x0e\n\nAA_UNKNOWN\x10\x00\x12\n\n\x06\x41\x41_DQN\x10\x01\x12\x0b\n\x07\x41\x41_D4PG\x10\x02\x12\r\n\tAA_IMPALA\x10\x03\x12\x0b\n\x07\x41\x41_MDQN\x10\x04\x12\x0c\n\x08\x41\x41_QRDQN\x10\x05\x12\n\n\x06\x41\x41_PPO\x10\x06\x12\n\n\x06\x41\x41_MPO\x10\x07\x12\n\n\x06\x41\x41_SAC\x10\x08\x12\n\n\x06\x41\x41_TD3\x10\t\x1a\x35\n\x16GeneticAlgorithmConfig\x12\x1b\n\x13max_population_size\x18\x01 \x01(\x03\x1a\x18\n\x16\x45xhaustiveSearchConfig\x1a\xeb\x02\n\tLLMConfig\x12S\n\talgorithm\x18\x01 \x01(\x0e\x32@.sight.x.proto.DecisionConfigurationStart.LLMConfig.LLMAlgorithm\x12I\n\x04goal\x18\x02 \x01(\x0e\x32;.sight.x.proto.DecisionConfigurationStart.LLMConfig.LLMGoal\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\"W\n\x0cLLMAlgorithm\x12\x0e\n\nLA_UNKNOWN\x10\x00\x12\x11\n\rLA_TEXT_BISON\x10\x01\x12\x11\n\rLA_CHAT_BISON\x10\x02\x12\x11\n\rLA_GEMINI_PRO\x10\x03\"P\n\x07LLMGoal\x12\x0e\n\nLM_UNKNOWN\x10\x00\x12\x0f\n\x0bLM_OPTIMIZE\x10\x01\x12\x10\n\x0cLM_RECOMMEND\x10\x02\x12\x12\n\x0eLM_INTERACTIVE\x10\x03\x1a\x13\n\x11\x42\x61yesianOptConfig\x1a\x1b\n\x19SensitivityAnalysisConfig\x1a\x8e\x03\n\x0fNeverGradConfig\x12_\n\talgorithm\x18\x01 \x01(\x0e\x32L.sight.x.proto.DecisionConfigurationStart.NeverGradConfig.NeverGradAlgorithm\"\x99\x02\n\x12NeverGradAlgorithm\x12\x0e\n\nNG_UNKNOWN\x10\x00\x12\x0b\n\x07NG_AUTO\x10\x01\x12\t\n\x05NG_BO\x10\x02\x12\n\n\x06NG_CMA\x10\x03\x12\x12\n\x0eNG_TwoPointsDE\x10\x04\x12\x13\n\x0fNG_RandomSearch\x10\x05\x12\n\n\x06NG_PSO\x10\x06\x12\x1a\n\x16NG_ScrHammersleySearch\x10\x07\x12\t\n\x05NG_DE\x10\x08\x12\n\n\x06NG_CGA\x10\t\x12\t\n\x05NG_ES\x10\n\x12\r\n\tNG_DL_OPO\x10\x0b\x12\n\n\x06NG_DDE\x10\x0c\x12\n\n\x06NG_NMM\x10\r\x12\x10\n\x0cNG_TINY_SPSA\x10\x0e\x12\x11\n\rNG_VORONOI_DE\x10\x0f\x12\x10\n\x0cNG_CMA_SMALL\x10\x10\x1a\r\n\x0bSMCPyConfig\x1a\x19\n\x17WorklistSchedulerConfig\x1a\xac\x07\n\x0c\x43hoiceConfig\x12O\n\rvizier_config\x18\x01 \x01(\x0b\x32\x36.sight.x.proto.DecisionConfigurationStart.VizierConfigH\x00\x12K\n\x0b\x61\x63me_config\x18\x02 \x01(\x0b\x32\x34.sight.x.proto.DecisionConfigurationStart.AcmeConfigH\x00\x12\x64\n\x18genetic_algorithm_config\x18\x03 \x01(\x0b\x32@.sight.x.proto.DecisionConfigurationStart.GeneticAlgorithmConfigH\x00\x12\x64\n\x18\x65xhaustive_search_config\x18\x04 \x01(\x0b\x32@.sight.x.proto.DecisionConfigurationStart.ExhaustiveSearchConfigH\x00\x12I\n\nllm_config\x18\x05 
\x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.LLMConfigH\x00\x12Z\n\x13\x62\x61yesian_opt_config\x18\x06 \x01(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.BayesianOptConfigH\x00\x12j\n\x1bsensitivity_analysis_config\x18\x07 \x01(\x0b\x32\x43.sight.x.proto.DecisionConfigurationStart.SensitivityAnalysisConfigH\x00\x12V\n\x11never_grad_config\x18\x08 \x01(\x0b\x32\x39.sight.x.proto.DecisionConfigurationStart.NeverGradConfigH\x00\x12N\n\rsmc_py_config\x18\t \x01(\x0b\x32\x35.sight.x.proto.DecisionConfigurationStart.SMCPyConfigH\x00\x12\x66\n\x19worklist_scheduler_config\x18\n \x01(\x0b\x32\x41.sight.x.proto.DecisionConfigurationStart.WorklistSchedulerConfigH\x00\x42\x0f\n\rchoice_config\x1ak\n\x11\x43hoiceConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x45\n\x05value\x18\x02 \x01(\x0b\x32\x36.sight.x.proto.DecisionConfigurationStart.ChoiceConfig:\x02\x38\x01\x1a\x8d\x02\n\tAttrProps\x12\x11\n\tmin_value\x18\x01 \x01(\x02\x12\x11\n\tmax_value\x18\x02 \x01(\x02\x12\x11\n\tstep_size\x18\x03 \x01(\x02\x12\x1a\n\x12valid_float_values\x18\x04 \x03(\x02\x12\x18\n\x10valid_int_values\x18\x08 \x03(\x03\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\x12?\n\x14\x63ontinuous_prob_dist\x18\x06 \x01(\x0b\x32!.sight.x.proto.ContinuousProbDist\x12;\n\x12\x64iscrete_prob_dist\x18\x07 \x01(\x0b\x32\x1f.sight.x.proto.DiscreteProbDist\x1a\x66\n\x0fStateAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\x1ag\n\x10\x41\x63tionAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\x1ah\n\x11OutcomeAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\"\xea\x01\n\rOptimizerType\x12\x0e\n\nOT_UNKNOWN\x10\x00\x12\r\n\tOT_VIZIER\x10\x01\x12\x0b\n\x07OT_ACME\x10\x02\x12\x18\n\x14OT_GENETIC_ALGORITHM\x10\x03\x12\x18\n\x14OT_EXHAUSTIVE_SEARCH\x10\x04\x12\n\n\x06OT_LLM\x10\x05\x12\x13\n\x0fOT_BAYESIAN_OPT\x10\x06\x12\x1b\n\x17OT_SENSITIVITY_ANALYSIS\x10\x07\x12\x11\n\rOT_NEVER_GRAD\x10\x08\x12\r\n\tOT_SMC_PY\x10\t\x12\x19\n\x15OT_WORKLIST_SCHEDULER\x10\n\"U\n\x08\x44\x61taType\x12\r\n\tDT_UNKOWN\x10\x00\x12\x0c\n\x08\x44T_INT32\x10\x01\x12\x0c\n\x08\x44T_INT64\x10\x02\x12\x0e\n\nDT_FLOAT32\x10\x03\x12\x0e\n\nDT_FLOAT64\x10\x04\"A\n\rDecisionParam\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.sight.x.proto.Value\"\xa5\x01\n\rDecisionPoint\x12\x14\n\x0c\x63hoice_label\x18\x01 \x01(\t\x12\x15\n\rchosen_option\x18\x02 \x01(\t\x12\x33\n\rchoice_params\x18\x03 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x32\n\x0cstate_params\x18\x04 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\"\x80\x01\n\x0f\x44\x65\x63isionOutcome\x12\x15\n\routcome_label\x18\x01 \x01(\t\x12\x0e\n\x06reward\x18\x02 \x01(\x02\x12\x10\n\x08\x64iscount\x18\x03 \x01(\x02\x12\x34\n\x0eoutcome_params\x18\x04 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParamb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17sight/proto/sight.proto\x12\rsight.x.proto\x1a\x19sight/proto/example.proto\x1a.sight/proto/widgets/pipeline/flume/flume.proto\"\'\n\tAttribute\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xb1\x0c\n\x06Object\x12\x10\n\x08location\x18\x01 \x01(\t\x12\r\n\x05index\x18\x02 \x01(\x03\x12\x0f\n\x07log_uid\x18\x18 \x01(\t\x12+\n\tattribute\x18\x03 
\x03(\x0b\x32\x18.sight.x.proto.Attribute\x12/\n\x08sub_type\x18\x04 \x01(\x0e\x32\x1d.sight.x.proto.Object.SubType\x12#\n\x04text\x18\x05 \x01(\x0b\x32\x13.sight.x.proto.TextH\x00\x12\x30\n\x0b\x62lock_start\x18\x06 \x01(\x0b\x32\x19.sight.x.proto.BlockStartH\x00\x12,\n\tblock_end\x18\x07 \x01(\x0b\x32\x17.sight.x.proto.BlockEndH\x00\x12\x38\n\x0f\x61ttribute_start\x18\x08 \x01(\x0b\x32\x1d.sight.x.proto.AttributeStartH\x00\x12\x34\n\rattribute_end\x18\t \x01(\x0b\x32\x1b.sight.x.proto.AttributeEndH\x00\x12\x46\n\x10\x66lume_do_fn_emit\x18\x0e \x01(\x0b\x32*.sight.x.widgets.flume.proto.FlumeDoFnEmitH\x00\x12@\n\x0c\x66lume_depend\x18\x0f \x01(\x0b\x32(.sight.x.widgets.flume.proto.FlumeDependH\x00\x12%\n\x05value\x18\x10 \x01(\x0b\x32\x14.sight.x.proto.ValueH\x00\x12-\n\texception\x18\x11 \x01(\x0b\x32\x18.sight.x.proto.ExceptionH\x00\x12\'\n\x06tensor\x18\x14 \x01(\x0b\x32\x15.sight.x.proto.TensorH\x00\x12?\n\x13tensor_flow_example\x18\x15 \x01(\x0b\x32 .sight.x.proto.TensorFlowExampleH\x00\x12\x36\n\x0e\x64\x65\x63ision_point\x18\x16 \x01(\x0b\x32\x1c.sight.x.proto.DecisionPointH\x00\x12:\n\x10\x64\x65\x63ision_outcome\x18\x17 \x01(\x0b\x32\x1e.sight.x.proto.DecisionOutcomeH\x00\x12#\n\x04link\x18\x19 \x01(\x0b\x32\x13.sight.x.proto.LinkH\x00\x12\x36\n\x0epropose_action\x18\x1a \x01(\x0b\x32\x1c.sight.x.proto.ProposeActionH\x00\x12\x0c\n\x04\x66ile\x18\n \x01(\t\x12\x0c\n\x04line\x18\x0b \x01(\x05\x12\x0c\n\x04\x66unc\x18\x0c \x01(\t\x12\x1f\n\x17\x61ncestor_start_location\x18\r \x03(\t\x12.\n\x07metrics\x18\x12 \x01(\x0b\x32\x1d.sight.x.proto.Object.Metrics\x12*\n\x05order\x18\x13 \x01(\x0b\x32\x1b.sight.x.proto.Object.Order\x1aX\n\x07Metrics\x12%\n\x1dprocess_free_swap_space_bytes\x18\x01 \x01(\x03\x12&\n\x1eprocess_total_swap_space_bytes\x18\x02 \x01(\x03\x1a\x1d\n\x05Order\x12\x14\n\x0ctimestamp_ns\x18\x01 \x01(\x03\"\xd2\x02\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x0b\n\x07ST_TEXT\x10\x01\x12\x12\n\x0eST_BLOCK_START\x10\x02\x12\x10\n\x0cST_BLOCK_END\x10\x03\x12\x16\n\x12ST_ATTRIBUTE_START\x10\x04\x12\x14\n\x10ST_ATTRIBUTE_END\x10\x05\x12\x17\n\x13ST_FLUME_DO_FN_EMIT\x10\x06\x12\x13\n\x0fST_FLUME_DEPEND\x10\x07\x12\x0c\n\x08ST_VALUE\x10\x08\x12\x10\n\x0cST_EXCEPTION\x10\t\x12\r\n\tST_TENSOR\x10\n\x12\x19\n\x15ST_TENSORFLOW_EXAMPLE\x10\x0c\x12\x15\n\x11ST_DECISION_POINT\x10\r\x12\x17\n\x13ST_DECISION_OUTCOME\x10\x0e\x12\n\n\x06ST_GAP\x10\x0b\x12\x0b\n\x07ST_LINK\x10\x0f\x12\x15\n\x11ST_PROPOSE_ACTION\x10\x10\x42\x12\n\x10sub_type_message\"\x88\x01\n\rProposeAction\x12\x32\n\x0c\x61\x63tion_attrs\x18\x01 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x30\n\nattributes\x18\x02 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x11\n\taction_id\x18\x03 \x01(\t\"\xec\x01\n\x12\x43onfigurationStart\x12;\n\x08sub_type\x18\x01 \x01(\x0e\x32).sight.x.proto.ConfigurationStart.SubType\x12K\n\x16\x64\x65\x63ision_configuration\x18\x02 \x01(\x0b\x32).sight.x.proto.DecisionConfigurationStartH\x00\"8\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x1d\n\x19ST_DECISION_CONFIGURATION\x10\x01\x42\x12\n\x10sub_type_message\";\n\tException\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\x12\x11\n\ttraceback\x18\x03 \x01(\t\"\xd7\x05\n\x06Tensor\x12/\n\x08sub_type\x18\x01 \x01(\x0e\x32\x1d.sight.x.proto.Tensor.SubType\x12\r\n\x05label\x18\x02 \x01(\t\x12\r\n\x05shape\x18\x03 \x03(\x03\x12\x11\n\tdim_label\x18\t \x03(\t\x12;\n\x0f\x64im_axis_values\x18\n \x03(\x0b\x32\".sight.x.proto.Tensor.StringValues\x12;\n\rstring_values\x18\x04 
\x01(\x0b\x32\".sight.x.proto.Tensor.StringValuesH\x00\x12\x39\n\x0c\x62ytes_values\x18\x05 \x01(\x0b\x32!.sight.x.proto.Tensor.BytesValuesH\x00\x12\x39\n\x0cint64_values\x18\x06 \x01(\x0b\x32!.sight.x.proto.Tensor.Int64ValuesH\x00\x12;\n\rdouble_values\x18\x07 \x01(\x0b\x32\".sight.x.proto.Tensor.DoubleValuesH\x00\x12\x37\n\x0b\x62ool_values\x18\x08 \x01(\x0b\x32 .sight.x.proto.Tensor.BoolValuesH\x00\x1a\x1d\n\x0cStringValues\x12\r\n\x05value\x18\x01 \x03(\t\x1a\x1c\n\x0b\x42ytesValues\x12\r\n\x05value\x18\x01 \x03(\x0c\x1a\x1c\n\x0bInt64Values\x12\r\n\x05value\x18\x01 \x03(\x03\x1a\x1d\n\x0c\x44oubleValues\x12\r\n\x05value\x18\x01 \x03(\x01\x1a\x1b\n\nBoolValues\x12\r\n\x05value\x18\x01 \x03(\x08\"`\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\r\n\tST_STRING\x10\x01\x12\x0c\n\x08ST_BYTES\x10\x02\x12\x0c\n\x08ST_INT64\x10\x03\x12\r\n\tST_DOUBLE\x10\x04\x12\x0b\n\x07ST_BOOL\x10\x05\x42\x0c\n\nvalue_type\"\x9c\x01\n\x04Link\x12\x17\n\x0flinked_sight_id\x18\x01 \x01(\t\x12/\n\tlink_type\x18\x02 \x01(\x0e\x32\x1c.sight.x.proto.Link.LinkType\"J\n\x08LinkType\x12\x0e\n\nLT_UNKNOWN\x10\x00\x12\x16\n\x12LT_PARENT_TO_CHILD\x10\x01\x12\x16\n\x12LT_CHILD_TO_PARENT\x10\x02\"\x8e\x02\n\x11TensorFlowExample\x12/\n\rinput_example\x18\x01 \x01(\x0b\x32\x16.sight.x.proto.ExampleH\x00\x12@\n\x16input_sequence_example\x18\x02 \x01(\x0b\x32\x1e.sight.x.proto.SequenceExampleH\x00\x12\x30\n\x0eoutput_example\x18\x03 \x01(\x0b\x32\x16.sight.x.proto.ExampleH\x01\x12\x41\n\x17output_sequence_example\x18\x04 \x01(\x0b\x32\x1e.sight.x.proto.SequenceExampleH\x01\x42\x07\n\x05inputB\x08\n\x06output\")\n\x03Log\x12\"\n\x03obj\x18\x01 \x03(\x0b\x32\x15.sight.x.proto.Object\"x\n\x04Text\x12\x0c\n\x04text\x18\x01 \x01(\t\x12-\n\x08sub_type\x18\x02 \x01(\x0e\x32\x1b.sight.x.proto.Text.SubType\"3\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x0b\n\x07ST_TEXT\x10\x01\x12\x0b\n\x07ST_HTML\x10\x02\"\xf4\x02\n\x05Value\x12.\n\x08sub_type\x18\x01 \x01(\x0e\x32\x1c.sight.x.proto.Value.SubType\x12\x16\n\x0cstring_value\x18\x02 \x01(\tH\x00\x12\x15\n\x0b\x62ytes_value\x18\x03 \x01(\x0cH\x00\x12\x15\n\x0bint64_value\x18\x04 \x01(\x03H\x00\x12\x16\n\x0c\x64ouble_value\x18\x05 \x01(\x01H\x00\x12\x14\n\nbool_value\x18\x06 \x01(\x08H\x00\x12\x14\n\nnone_value\x18\x07 \x01(\x08H\x00\x12\x14\n\njson_value\x18\x08 \x01(\tH\x00\x12\x11\n\tmime_type\x18\t \x01(\t\"z\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\r\n\tST_STRING\x10\x01\x12\x0c\n\x08ST_BYTES\x10\x02\x12\x0c\n\x08ST_INT64\x10\x03\x12\r\n\tST_DOUBLE\x10\x04\x12\x0b\n\x07ST_BOOL\x10\x05\x12\x0b\n\x07ST_NONE\x10\x06\x12\x0b\n\x07ST_JSON\x10\x07\x42\x0c\n\nvalue_type\"\xeb\n\n\nBlockStart\x12\r\n\x05label\x18\x01 \x01(\t\x12\x33\n\x08sub_type\x18\x02 \x01(\x0e\x32!.sight.x.proto.BlockStart.SubType\x12J\n\x12\x66lume_do_fn_create\x18\x03 \x01(\x0b\x32,.sight.x.widgets.flume.proto.FlumeDoFnCreateH\x00\x12M\n\x14\x66lume_do_fn_start_do\x18\x04 \x01(\x0b\x32-.sight.x.widgets.flume.proto.FlumeDoFnStartDoH\x00\x12T\n\x17\x66lume_compare_fn_create\x18\x05 \x01(\x0b\x32\x31.sight.x.widgets.flume.proto.FlumeCompareFnCreateH\x00\x12\x61\n\x1e\x66lume_compare_fn_start_compare\x18\x06 \x01(\x0b\x32\x37.sight.x.widgets.flume.proto.FlumeCompareFnStartCompareH\x00\x12(\n\x04list\x18\x07 \x01(\x0b\x32\x18.sight.x.proto.ListStartH\x00\x12\\\n tensor_flow_model_training_epoch\x18\x08 \x01(\x0b\x32\x30.sight.x.proto.TensorFlowModelTrainingEpochStartH\x00\x12:\n\x10simulation_start\x18\t \x01(\x0b\x32\x1e.sight.x.proto.SimulationStartH\x00\x12O\n\x1bsimulation_parameters_start\x18\n 
\x01(\x0b\x32(.sight.x.proto.SimulationParametersStartH\x00\x12L\n\x1asimulation_time_step_start\x18\x0b \x01(\x0b\x32&.sight.x.proto.SimulationTimeStepStartH\x00\x12:\n\rconfiguration\x18\x0c \x01(\x0b\x32!.sight.x.proto.ConfigurationStartH\x00\"\x91\x04\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x19\n\x15ST_FLUME_DO_FN_CREATE\x10\x01\x12\x1b\n\x17ST_FLUME_DO_FN_START_DO\x10\x02\x12\x1e\n\x1aST_FLUME_COMPARE_FN_CREATE\x10\x03\x12%\n!ST_FLUME_COMPARE_FN_START_COMPARE\x10\x04\x12\x12\n\x0eST_NAMED_VALUE\x10\x05\x12\x0b\n\x07ST_LIST\x10\x06\x12\x0c\n\x08ST_TABLE\x10\x07\x12#\n\x1fST_TENSORFLOW_MODEL_APPLICATION\x10\x08\x12&\n\"ST_TENSORFLOW_MODEL_TRAINING_EPOCH\x10\t\x12 \n\x1cST_TENSORFLOW_MODEL_TRAINING\x10\n\x12\x11\n\rST_SIMULATION\x10\x0b\x12\x1c\n\x18ST_SIMULATION_PARAMETERS\x10\x0c\x12\x17\n\x13ST_SIMULATION_STATE\x10\r\x12\x1b\n\x17ST_SIMULATION_TIME_STEP\x10\x0e\x12\x1f\n\x1bST_SIMULATION_INITIAL_STATE\x10\x11\x12 \n\x1cST_SIMULATION_BOUNDARY_STATE\x10\x12\x12\x19\n\x15ST_CLUSTER_ASSIGNMENT\x10\x0f\x12\x14\n\x10ST_CONFIGURATION\x10\x10\x42\x12\n\x10sub_type_message\"\x8a\t\n\x08\x42lockEnd\x12\r\n\x05label\x18\x01 \x01(\t\x12\x31\n\x08sub_type\x18\x06 \x01(\x0e\x32\x1f.sight.x.proto.BlockEnd.SubType\x12\x1f\n\x17location_of_block_start\x18\x02 \x01(\t\x12\x1b\n\x13num_direct_contents\x18\x03 \x01(\x03\x12\x1f\n\x17num_transitive_contents\x18\x04 \x01(\x03\x12N\n\x14\x66lume_do_fn_complete\x18\x07 \x01(\x0b\x32..sight.x.widgets.flume.proto.FlumeDoFnCompleteH\x00\x12I\n\x12\x66lume_do_fn_end_do\x18\x08 \x01(\x0b\x32+.sight.x.widgets.flume.proto.FlumeDoFnEndDoH\x00\x12X\n\x19\x66lume_compare_fn_complete\x18\t \x01(\x0b\x32\x33.sight.x.widgets.flume.proto.FlumeCompareFnCompleteH\x00\x12]\n\x1c\x66lume_compare_fn_end_compare\x18\n \x01(\x0b\x32\x35.sight.x.widgets.flume.proto.FlumeCompareFnEndCompareH\x00\x12\x30\n\x07metrics\x18\x0c \x01(\x0b\x32\x1f.sight.x.proto.BlockEnd.Metrics\x1a\x45\n\x07Metrics\x12\x17\n\x0f\x65lapsed_time_ns\x18\x01 \x01(\x03\x12!\n\x19\x65xclusive_elapsed_time_ns\x18\x02 \x01(\x03\"\xfb\x03\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x1b\n\x17ST_FLUME_DO_FN_COMPLETE\x10\x01\x12\x19\n\x15ST_FLUME_DO_FN_END_DO\x10\x02\x12 \n\x1cST_FLUME_COMPARE_FN_COMPLETE\x10\x03\x12#\n\x1fST_FLUME_COMPARE_FN_END_COMPARE\x10\x04\x12\x12\n\x0eST_NAMED_VALUE\x10\x05\x12\x0b\n\x07ST_LIST\x10\x06\x12\x0c\n\x08ST_TABLE\x10\x07\x12#\n\x1fST_TENSORFLOW_MODEL_APPLICATION\x10\x08\x12&\n\"ST_TENSORFLOW_MODEL_TRAINING_EPOCH\x10\t\x12 \n\x1cST_TENSORFLOW_MODEL_TRAINING\x10\n\x12\x11\n\rST_SIMULATION\x10\x0b\x12\x1c\n\x18ST_SIMULATION_PARAMETERS\x10\x0c\x12\x17\n\x13ST_SIMULATION_STATE\x10\r\x12\x1b\n\x17ST_SIMULATION_TIME_STEP\x10\x0e\x12\x1f\n\x1bST_SIMULATION_INITIAL_STATE\x10\x11\x12 \n\x1cST_SIMULATION_BOUNDARY_STATE\x10\x12\x12\x19\n\x15ST_CLUSTER_ASSIGNMENT\x10\x0f\x42\x12\n\x10sub_type_message\"\xaf\x01\n\tListStart\x12\x32\n\x08sub_type\x18\x01 \x01(\x0e\x32 .sight.x.proto.ListStart.SubType\"n\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x12\n\x0eST_HOMOGENEOUS\x10\x01\x12\x14\n\x10ST_HETEROGENEOUS\x10\x02\x12\n\n\x06ST_MAP\x10\x03\x12\x10\n\x0cST_MAP_ENTRY\x10\x04\x12\x0b\n\x07ST_DICT\x10\x05\"J\n!TensorFlowModelTrainingEpochStart\x12\x11\n\tepoch_num\x18\x01 \x01(\x03\x12\x12\n\nbatch_size\x18\x02 \x01(\x03\"=\n\x0e\x41ttributeStart\x12+\n\tattribute\x18\x01 \x01(\x0b\x32\x18.sight.x.proto.Attribute\"\x1b\n\x0c\x41ttributeEnd\x12\x0b\n\x03key\x18\x01 \x01(\t\"\xb2\x03\n\x06Params\x12\r\n\x05local\x18\x01 \x01(\x08\x12\x14\n\x0clog_dir_path\x18\x02 
\x01(\t\x12\r\n\x05label\x18\x03 \x01(\t\x12\x13\n\x0btext_output\x18\x04 \x01(\x08\x12\x17\n\x0f\x63olumnio_output\x18\x05 \x01(\x08\x12\x18\n\x10\x63\x61pacitor_output\x18\x06 \x01(\x08\x12\x11\n\tlog_owner\x18\x07 \x01(\t\x12\x13\n\x0bpath_prefix\x18\x08 \x01(\t\x12\x1a\n\x12\x63ontainer_location\x18\t \x01(\t\x12\n\n\x02id\x18\n \x01(\x03\x12\x15\n\rsilent_logger\x18\x0b \x01(\x08\x12\x11\n\tin_memory\x18\x0c \x01(\x08\x12\x13\n\x0b\x61vro_output\x18\r \x01(\x08\x12\x12\n\nproject_id\x18\x0e \x01(\t\x12\x13\n\x0b\x62ucket_name\x18\x0f \x01(\t\x12\x10\n\x08gcp_path\x18\x10 \x01(\t\x12\x13\n\x0b\x66ile_format\x18\x11 \x01(\t\x12\x14\n\x0c\x64\x61taset_name\x18\x12 \x01(\t\x12\x1c\n\x14\x65xternal_file_format\x18\x13 \x01(\t\x12\x19\n\x11\x65xternal_file_uri\x18\x14 \x01(\t\"\x11\n\x0fSimulationStart\"\x1b\n\x19SimulationParametersStart\"\xb8\x02\n\x17SimulationTimeStepStart\x12\x17\n\x0ftime_step_index\x18\x01 \x03(\x03\x12\x11\n\ttime_step\x18\x02 \x01(\x02\x12\x16\n\x0etime_step_size\x18\x03 \x01(\x02\x12M\n\x0ftime_step_units\x18\x04 \x01(\x0e\x32\x34.sight.x.proto.SimulationTimeStepStart.TimeStepUnits\"\x89\x01\n\rTimeStepUnits\x12\x0f\n\x0bTSU_UNKNOWN\x10\x00\x12\x0e\n\nTSU_SECOND\x10\x01\x12\x0e\n\nTSU_MINUTE\x10\x02\x12\x0c\n\x08TSU_HOUR\x10\x03\x12\x0b\n\x07TSU_DAY\x10\x04\x12\r\n\tTSU_MONTH\x10\x05\x12\x0f\n\x0bTSU_QUARTER\x10\x06\x12\x0c\n\x08TSU_YEAR\x10\x07\"\xf0\x01\n\x12\x43ontinuousProbDist\x12>\n\x08gaussian\x18\x01 \x01(\x0b\x32*.sight.x.proto.ContinuousProbDist.GaussianH\x00\x12<\n\x07uniform\x18\x02 \x01(\x0b\x32).sight.x.proto.ContinuousProbDist.UniformH\x00\x1a\'\n\x08Gaussian\x12\x0c\n\x04mean\x18\x01 \x01(\x02\x12\r\n\x05stdev\x18\x02 \x01(\x02\x1a+\n\x07Uniform\x12\x0f\n\x07min_val\x18\x01 \x01(\x02\x12\x0f\n\x07max_val\x18\x02 \x01(\x02\x42\x06\n\x04\x64ist\"\x83\x01\n\x10\x44iscreteProbDist\x12:\n\x07uniform\x18\x01 \x01(\x0b\x32\'.sight.x.proto.DiscreteProbDist.UniformH\x00\x1a+\n\x07Uniform\x12\x0f\n\x07min_val\x18\x01 \x01(\x03\x12\x0f\n\x07max_val\x18\x02 \x01(\x03\x42\x06\n\x04\x64ist\"\xa6\x1c\n\x1a\x44\x65\x63isionConfigurationStart\x12O\n\x0eoptimizer_type\x18\x01 \x01(\x0e\x32\x37.sight.x.proto.DecisionConfigurationStart.OptimizerType\x12R\n\rchoice_config\x18\x02 \x03(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.ChoiceConfigEntry\x12N\n\x0bstate_attrs\x18\x03 \x03(\x0b\x32\x39.sight.x.proto.DecisionConfigurationStart.StateAttrsEntry\x12P\n\x0c\x61\x63tion_attrs\x18\x04 \x03(\x0b\x32:.sight.x.proto.DecisionConfigurationStart.ActionAttrsEntry\x12R\n\routcome_attrs\x18\x05 \x03(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.OutcomeAttrsEntry\x12\x12\n\nnum_trials\x18\x06 \x01(\x03\x1a\x0e\n\x0cVizierConfig\x1a\xf1\x01\n\nAcmeConfig\x12R\n\nacme_agent\x18\x01 \x01(\x0e\x32>.sight.x.proto.DecisionConfigurationStart.AcmeConfig.AcmeAgent\"\x8e\x01\n\tAcmeAgent\x12\x0e\n\nAA_UNKNOWN\x10\x00\x12\n\n\x06\x41\x41_DQN\x10\x01\x12\x0b\n\x07\x41\x41_D4PG\x10\x02\x12\r\n\tAA_IMPALA\x10\x03\x12\x0b\n\x07\x41\x41_MDQN\x10\x04\x12\x0c\n\x08\x41\x41_QRDQN\x10\x05\x12\n\n\x06\x41\x41_PPO\x10\x06\x12\n\n\x06\x41\x41_MPO\x10\x07\x12\n\n\x06\x41\x41_SAC\x10\x08\x12\n\n\x06\x41\x41_TD3\x10\t\x1a\x35\n\x16GeneticAlgorithmConfig\x12\x1b\n\x13max_population_size\x18\x01 \x01(\x03\x1a\x18\n\x16\x45xhaustiveSearchConfig\x1a\xeb\x02\n\tLLMConfig\x12S\n\talgorithm\x18\x01 \x01(\x0e\x32@.sight.x.proto.DecisionConfigurationStart.LLMConfig.LLMAlgorithm\x12I\n\x04goal\x18\x02 
\x01(\x0e\x32;.sight.x.proto.DecisionConfigurationStart.LLMConfig.LLMGoal\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\"W\n\x0cLLMAlgorithm\x12\x0e\n\nLA_UNKNOWN\x10\x00\x12\x11\n\rLA_TEXT_BISON\x10\x01\x12\x11\n\rLA_CHAT_BISON\x10\x02\x12\x11\n\rLA_GEMINI_PRO\x10\x03\"P\n\x07LLMGoal\x12\x0e\n\nLM_UNKNOWN\x10\x00\x12\x0f\n\x0bLM_OPTIMIZE\x10\x01\x12\x10\n\x0cLM_RECOMMEND\x10\x02\x12\x12\n\x0eLM_INTERACTIVE\x10\x03\x1a\x13\n\x11\x42\x61yesianOptConfig\x1a\x1b\n\x19SensitivityAnalysisConfig\x1a\x8e\x03\n\x0fNeverGradConfig\x12_\n\talgorithm\x18\x01 \x01(\x0e\x32L.sight.x.proto.DecisionConfigurationStart.NeverGradConfig.NeverGradAlgorithm\"\x99\x02\n\x12NeverGradAlgorithm\x12\x0e\n\nNG_UNKNOWN\x10\x00\x12\x0b\n\x07NG_AUTO\x10\x01\x12\t\n\x05NG_BO\x10\x02\x12\n\n\x06NG_CMA\x10\x03\x12\x12\n\x0eNG_TwoPointsDE\x10\x04\x12\x13\n\x0fNG_RandomSearch\x10\x05\x12\n\n\x06NG_PSO\x10\x06\x12\x1a\n\x16NG_ScrHammersleySearch\x10\x07\x12\t\n\x05NG_DE\x10\x08\x12\n\n\x06NG_CGA\x10\t\x12\t\n\x05NG_ES\x10\n\x12\r\n\tNG_DL_OPO\x10\x0b\x12\n\n\x06NG_DDE\x10\x0c\x12\n\n\x06NG_NMM\x10\r\x12\x10\n\x0cNG_TINY_SPSA\x10\x0e\x12\x11\n\rNG_VORONOI_DE\x10\x0f\x12\x10\n\x0cNG_CMA_SMALL\x10\x10\x1a\r\n\x0bSMCPyConfig\x1a\x19\n\x17WorklistSchedulerConfig\x1a\xac\x07\n\x0c\x43hoiceConfig\x12O\n\rvizier_config\x18\x01 \x01(\x0b\x32\x36.sight.x.proto.DecisionConfigurationStart.VizierConfigH\x00\x12K\n\x0b\x61\x63me_config\x18\x02 \x01(\x0b\x32\x34.sight.x.proto.DecisionConfigurationStart.AcmeConfigH\x00\x12\x64\n\x18genetic_algorithm_config\x18\x03 \x01(\x0b\x32@.sight.x.proto.DecisionConfigurationStart.GeneticAlgorithmConfigH\x00\x12\x64\n\x18\x65xhaustive_search_config\x18\x04 \x01(\x0b\x32@.sight.x.proto.DecisionConfigurationStart.ExhaustiveSearchConfigH\x00\x12I\n\nllm_config\x18\x05 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.LLMConfigH\x00\x12Z\n\x13\x62\x61yesian_opt_config\x18\x06 \x01(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.BayesianOptConfigH\x00\x12j\n\x1bsensitivity_analysis_config\x18\x07 \x01(\x0b\x32\x43.sight.x.proto.DecisionConfigurationStart.SensitivityAnalysisConfigH\x00\x12V\n\x11never_grad_config\x18\x08 \x01(\x0b\x32\x39.sight.x.proto.DecisionConfigurationStart.NeverGradConfigH\x00\x12N\n\rsmc_py_config\x18\t \x01(\x0b\x32\x35.sight.x.proto.DecisionConfigurationStart.SMCPyConfigH\x00\x12\x66\n\x19worklist_scheduler_config\x18\n \x01(\x0b\x32\x41.sight.x.proto.DecisionConfigurationStart.WorklistSchedulerConfigH\x00\x42\x0f\n\rchoice_config\x1ak\n\x11\x43hoiceConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x45\n\x05value\x18\x02 \x01(\x0b\x32\x36.sight.x.proto.DecisionConfigurationStart.ChoiceConfig:\x02\x38\x01\x1a\x8d\x02\n\tAttrProps\x12\x11\n\tmin_value\x18\x01 \x01(\x02\x12\x11\n\tmax_value\x18\x02 \x01(\x02\x12\x11\n\tstep_size\x18\x03 \x01(\x02\x12\x1a\n\x12valid_float_values\x18\x04 \x03(\x02\x12\x18\n\x10valid_int_values\x18\x08 \x03(\x03\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\x12?\n\x14\x63ontinuous_prob_dist\x18\x06 \x01(\x0b\x32!.sight.x.proto.ContinuousProbDist\x12;\n\x12\x64iscrete_prob_dist\x18\x07 \x01(\x0b\x32\x1f.sight.x.proto.DiscreteProbDist\x1a\x66\n\x0fStateAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\x1ag\n\x10\x41\x63tionAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\x1ah\n\x11OutcomeAttrsEntry\x12\x0b\n\x03key\x18\x01 
\x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\"\xea\x01\n\rOptimizerType\x12\x0e\n\nOT_UNKNOWN\x10\x00\x12\r\n\tOT_VIZIER\x10\x01\x12\x0b\n\x07OT_ACME\x10\x02\x12\x18\n\x14OT_GENETIC_ALGORITHM\x10\x03\x12\x18\n\x14OT_EXHAUSTIVE_SEARCH\x10\x04\x12\n\n\x06OT_LLM\x10\x05\x12\x13\n\x0fOT_BAYESIAN_OPT\x10\x06\x12\x1b\n\x17OT_SENSITIVITY_ANALYSIS\x10\x07\x12\x11\n\rOT_NEVER_GRAD\x10\x08\x12\r\n\tOT_SMC_PY\x10\t\x12\x19\n\x15OT_WORKLIST_SCHEDULER\x10\n\"U\n\x08\x44\x61taType\x12\r\n\tDT_UNKOWN\x10\x00\x12\x0c\n\x08\x44T_INT32\x10\x01\x12\x0c\n\x08\x44T_INT64\x10\x02\x12\x0e\n\nDT_FLOAT32\x10\x03\x12\x0e\n\nDT_FLOAT64\x10\x04\"A\n\rDecisionParam\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.sight.x.proto.Value\"\xa5\x01\n\rDecisionPoint\x12\x14\n\x0c\x63hoice_label\x18\x01 \x01(\t\x12\x15\n\rchosen_option\x18\x02 \x01(\t\x12\x33\n\rchoice_params\x18\x03 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x32\n\x0cstate_params\x18\x04 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\"\x80\x01\n\x0f\x44\x65\x63isionOutcome\x12\x15\n\routcome_label\x18\x01 \x01(\t\x12\x0e\n\x06reward\x18\x02 \x01(\x02\x12\x10\n\x08\x64iscount\x18\x03 \x01(\x02\x12\x34\n\x0eoutcome_params\x18\x04 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParamb\x06proto3') @@ -562,91 +559,91 @@ _BLOCKSTART_SUBTYPE._serialized_start=4726 _BLOCKSTART_SUBTYPE._serialized_end=5255 _BLOCKEND._serialized_start=5278 - _BLOCKEND._serialized_end=6405 + _BLOCKEND._serialized_end=6440 _BLOCKEND_METRICS._serialized_start=5841 - _BLOCKEND_METRICS._serialized_end=5875 - _BLOCKEND_SUBTYPE._serialized_start=5878 - _BLOCKEND_SUBTYPE._serialized_end=6385 - _LISTSTART._serialized_start=6408 - _LISTSTART._serialized_end=6583 - _LISTSTART_SUBTYPE._serialized_start=6473 - _LISTSTART_SUBTYPE._serialized_end=6583 - _TENSORFLOWMODELTRAININGEPOCHSTART._serialized_start=6585 - _TENSORFLOWMODELTRAININGEPOCHSTART._serialized_end=6659 - _ATTRIBUTESTART._serialized_start=6661 - _ATTRIBUTESTART._serialized_end=6722 - _ATTRIBUTEEND._serialized_start=6724 - _ATTRIBUTEEND._serialized_end=6751 - _PARAMS._serialized_start=6754 - _PARAMS._serialized_end=7188 - _SIMULATIONSTART._serialized_start=7190 - _SIMULATIONSTART._serialized_end=7207 - _SIMULATIONPARAMETERSSTART._serialized_start=7209 - _SIMULATIONPARAMETERSSTART._serialized_end=7236 - _SIMULATIONTIMESTEPSTART._serialized_start=7239 - _SIMULATIONTIMESTEPSTART._serialized_end=7551 - _SIMULATIONTIMESTEPSTART_TIMESTEPUNITS._serialized_start=7414 - _SIMULATIONTIMESTEPSTART_TIMESTEPUNITS._serialized_end=7551 - _CONTINUOUSPROBDIST._serialized_start=7554 - _CONTINUOUSPROBDIST._serialized_end=7794 - _CONTINUOUSPROBDIST_GAUSSIAN._serialized_start=7702 - _CONTINUOUSPROBDIST_GAUSSIAN._serialized_end=7741 - _CONTINUOUSPROBDIST_UNIFORM._serialized_start=7743 - _CONTINUOUSPROBDIST_UNIFORM._serialized_end=7786 - _DISCRETEPROBDIST._serialized_start=7797 - _DISCRETEPROBDIST._serialized_end=7928 - _DISCRETEPROBDIST_UNIFORM._serialized_start=7877 - _DISCRETEPROBDIST_UNIFORM._serialized_end=7920 - _DECISIONCONFIGURATIONSTART._serialized_start=7931 - _DECISIONCONFIGURATIONSTART._serialized_end=11553 - _DECISIONCONFIGURATIONSTART_VIZIERCONFIG._serialized_start=8392 - _DECISIONCONFIGURATIONSTART_VIZIERCONFIG._serialized_end=8406 - _DECISIONCONFIGURATIONSTART_ACMECONFIG._serialized_start=8409 - _DECISIONCONFIGURATIONSTART_ACMECONFIG._serialized_end=8650 - _DECISIONCONFIGURATIONSTART_ACMECONFIG_ACMEAGENT._serialized_start=8508 - 
_DECISIONCONFIGURATIONSTART_ACMECONFIG_ACMEAGENT._serialized_end=8650 - _DECISIONCONFIGURATIONSTART_GENETICALGORITHMCONFIG._serialized_start=8652 - _DECISIONCONFIGURATIONSTART_GENETICALGORITHMCONFIG._serialized_end=8705 - _DECISIONCONFIGURATIONSTART_EXHAUSTIVESEARCHCONFIG._serialized_start=8707 - _DECISIONCONFIGURATIONSTART_EXHAUSTIVESEARCHCONFIG._serialized_end=8731 - _DECISIONCONFIGURATIONSTART_LLMCONFIG._serialized_start=8734 - _DECISIONCONFIGURATIONSTART_LLMCONFIG._serialized_end=9097 - _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMALGORITHM._serialized_start=8928 - _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMALGORITHM._serialized_end=9015 - _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMGOAL._serialized_start=9017 - _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMGOAL._serialized_end=9097 - _DECISIONCONFIGURATIONSTART_BAYESIANOPTCONFIG._serialized_start=9099 - _DECISIONCONFIGURATIONSTART_BAYESIANOPTCONFIG._serialized_end=9118 - _DECISIONCONFIGURATIONSTART_SENSITIVITYANALYSISCONFIG._serialized_start=9120 - _DECISIONCONFIGURATIONSTART_SENSITIVITYANALYSISCONFIG._serialized_end=9147 - _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG._serialized_start=9150 - _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG._serialized_end=9548 - _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG_NEVERGRADALGORITHM._serialized_start=9267 - _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG_NEVERGRADALGORITHM._serialized_end=9548 - _DECISIONCONFIGURATIONSTART_SMCPYCONFIG._serialized_start=9550 - _DECISIONCONFIGURATIONSTART_SMCPYCONFIG._serialized_end=9563 - _DECISIONCONFIGURATIONSTART_WORKLISTSCHEDULERCONFIG._serialized_start=9565 - _DECISIONCONFIGURATIONSTART_WORKLISTSCHEDULERCONFIG._serialized_end=9590 - _DECISIONCONFIGURATIONSTART_CHOICECONFIG._serialized_start=9593 - _DECISIONCONFIGURATIONSTART_CHOICECONFIG._serialized_end=10533 - _DECISIONCONFIGURATIONSTART_CHOICECONFIGENTRY._serialized_start=10535 - _DECISIONCONFIGURATIONSTART_CHOICECONFIGENTRY._serialized_end=10642 - _DECISIONCONFIGURATIONSTART_ATTRPROPS._serialized_start=10645 - _DECISIONCONFIGURATIONSTART_ATTRPROPS._serialized_end=10914 - _DECISIONCONFIGURATIONSTART_STATEATTRSENTRY._serialized_start=10916 - _DECISIONCONFIGURATIONSTART_STATEATTRSENTRY._serialized_end=11018 - _DECISIONCONFIGURATIONSTART_ACTIONATTRSENTRY._serialized_start=11020 - _DECISIONCONFIGURATIONSTART_ACTIONATTRSENTRY._serialized_end=11123 - _DECISIONCONFIGURATIONSTART_OUTCOMEATTRSENTRY._serialized_start=11125 - _DECISIONCONFIGURATIONSTART_OUTCOMEATTRSENTRY._serialized_end=11229 - _DECISIONCONFIGURATIONSTART_OPTIMIZERTYPE._serialized_start=11232 - _DECISIONCONFIGURATIONSTART_OPTIMIZERTYPE._serialized_end=11466 - _DECISIONCONFIGURATIONSTART_DATATYPE._serialized_start=11468 - _DECISIONCONFIGURATIONSTART_DATATYPE._serialized_end=11553 - _DECISIONPARAM._serialized_start=11555 - _DECISIONPARAM._serialized_end=11620 - _DECISIONPOINT._serialized_start=11623 - _DECISIONPOINT._serialized_end=11788 - _DECISIONOUTCOME._serialized_start=11791 - _DECISIONOUTCOME._serialized_end=11919 + _BLOCKEND_METRICS._serialized_end=5910 + _BLOCKEND_SUBTYPE._serialized_start=5913 + _BLOCKEND_SUBTYPE._serialized_end=6420 + _LISTSTART._serialized_start=6443 + _LISTSTART._serialized_end=6618 + _LISTSTART_SUBTYPE._serialized_start=6508 + _LISTSTART_SUBTYPE._serialized_end=6618 + _TENSORFLOWMODELTRAININGEPOCHSTART._serialized_start=6620 + _TENSORFLOWMODELTRAININGEPOCHSTART._serialized_end=6694 + _ATTRIBUTESTART._serialized_start=6696 + _ATTRIBUTESTART._serialized_end=6757 + _ATTRIBUTEEND._serialized_start=6759 + _ATTRIBUTEEND._serialized_end=6786 + 
_PARAMS._serialized_start=6789 + _PARAMS._serialized_end=7223 + _SIMULATIONSTART._serialized_start=7225 + _SIMULATIONSTART._serialized_end=7242 + _SIMULATIONPARAMETERSSTART._serialized_start=7244 + _SIMULATIONPARAMETERSSTART._serialized_end=7271 + _SIMULATIONTIMESTEPSTART._serialized_start=7274 + _SIMULATIONTIMESTEPSTART._serialized_end=7586 + _SIMULATIONTIMESTEPSTART_TIMESTEPUNITS._serialized_start=7449 + _SIMULATIONTIMESTEPSTART_TIMESTEPUNITS._serialized_end=7586 + _CONTINUOUSPROBDIST._serialized_start=7589 + _CONTINUOUSPROBDIST._serialized_end=7829 + _CONTINUOUSPROBDIST_GAUSSIAN._serialized_start=7737 + _CONTINUOUSPROBDIST_GAUSSIAN._serialized_end=7776 + _CONTINUOUSPROBDIST_UNIFORM._serialized_start=7778 + _CONTINUOUSPROBDIST_UNIFORM._serialized_end=7821 + _DISCRETEPROBDIST._serialized_start=7832 + _DISCRETEPROBDIST._serialized_end=7963 + _DISCRETEPROBDIST_UNIFORM._serialized_start=7912 + _DISCRETEPROBDIST_UNIFORM._serialized_end=7955 + _DECISIONCONFIGURATIONSTART._serialized_start=7966 + _DECISIONCONFIGURATIONSTART._serialized_end=11588 + _DECISIONCONFIGURATIONSTART_VIZIERCONFIG._serialized_start=8427 + _DECISIONCONFIGURATIONSTART_VIZIERCONFIG._serialized_end=8441 + _DECISIONCONFIGURATIONSTART_ACMECONFIG._serialized_start=8444 + _DECISIONCONFIGURATIONSTART_ACMECONFIG._serialized_end=8685 + _DECISIONCONFIGURATIONSTART_ACMECONFIG_ACMEAGENT._serialized_start=8543 + _DECISIONCONFIGURATIONSTART_ACMECONFIG_ACMEAGENT._serialized_end=8685 + _DECISIONCONFIGURATIONSTART_GENETICALGORITHMCONFIG._serialized_start=8687 + _DECISIONCONFIGURATIONSTART_GENETICALGORITHMCONFIG._serialized_end=8740 + _DECISIONCONFIGURATIONSTART_EXHAUSTIVESEARCHCONFIG._serialized_start=8742 + _DECISIONCONFIGURATIONSTART_EXHAUSTIVESEARCHCONFIG._serialized_end=8766 + _DECISIONCONFIGURATIONSTART_LLMCONFIG._serialized_start=8769 + _DECISIONCONFIGURATIONSTART_LLMCONFIG._serialized_end=9132 + _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMALGORITHM._serialized_start=8963 + _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMALGORITHM._serialized_end=9050 + _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMGOAL._serialized_start=9052 + _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMGOAL._serialized_end=9132 + _DECISIONCONFIGURATIONSTART_BAYESIANOPTCONFIG._serialized_start=9134 + _DECISIONCONFIGURATIONSTART_BAYESIANOPTCONFIG._serialized_end=9153 + _DECISIONCONFIGURATIONSTART_SENSITIVITYANALYSISCONFIG._serialized_start=9155 + _DECISIONCONFIGURATIONSTART_SENSITIVITYANALYSISCONFIG._serialized_end=9182 + _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG._serialized_start=9185 + _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG._serialized_end=9583 + _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG_NEVERGRADALGORITHM._serialized_start=9302 + _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG_NEVERGRADALGORITHM._serialized_end=9583 + _DECISIONCONFIGURATIONSTART_SMCPYCONFIG._serialized_start=9585 + _DECISIONCONFIGURATIONSTART_SMCPYCONFIG._serialized_end=9598 + _DECISIONCONFIGURATIONSTART_WORKLISTSCHEDULERCONFIG._serialized_start=9600 + _DECISIONCONFIGURATIONSTART_WORKLISTSCHEDULERCONFIG._serialized_end=9625 + _DECISIONCONFIGURATIONSTART_CHOICECONFIG._serialized_start=9628 + _DECISIONCONFIGURATIONSTART_CHOICECONFIG._serialized_end=10568 + _DECISIONCONFIGURATIONSTART_CHOICECONFIGENTRY._serialized_start=10570 + _DECISIONCONFIGURATIONSTART_CHOICECONFIGENTRY._serialized_end=10677 + _DECISIONCONFIGURATIONSTART_ATTRPROPS._serialized_start=10680 + _DECISIONCONFIGURATIONSTART_ATTRPROPS._serialized_end=10949 + _DECISIONCONFIGURATIONSTART_STATEATTRSENTRY._serialized_start=10951 + 
_DECISIONCONFIGURATIONSTART_STATEATTRSENTRY._serialized_end=11053 + _DECISIONCONFIGURATIONSTART_ACTIONATTRSENTRY._serialized_start=11055 + _DECISIONCONFIGURATIONSTART_ACTIONATTRSENTRY._serialized_end=11158 + _DECISIONCONFIGURATIONSTART_OUTCOMEATTRSENTRY._serialized_start=11160 + _DECISIONCONFIGURATIONSTART_OUTCOMEATTRSENTRY._serialized_end=11264 + _DECISIONCONFIGURATIONSTART_OPTIMIZERTYPE._serialized_start=11267 + _DECISIONCONFIGURATIONSTART_OPTIMIZERTYPE._serialized_end=11501 + _DECISIONCONFIGURATIONSTART_DATATYPE._serialized_start=11503 + _DECISIONCONFIGURATIONSTART_DATATYPE._serialized_end=11588 + _DECISIONPARAM._serialized_start=11590 + _DECISIONPARAM._serialized_end=11655 + _DECISIONPOINT._serialized_start=11658 + _DECISIONPOINT._serialized_end=11823 + _DECISIONOUTCOME._serialized_start=11826 + _DECISIONOUTCOME._serialized_end=11954 # @@protoc_insertion_point(module_scope) diff --git a/py/sight/sight.py b/py/sight/sight.py index e5192a7..8623cf9 100644 --- a/py/sight/sight.py +++ b/py/sight/sight.py @@ -207,6 +207,10 @@ def __init__( self.num_transitive_contents.set(Location()) self.active_block_labels = contextvars.ContextVar('active_block_labels') self.active_block_labels.set([]) + self.active_block_start_time = contextvars.ContextVar('active_block_start_time') + self.active_block_start_time.set([]) + self.active_block_deeper_elapsed_time = contextvars.ContextVar('active_block_deeper_elapsed_time') + self.active_block_deeper_elapsed_time.set([0]) self.attributes = {} self.open = True @@ -602,6 +606,9 @@ def enter_block(self, return self.location.get() self.active_block_labels.get().append(label) + self.active_block_start_time.get().append(time.time_ns()) + self.active_block_deeper_elapsed_time.get().append(0) + # self.emit_text_to_file( # self.line_prefix + label + '<<<' + self.line_suffix + '\n' # ) @@ -671,7 +678,19 @@ def exit_block(self, label: str, obj: sight_pb2.Object, frame=None) -> None: ).pos() obj.block_end.location_of_block_start = self.open_block_start_locations.get( )[-1] + + + elapsed_time_ns = time.time_ns() - self.active_block_start_time.get()[-1] + obj.block_end.metrics.elapsed_time_ns = elapsed_time_ns + obj.block_end.metrics.exclusive_elapsed_time_ns = elapsed_time_ns - self.active_block_deeper_elapsed_time.get()[-1] + + self.active_block_deeper_elapsed_time.get().pop() + self.active_block_deeper_elapsed_time.get()[-1] += elapsed_time_ns + obj.block_end.metrics.elapsed_time_ns = elapsed_time_ns + + self.open_block_start_locations.get().pop() + self.active_block_start_time.get().pop() if frame is None: # pytype: disable=attribute-error diff --git a/py/sight/widgets/decision/decision.py b/py/sight/widgets/decision/decision.py index e6a1d82..34b14df 100644 --- a/py/sight/widgets/decision/decision.py +++ b/py/sight/widgets/decision/decision.py @@ -914,6 +914,8 @@ def decision_outcome( def propose_actions(sight, action_dict): request = service_pb2.ProposeActionRequest() + if sight.params.silent_logger: + raise ValueError('Cannot use Decision API using Sight silent logger.') request.client_id = str(sight.id) actions_data = [] @@ -1020,10 +1022,7 @@ def finalize_episode(sight): # , optimizer_obj sight_pb2.DecisionConfigurationStart.OptimizerType. 
OT_WORKLIST_SCHEDULER, sight) req.decision_outcome.CopyFrom( - # get_fvs_outcome_proto('outcome', sight)) - # whole output of key "fvs_outcome" is stringified, not individual key-value get_decision_outcome_proto('outcome', sight)) - # print('request : ', req) optimizer_obj = optimizer.get_instance() optimizer_obj.finalize_episode(sight, req) elif _OPTIMIZER_TYPE.value == 'dm_acme': From 21f69bc842b8e5587c65d1f54851535f986723c1 Mon Sep 17 00:00:00 2001 From: Greg Bronevetsky Date: Fri, 1 Nov 2024 15:38:26 +0000 Subject: [PATCH 19/25] Final edits --- sight_service/deploy_service.sh | 5 + sight_service/requirements.txt | 2 +- sight_service/service_root.py | 169 ++++++++++-------------- sight_service/worklist_scheduler_opt.py | 2 +- 4 files changed, 79 insertions(+), 99 deletions(-) create mode 100755 sight_service/deploy_service.sh diff --git a/sight_service/deploy_service.sh b/sight_service/deploy_service.sh new file mode 100755 index 0000000..9211eb8 --- /dev/null +++ b/sight_service/deploy_service.sh @@ -0,0 +1,5 @@ +#!/usr/bin/bash +# Builds the docker container for the Sight service +docker build --tag gcr.io/$PROJECT_ID/sight-$1 -f sight_service/Dockerfile . +docker push gcr.io/$PROJECT_ID/sight-$1 +gcloud run deploy sight-$1 --image=gcr.io/$PROJECT_ID/sight-$1:latest --allow-unauthenticated --service-account=sight-service-account@$PROJECT_ID.iam.gserviceaccount.com --concurrency=default --cpu=2 --memory=8Gi --min-instances=1 --max-instances=1 --no-cpu-throttling --region=us-central1 --project=$PROJECT_ID \ No newline at end of file diff --git a/sight_service/requirements.txt b/sight_service/requirements.txt index 89cc84d..22c8323 100644 --- a/sight_service/requirements.txt +++ b/sight_service/requirements.txt @@ -3,7 +3,7 @@ python-dotenv grpcio readerwriterlock protobuf -bayesian-optimization +bayesian-optimization==1.4.3 google-cloud-logging overrides google-generativeai diff --git a/sight_service/service_root.py b/sight_service/service_root.py index c37a7d1..96a1ac2 100644 --- a/sight_service/service_root.py +++ b/sight_service/service_root.py @@ -22,14 +22,14 @@ def warn(*args, **kwargs): warnings.warn = warn -from concurrent import futures -# from helpers.logs.logs_handler import logger as logging -import logging - from absl import app from absl import flags +from concurrent import futures +from collections import defaultdict from dotenv import load_dotenv +import functools import grpc +import logging load_dotenv() @@ -37,6 +37,7 @@ def warn(*args, **kwargs): import time from typing import Any, Dict, List, Tuple import uuid +import sys # from overrides import overrides from readerwriterlock import rwlock @@ -57,7 +58,6 @@ def warn(*args, **kwargs): from sight_service.worklist_scheduler_opt import WorklistScheduler _PORT = flags.DEFINE_integer('port', 8080, 'The port to listen on') -_file_name = "service_root.py" _resolve_times = [] instanceId = os.getenv("SPANNER_INSTANCE_ID") @@ -70,15 +70,45 @@ def generate_unique_number() -> int: return uuid.uuid4().int & (1 << 63) - 1 +import logging + +func_to_elapsed_time = defaultdict(float) +func_call_count = defaultdict(float) + +def rpc_call(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + logging.info(f"<<<<<< {func.__name__}, file {os.path.basename(__file__)} with args={args}") + + if 'request' in kwargs: + if 'client_id' in kwargs['request'].keys(): + if kwargs['request'].client_id == 0: + raise ValueError(f'Empty log identifier in {func.__name__}.') + + start_time = time.time() + result = func(*args, **kwargs) + 
elapsed_time = time.time() - start_time + func_to_elapsed_time[func.__name__] += elapsed_time + func_call_count[func.__name__] += 1 + + logging.info('>>>>>> %s, file %s, elapsed: (this=%f, avg=%f, count=%d)', + func.__name__, + os.path.basename(__file__), + elapsed_time, + func_to_elapsed_time[func.__name__]/func_call_count[func.__name__], + func_call_count[func.__name__], + ) + return result + return wrapper + def calculate_resolve_time(start_time): - method_name = "calculate_resolve_time" - logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) + logging.info(">>>>>>> In %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__)) resolve_time = time.time() - start_time _resolve_times.append(resolve_time) avg_resolve_time = sum(_resolve_times) / len(_resolve_times) logging.info(" logging.info : Average Resolve Time From Server: %s seconds", round(avg_resolve_time, 4)) - logging.info("<<<<<< Out %s method of %s file.", method_name, _file_name) + logging.info("<<<<<< Out %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__)) class Optimizers: @@ -95,12 +125,9 @@ def launch(self, request: service_pb2.LaunchRequest) -> service_pb2.LaunchResponse: """Creates more specific optimizer and use them while responding to clients accordingly. """ - method_name = "launch" - logging.info(">>>>>>> In %s method of %s file.", method_name, "Optimizers") - optimizer_type = request.decision_config_params.optimizer_type logging.debug(">>>>>>> In %s method of %s file. optimizer_type=%s", - method_name, _file_name, optimizer_type) + sys._getframe().f_code.co_name, os.path.basename(__file__), optimizer_type) with self.instances_lock.gen_wlock(): if optimizer_type == sight_pb2.DecisionConfigurationStart.OptimizerType.OT_VIZIER: self.instances[request.client_id] = Vizier() @@ -143,11 +170,11 @@ def launch(self, else: return service_pb2.LaunchResponse( display_string=f"OPTIMIZER '{optimizer_type}' NOT VALID!!") - logging.info("<<<<<< Out %s method of %s file.", method_name, _file_name) + + logging.info("<<<<<< Out %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__)) def get_instance(self, client_id: str) -> OptimizerInstance: - # method_name = "get_instance" - # logging.debug(">>>>>>> In %s method of %s file.", method_name, _file_name) + # logging.debug(">>>>>>> In %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__)) with self.instances_lock.gen_rlock(): if (client_id in self.instances): instance_obj = self.instances[client_id] @@ -155,7 +182,7 @@ def get_instance(self, client_id: str) -> OptimizerInstance: else: #add better mechanism, this require in close rpc for now return None - # logging.debug("<<<<<< Out %s method of %s file.", method_name, _file_name) + # logging.debug("<<<<<< Out %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__)) class SightService(service_pb2_grpc.SightServiceServicer): @@ -165,137 +192,86 @@ class SightService(service_pb2_grpc.SightServiceServicer): def __init__(self): super().__init__() self.optimizers = Optimizers() + logging.info('SightService::__init__') + + @rpc_call def Test(self, request, context): - method_name = "Test" - logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) - obj = service_pb2.TestResponse(val="222") - logging.info("<<<<<< Out %s method of %s file.", method_name, _file_name) - return obj + return service_pb2.TestResponse(val="222") # def GetWeights(self, request, context): - # 
method_name = "GetWeights" - # logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) + # logging.info(">>>>>>> In %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__)) # start_time = time.time() # obj = self.optimizers.get_instance(request.client_id).get_weights(request) # # calculate_resolve_time(start_time) - # logging.info("<<<<<< Out %s method of %s file.", method_name, _file_name) + # logging.info("<<<<<< Out %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__)) # return obj + @rpc_call def DecisionPoint(self, request, context): - method_name = "DecisionPoint" - logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) - start_time = time.time() - obj = self.optimizers.get_instance( + return self.optimizers.get_instance( request.client_id).decision_point(request) - # calculate_resolve_time(start_time) - logging.info("<<<<<<< Out %s method of %s file.", method_name, _file_name) - return obj + @rpc_call def Tell(self, request, context): - method_name = "Tell" - logging.debug(">>>>>>> In %s method of %s file.", method_name, _file_name) - return self.optimizers.get_instance(request.client_id).tell(request) - logging.debug("<<<<<<< Out %s method of %s file.", method_name, _file_name) + @rpc_call def Listen(self, request, context): - method_name = "Listen" - logging.debug(">>>>>>> In %s method of %s file.", method_name, _file_name) - return self.optimizers.get_instance(request.client_id).listen(request) - logging.debug("<<<<<<< Out %s method of %s file.", method_name, _file_name) + @rpc_call def CurrentStatus(self, request, context): - method_name = "CurrentStatus" - # logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) - return self.optimizers.get_instance( request.client_id).current_status(request) - logging.info("<<<<<<< Out %s method of %s file.", method_name, _file_name) + @rpc_call def FetchOptimalAction(self, request, context): - method_name = "FetchOptimalAction" - logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) - - obj = self.optimizers.get_instance( + return self.optimizers.get_instance( request.client_id).fetch_optimal_action(request) - logging.info("<<<<<<< Out %s method of %s file.", method_name, _file_name) - return obj + @rpc_call def ProposeAction(self, request, context): - method_name = "ProposeAction" - logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) - - obj = self.optimizers.get_instance( + return self.optimizers.get_instance( request.client_id).propose_action(request) - logging.info("<<<<<<< Out %s method of %s file.", method_name, _file_name) - return obj + @rpc_call def GetOutcome(self, request, context): - method_name = "GetOutcome" - logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) - - obj = self.optimizers.get_instance(request.client_id).GetOutcome(request) - logging.info("<<<<<<< Out %s method of %s file.", method_name, _file_name) - return obj + return self.optimizers.get_instance(request.client_id).GetOutcome(request) + @rpc_call def FinalizeEpisode(self, request, context): - method_name = "FinalizeEpisode" - logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) - - obj = self.optimizers.get_instance( + return self.optimizers.get_instance( request.client_id).finalize_episode(request) - logging.info("<<<<<<< Out %s method of %s file.", method_name, _file_name) - return obj + @rpc_call def Launch(self, request, context): - method_name = "Launch" - 
logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) - # start_time = time.time() - obj = self.optimizers.launch(request) - # calculate_resolve_time(start_time) - logging.info("<<<<<<< Out %s method of %s file.", method_name, _file_name) - return obj + return self.optimizers.launch(request) + @rpc_call def Create(self, request, context): - method_name = "Create" - logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) - # start_time = time.time() unique_id = generate_unique_number() - # calculate_resolve_time(start_time) - - logging.info("<<<<<<< Out %s method of %s file.", method_name, _file_name) return service_pb2.CreateResponse(id=unique_id, path_prefix="/tmp/") + @rpc_call def Close(self, request, context): - method_name = "Close" - logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) - # only call if it's launch called, otherwise no entry of opt for that client if (self.optimizers.get_instance(request.client_id)): obj = self.optimizers.get_instance(request.client_id).close(request) else: obj = service_pb2.CloseResponse() - #? do we need to remove entry from optimizer dict, if available?? - logging.info("<<<<<<< Out %s method of %s file.", method_name, _file_name) return obj + @rpc_call def WorkerAlive(self, request, context): - method_name = "WorkerAlive" - logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) - - obj = self.optimizers.get_instance(request.client_id).WorkerAlive(request) - logging.info("<<<<<<< Out %s method of %s file.", method_name, _file_name) - return obj + return self.optimizers.get_instance(request.client_id).WorkerAlive(request) def serve(): """Main method that listens on port 8080 and handle requests received from client. """ - method_name = "serve" - logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) + logging.info(">>>>>>> In %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__)) server = grpc.server(futures.ThreadPoolExecutor(max_workers=500), options=[ @@ -309,19 +285,18 @@ def serve(): # flask_app.run(debug=True, host="0.0.0.0", port=_PORT.value) server.wait_for_termination() - logging.info("<<<<<<< Out %s method of %s file.", method_name, _file_name) + logging.info("<<<<<<< Out %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__)) def main(argv): - method_name = "__main__" logging.basicConfig(level=logging.INFO) - logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) + logging.info(">>>>>>> In %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__)) try: app.run(serve()) except BaseException as e: logging.error("Error occurred : ") logging.error(e) - logging.info("<<<<<<< Out %s method of %s file.", method_name, _file_name) + logging.info("<<<<<<< Out %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__)) if __name__ == "__main__": diff --git a/sight_service/worklist_scheduler_opt.py b/sight_service/worklist_scheduler_opt.py index a1a3c25..d26c5ce 100644 --- a/sight_service/worklist_scheduler_opt.py +++ b/sight_service/worklist_scheduler_opt.py @@ -47,7 +47,7 @@ def __init__(self): self.max_reward_sample = {} - def add_outcome_to_outcome_response(self,msg_details : MessageDetails, sample_id, outcome: service_pb2.GetOutcomeResponse.outcome): + def add_outcome_to_outcome_response(self,msg_details : MessageDetails, sample_id, outcome: service_pb2.GetOutcomeResponse.Outcome): outcome.action_id = sample_id outcome.status = 
service_pb2.GetOutcomeResponse.Outcome.Status.COMPLETED outcome.reward = msg_details.reward From c33c50d6159b2c5bdcf9ca4102c88850f9b17a85 Mon Sep 17 00:00:00 2001 From: Greg Bronevetsky Date: Mon, 4 Nov 2024 16:42:23 +0000 Subject: [PATCH 20/25] Service refactor, decision.py supports lists as outputs. --- py/sight/widgets/decision/decision.py | 4 ++-- sight_service/service_root.py | 11 +++++++++-- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/py/sight/widgets/decision/decision.py b/py/sight/widgets/decision/decision.py index e6a1d82..b881217 100644 --- a/py/sight/widgets/decision/decision.py +++ b/py/sight/widgets/decision/decision.py @@ -679,12 +679,12 @@ def get_decision_outcome_proto(outcome_label: str, double_value=val, ) else: - if (isinstance(val, dict)): + if (isinstance(val, dict) or isinstance(val, list)): json_value = json.dumps(val) elif (isinstance(val, pd.Series)): json_value = json.dumps(val.to_dict()) else: - raise TypeError("value needs to be dict type") + raise TypeError(f'Value of {key} needs to be dict, list or pd.Series type. Actual type is {type(val)}, val={val}.') value = sight_pb2.Value(sub_type=sight_pb2.Value.ST_JSON, json_value=json_value) diff --git a/sight_service/service_root.py b/sight_service/service_root.py index 96a1ac2..7a4bb51 100644 --- a/sight_service/service_root.py +++ b/sight_service/service_root.py @@ -30,6 +30,7 @@ def warn(*args, **kwargs): import functools import grpc import logging +import math load_dotenv() @@ -73,6 +74,7 @@ def generate_unique_number() -> int: import logging func_to_elapsed_time = defaultdict(float) +func_to_elapsed_time_sq = defaultdict(float) func_call_count = defaultdict(float) def rpc_call(func): @@ -89,13 +91,18 @@ def wrapper(*args, **kwargs): result = func(*args, **kwargs) elapsed_time = time.time() - start_time func_to_elapsed_time[func.__name__] += elapsed_time + func_to_elapsed_time_sq[func.__name__] += elapsed_time*elapsed_time func_call_count[func.__name__] += 1 - logging.info('>>>>>> %s, file %s, elapsed: (this=%f, avg=%f, count=%d)', + mean = func_to_elapsed_time[func.__name__]/func_call_count[func.__name__] + mean_sq = func_to_elapsed_time_sq[func.__name__]/func_call_count[func.__name__] + + logging.info('>>>>>> %s, file %s, elapsed: (this=%f, avg=%f, rel_sd=%f, count=%d)', func.__name__, os.path.basename(__file__), elapsed_time, - func_to_elapsed_time[func.__name__]/func_call_count[func.__name__], + mean, + math.sqrt(mean_sq - mean*mean)/mean if mean != 0 else 0, func_call_count[func.__name__], ) return result From 7b532aa8eb9d947e6c26b291cd2630796d149eb0 Mon Sep 17 00:00:00 2001 From: Greg Bronevetsky Date: Mon, 4 Nov 2024 16:49:51 +0000 Subject: [PATCH 21/25] Swap from logging.info to logging.debug --- sight_service/service_root.py | 24 +++++++----------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/sight_service/service_root.py b/sight_service/service_root.py index 7a4bb51..27ff463 100644 --- a/sight_service/service_root.py +++ b/sight_service/service_root.py @@ -80,7 +80,7 @@ def generate_unique_number() -> int: def rpc_call(func): @functools.wraps(func) def wrapper(*args, **kwargs): - logging.info(f"<<<<<< {func.__name__}, file {os.path.basename(__file__)} with args={args}") + logging.debug(f"<<<<<< {func.__name__}, file {os.path.basename(__file__)} with args={args}") if 'request' in kwargs: if 'client_id' in kwargs['request'].keys(): @@ -97,7 +97,7 @@ def wrapper(*args, **kwargs): mean = func_to_elapsed_time[func.__name__]/func_call_count[func.__name__] mean_sq = 
From 7b532aa8eb9d947e6c26b291cd2630796d149eb0 Mon Sep 17 00:00:00 2001
From: Greg Bronevetsky
Date: Mon, 4 Nov 2024 16:49:51 +0000
Subject: [PATCH 21/25] Swap from logging.info to logging.debug

---
 sight_service/service_root.py | 24 +++++++-----------------
 1 file changed, 7 insertions(+), 17 deletions(-)

diff --git a/sight_service/service_root.py b/sight_service/service_root.py
index 7a4bb51..27ff463 100644
--- a/sight_service/service_root.py
+++ b/sight_service/service_root.py
@@ -80,7 +80,7 @@ def rpc_call(func):
 
   @functools.wraps(func)
   def wrapper(*args, **kwargs):
-    logging.info(f"<<<<<< {func.__name__}, file {os.path.basename(__file__)} with args={args}")
+    logging.debug(f"<<<<<< {func.__name__}, file {os.path.basename(__file__)} with args={args}")
 
     if 'request' in kwargs:
       if 'client_id' in kwargs['request'].keys():
@@ -97,7 +97,7 @@ def wrapper(*args, **kwargs):
     mean = func_to_elapsed_time[func.__name__]/func_call_count[func.__name__]
     mean_sq = func_to_elapsed_time_sq[func.__name__]/func_call_count[func.__name__]
 
-    logging.info('>>>>>> %s, file %s, elapsed: (this=%f, avg=%f, rel_sd=%f, count=%d)',
+    logging.debug('>>>>>> %s, file %s, elapsed: (this=%f, avg=%f, rel_sd=%f, count=%d)',
         func.__name__,
         os.path.basename(__file__),
         elapsed_time,
@@ -108,16 +108,6 @@ def wrapper(*args, **kwargs):
     return result
   return wrapper
 
-def calculate_resolve_time(start_time):
-  logging.info(">>>>>>> In %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__))
-  resolve_time = time.time() - start_time
-  _resolve_times.append(resolve_time)
-  avg_resolve_time = sum(_resolve_times) / len(_resolve_times)
-  logging.info("  logging.info : Average Resolve Time From Server: %s seconds",
-               round(avg_resolve_time, 4))
-  logging.info("<<<<<< Out %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__))
-
-
 class Optimizers:
   """
   Optimizer class to create request specific optimizer and use the methods
@@ -148,7 +138,7 @@ def launch(self,
     # elif optimizer_type == sight_pb2.DecisionConfigurationStart.OptimizerType.OT_ACME:
     #   self.instances[request.client_id] = Acme()
     #   obj = self.instances[request.client_id].launch(request)
-    #   # logging.info("self of optimizers class: %s", str(self.__dict__))
+    #   # logging.debug("self of optimizers class: %s", str(self.__dict__))
     #   return obj
     elif optimizer_type == sight_pb2.DecisionConfigurationStart.OptimizerType.OT_LLM:
       self.instances[request.client_id] = LLM()
@@ -178,7 +168,7 @@ def launch(self,
       return service_pb2.LaunchResponse(
           display_string=f"OPTIMIZER '{optimizer_type}' NOT VALID!!")
 
-    logging.info("<<<<<< Out %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__))
+    logging.debug("<<<<<< Out %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__))
 
   def get_instance(self, client_id: str) -> OptimizerInstance:
     # logging.debug(">>>>>>> In %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__))
@@ -199,7 +189,7 @@ class SightService(service_pb2_grpc.SightServiceServicer):
   def __init__(self):
     super().__init__()
     self.optimizers = Optimizers()
-    logging.info('SightService::__init__')
+    logging.debug('SightService::__init__')
 
   @rpc_call
   def Test(self, request, context):
     return service_pb2.TestResponse(val="222")
 
   # def GetWeights(self, request, context):
-  #   logging.info(">>>>>>> In %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__))
+  #   logging.debug(">>>>>>> In %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__))
   #   start_time = time.time()
   #   obj = self.optimizers.get_instance(request.client_id).get_weights(request)
   #   # calculate_resolve_time(start_time)
-  #   logging.info("<<<<<< Out %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__))
+  #   logging.debug("<<<<<< Out %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__))
   #   return obj
 
   @rpc_call

From 3cef3192316aaaeb7c64cfdf8da8c585d6bf961e Mon Sep 17 00:00:00 2001
From: Hrushikesh Makode <152846252+hrushikeshm-g@users.noreply.github.com>
Date: Mon, 11 Nov 2024 07:09:26 +0000
Subject: [PATCH 22/25] rpc changes array to map (#67)

* rpc changes array to map

* comments resolved
---
 .config/.style.yapf                           |    6 +-
 .pre-commit-config.yaml                       |   10 +-
 py/avrofile-schema.avsc                       | 4399 +++++++++--------
 py/sight/demo/propose_action.py               |   11 +-
 py/sight/proto/sight.proto                    |   13 +-
 py/sight/proto/sight_pb2.py                   |  778 +--
py/sight/utility.py | 11 +- py/sight/utils/proto_conversion.py | 133 + py/sight/widgets/decision/decision.py | 135 +- py/sight/widgets/decision/optimizer_client.py | 36 +- .../single_action_optimizer_client.py | 10 +- sight_service/Dockerfile | 1 + sight_service/acme_optimizer.py | 35 - sight_service/bayesian_opt.py | 17 +- sight_service/exhaustive_search.py | 4 +- sight_service/genetic_algorithm.py | 4 +- sight_service/llm.py | 14 +- sight_service/nevergrad_opt.py | 12 +- sight_service/optimizer_instance.py | 58 - sight_service/proto/api_descriptor.pb | Bin 145366 -> 145890 bytes sight_service/proto/service.proto | 19 +- sight_service/proto/service_pb2.py | 128 +- sight_service/proto/service_pb2_grpc.py | 195 +- sight_service/sensitivity_analysis.py | 104 +- sight_service/service_root.py | 84 +- sight_service/smc_py.py | 6 +- sight_service/vizier.py | 7 +- sight_service/worklist_scheduler_opt.py | 92 +- 28 files changed, 3097 insertions(+), 3225 deletions(-) create mode 100644 py/sight/utils/proto_conversion.py diff --git a/.config/.style.yapf b/.config/.style.yapf index d12d364..c0c0a58 100644 --- a/.config/.style.yapf +++ b/.config/.style.yapf @@ -2,6 +2,6 @@ based_on_style = google indent_width = 2 column_limit = 80 -split_before_logical_operator = False -blank_lines_between_top_level_imports_and_variables = 1 -dedent_closing_brackets = True +ALLOW_SPLIT_BEFORE_DICT_VALUE = False +each_dict_entry_on_separate_line = True +split_complex_comprehension = True diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0cf0e06..5eb6d8a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,11 +16,11 @@ repos: # - id: yapf # args: ["-ir", "--style", ".config/.style.yapf"] - # - repo: https://github.com/pre-commit/mirrors-isort - # rev: v5.10.1 - # hooks: - # - id: isort - # args: ["--settings-path", ".config/.isort.cfg"] + - repo: https://github.com/pre-commit/mirrors-isort + rev: v5.10.1 + hooks: + - id: isort + args: ["--settings-path", ".config/.isort.cfg"] - repo: https://github.com/google/yapf rev: v0.31.0 diff --git a/py/avrofile-schema.avsc b/py/avrofile-schema.avsc index ee32e62..251f74d 100644 --- a/py/avrofile-schema.avsc +++ b/py/avrofile-schema.avsc @@ -1,2067 +1,2508 @@ { - "type": "record", - "name": "sight_schema", - "fields": [ - { - "name": "location", - "type": ["string", "null"], - "doc": "The current location in the nesting hierarchy of the log" - }, - { - "name": "index", - "type": ["long", "null"], - "doc": "Index of this object in the log, which provides a total order on log" - }, - { - "name": "log_uid", - "type": ["string", "null"], - "doc": "The unique ID of the log" - }, - { - "name": "attribute", - "type": [ "null", { - "type": "array", - "name": "type_attribute", - "items": [ "null", { - "type": "record", - "name": "attribute_schema", - "fields": [ + "type": "record", + "name": "sight_schema", + "fields": [ + { + "name": "location", + "type": ["string", "null"], + "doc": "The current location in the nesting hierarchy of the log" + }, + { + "name": "index", + "type": ["long", "null"], + "doc": "Index of this object in the log, which provides a total order on log" + }, + { + "name": "log_uid", + "type": ["string", "null"], + "doc": "The unique ID of the log" + }, + { + "name": "attribute", + "type": [ + "null", + { + "type": "array", + "name": "type_attribute", + "items": [ + "null", + { + "type": "record", + "name": "attribute_schema", + "fields": [ + { + "name": "key", + "type": ["string", "null"], + "doc": "key of attribute" + }, + { 
+ "name": "value", + "type": ["string", "null"], + "doc": "value of key" + } + ] + } + ] + } + ], + "doc": "The attributes that are in-force at the time this object was logged" + }, + { + "name": "sub_type", + "type": [ + "null", + { + "type": "enum", + "name": "type_sub_type", + "symbols": [ + "ST_UNKNOWN", + "ST_TEXT", + "ST_BLOCK_START", + "ST_BLOCK_END", + "ST_ATTRIBUTE_START", + "ST_ATTRIBUTE_END", + "ST_FLUME_DO_FN_EMIT", + "ST_FLUME_DEPEND", + "ST_VALUE", + "ST_EXCEPTION", + "ST_TENSOR", + "ST_TENSORFLOW_EXAMPLE", + "ST_DECISION_POINT", + "ST_DECISION_OUTCOME", + "ST_GAP", + "ST_LINK", + "ST_PROPOSE_ACTION" + ], + "doc": "sub-type" + } + ], + "doc": "Sub-type of the attribute" + }, + { + "name": "text", + "type": [ + "null", + { + "namespace": "text", + "name": "text", + "type": "record", + "fields": [ + { + "name": "text", + "type": ["string", "null"], + "doc": "" + }, + { + "name": "sub_type", + "type": [ + "null", + { + "type": "enum", + "name": "type_sub_type", + "symbols": ["ST_UNKNOWN", "ST_TEXT", "ST_HTML"], + "doc": "sub-type" + } + ], + "doc": "" + } + ] + } + ], + "doc": "text from oneOf type sub_type_message" + }, + { + "name": "block_start", + "type": [ + "null", + { + "namespace": "block_start", + "name": "block_start", + "type": "record", + "fields": [ + { + "name": "label", + "type": ["string", "null"], + "doc": "" + }, + { + "name": "sub_type", + "type": [ + "null", + { + "type": "enum", + "name": "type_sub_type", + "symbols": [ + "ST_UNKNOWN", + "ST_FLUME_DO_FN_CREATE", + "ST_FLUME_DO_FN_START_DO", + "ST_FLUME_COMPARE_FN_CREATE", + "ST_FLUME_COMPARE_FN_START_COMPARE", + "ST_NAMED_VALUE", + "ST_LIST", + "ST_TABLE", + "ST_TENSORFLOW_MODEL_APPLICATION", + "ST_TENSORFLOW_MODEL_TRAINING_EPOCH", + "ST_TENSORFLOW_MODEL_TRAINING", + "ST_SIMULATION", + "ST_SIMULATION_PARAMETERS", + "ST_SIMULATION_STATE", + "ST_SIMULATION_TIME_STEP", + "ST_SIMULATION_INITIAL_STATE", + "ST_SIMULATION_BOUNDARY_STATE", + "ST_CLUSTER_ASSIGNMENT", + "ST_CONFIGURATION" + ], + "doc": "sub-type" + } + ], + "doc": "" + }, + { + "name": "list", + "type": [ + "null", + { + "namespace": "list", + "name": "list", + "type": "record", + "fields": [ { - "name": "key", - "type": ["string", "null"], - "doc": "key of attribute" + "name": "sub_type", + "type": [ + "null", + { + "type": "enum", + "name": "type_sub_type", + "symbols": [ + "ST_UNKNOWN", + "ST_HOMOGENEOUS", + "ST_HETEROGENEOUS", + "ST_MAP", + "ST_MAP_ENTRY", + "ST_DICT" + ], + "doc": "sub-type" + } + ], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "tensor_flow_model_training_epoch", + "type": [ + "null", + { + "type": "record", + "name": "type_tensor_flow_model_training_epoch", + "fields": [ + { + "name": "epoch_num", + "type": ["long", "null"], + "doc": "" }, { - "name": "value", - "type": ["string", "null"], - "doc": "value of key" + "name": "batch_size", + "type": ["long", "null"], + "doc": "" } - ] - }] - }], - "doc": "The attributes that are in-force at the time this object was logged" - }, - { - "name": "sub_type", - "type": [ "null", { - "type": "enum", - "name": "type_sub_type", - "symbols": - [ - "ST_UNKNOWN", - "ST_TEXT", - "ST_BLOCK_START", - "ST_BLOCK_END", - "ST_ATTRIBUTE_START", - "ST_ATTRIBUTE_END", - "ST_FLUME_DO_FN_EMIT", - "ST_FLUME_DEPEND", - "ST_VALUE", - "ST_EXCEPTION", - "ST_TENSOR", - "ST_TENSORFLOW_EXAMPLE", - "ST_DECISION_POINT", - "ST_DECISION_OUTCOME", - "ST_GAP", - "ST_LINK", - "ST_PROPOSE_ACTION" - ], - "doc": "sub-type" - }], - "doc": "Sub-type of the attribute" - }, - { - "name": "text", - "type": 
["null", { - "namespace": "text", - "name": "text", - "type": "record", - "fields": [ - { - "name": "text", - "type": ["string", "null"], - "doc": "" - }, - { - "name": "sub_type", - "type": [ "null", { - "type": "enum", - "name": "type_sub_type", - "symbols": - [ + ] + } + ], + "doc": "" + }, + { + "name": "flume_do_fn_create", + "type": [ + "null", + { + "type": "record", + "name": "type_flume_do_fn_create", + "fields": [ + { + "name": "label", + "type": ["string", "null"], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "flume_do_fn_start_do", + "type": [ + "null", + { + "type": "record", + "name": "type_flume_do_fn_start_do", + "fields": [ + { + "name": "input_stage_id", + "type": ["long", "null"], + "doc": "" + }, + { + "name": "input_item_id", + "type": ["long", "null"], + "doc": "" + }, + { + "name": "is_passthrough", + "type": ["boolean", "null"], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "flume_compare_fn_create", + "type": [ + "null", + { + "type": "record", + "name": "type_flume_compare_fn_create", + "fields": [ + { + "name": "label", + "type": ["string", "null"], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "flume_compare_fn_start_compare", + "type": [ + "null", + { + "type": "record", + "name": "type_flume_compare_fn_start_compare", + "fields": [ + { + "name": "input1_stage_id", + "type": ["long", "null"], + "doc": "" + }, + { + "name": "input1_item_id", + "type": ["long", "null"], + "doc": "" + }, + { + "name": "input2_stage_id", + "type": ["long", "null"], + "doc": "" + }, + { + "name": "input2_item_id", + "type": ["long", "null"], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "simulation_time_step_start", + "type": [ + "null", + { + "name": "type_simulation_time_step_start", + "type": "record", + "fields": [ + { + "name": "time_step_index", + "type": [ + "null", + { + "type": "array", + "name": "type_time_step_index", + "items": ["long", "null"] + } + ], + "doc": "" + }, + { + "name": "time_step", + "type": ["float", "null"], + "doc": "" + }, + { + "name": "time_step_size", + "type": ["float", "null"], + "doc": "" + }, + { + "name": "time_step_units", + "type": [ + "null", + { + "type": "enum", + "name": "time_step_units", + "symbols": [ "ST_UNKNOWN", - "ST_TEXT", - "ST_HTML" - ], - "doc": "sub-type" - }], - "doc": "" + "TSU_SECOND", + "TSU_MINUTE", + "TSU_HOUR", + "TSU_DAY", + "TSU_MONTH", + "TSU_QUARTER", + "TSU_YEAR" + ], + "doc": "" + } + ], + "doc": "" + } + ] } - ] - }], - "doc": "text from oneOf type sub_type_message" - }, - { - "name": "block_start", - "type": ["null", { - "namespace": "block_start", - "name": "block_start", - "type": "record", - "fields": [ - { - "name": "label", - "type": ["string", "null"], - "doc": "" - }, - { - "name": "sub_type", - "type": [ "null",{ - "type": "enum", - "name": "type_sub_type", - "symbols": - [ + ], + "doc": "" + }, + { + "name": "configuration", + "type": [ + "null", + { + "namespace": "configuration", + "name": "type_configuration", + "type": "record", + "fields": [ + { + "name": "sub_type", + "type": [ + "null", + { + "type": "enum", + "name": "type_sub_type", + "symbols": [ "ST_UNKNOWN", - "ST_FLUME_DO_FN_CREATE", - "ST_FLUME_DO_FN_START_DO", - "ST_FLUME_COMPARE_FN_CREATE", - "ST_FLUME_COMPARE_FN_START_COMPARE", - "ST_NAMED_VALUE", - "ST_LIST", - "ST_TABLE", - "ST_TENSORFLOW_MODEL_APPLICATION", - "ST_TENSORFLOW_MODEL_TRAINING_EPOCH", - "ST_TENSORFLOW_MODEL_TRAINING", - "ST_SIMULATION", - "ST_SIMULATION_PARAMETERS", - "ST_SIMULATION_STATE", - 
"ST_SIMULATION_TIME_STEP", - "ST_SIMULATION_INITIAL_STATE", - "ST_SIMULATION_BOUNDARY_STATE", - "ST_CLUSTER_ASSIGNMENT", - "ST_CONFIGURATION" - ], - "doc": "sub-type" - }], - "doc": "" - }, - { - "name": "list", - "type": ["null", { - "namespace": "list", - "name": "list", - "type": "record", - "fields": [ - { - "name": "sub_type", - "type": [ "null",{ - "type": "enum", - "name": "type_sub_type", - "symbols": - [ - "ST_UNKNOWN", - "ST_HOMOGENEOUS", - "ST_HETEROGENEOUS", - "ST_MAP", - "ST_MAP_ENTRY", - "ST_DICT" - ], - "doc": "sub-type" - }], - "doc": "" - } - ] - }], - "doc": "" - }, - { - "name": "tensor_flow_model_training_epoch", - "type": [ "null", { - "type": "record", - "name": "type_tensor_flow_model_training_epoch", - "fields": [ - { - "name": "epoch_num", - "type": ["long", "null"], - "doc": "" - }, - { - "name": "batch_size", - "type": ["long", "null"], - "doc": "" - } - ] - }], - "doc": "" - }, - { - "name": "flume_do_fn_create", - "type": [ "null", { - "type": "record", - "name": "type_flume_do_fn_create", - "fields": [ - { - "name": "label", - "type": ["string", "null"], - "doc": "" - } - ] - }], - "doc": "" - }, - { - "name": "flume_do_fn_start_do", - "type": [ "null", { - "type": "record", - "name": "type_flume_do_fn_start_do", - "fields": [ - { - "name": "input_stage_id", - "type": ["long", "null"], - "doc": "" - }, - { - "name": "input_item_id", - "type": ["long", "null"], - "doc": "" - }, - { - "name": "is_passthrough", - "type": ["boolean", "null"], - "doc": "" - } - ] - }], - "doc": "" - }, - { - "name": "flume_compare_fn_create", - "type": [ "null", { - "type": "record", - "name": "type_flume_compare_fn_create", - "fields": [ - { - "name": "label", - "type": ["string", "null"], - "doc": "" - } - ] - }], - "doc": "" - }, - { - "name": "flume_compare_fn_start_compare", - "type": [ "null", { - "type": "record", - "name": "type_flume_compare_fn_start_compare", - "fields": [ - { - "name": "input1_stage_id", - "type": ["long", "null"], - "doc": "" - }, - { - "name": "input1_item_id", - "type": ["long", "null"], - "doc": "" - }, - { - "name": "input2_stage_id", - "type": ["long", "null"], - "doc": "" - }, - { - "name": "input2_item_id", - "type": ["long", "null"], - "doc": "" - } - ] - }], - "doc": "" - }, - { - "name": "simulation_time_step_start", - "type": ["null", { - "name": "type_simulation_time_step_start", - "type": "record", - "fields": [ - { - "name": "time_step_index", - "type": [ "null", { - "type": "array", - "name": "type_time_step_index", - "items": ["long", "null"] - }], - "doc": "" - }, - { - "name": "time_step", - "type": ["float", "null"], - "doc": "" - }, - { - "name": "time_step_size", - "type": ["float", "null"], - "doc": "" - }, - { - "name": "time_step_units", - "type": [ "null",{ - "type": "enum", - "name": "time_step_units", - "symbols": - [ - "ST_UNKNOWN", - "TSU_SECOND", - "TSU_MINUTE", - "TSU_HOUR", - "TSU_DAY", - "TSU_MONTH", - "TSU_QUARTER", - "TSU_YEAR" - ], - "doc": "" - }], - "doc": "" - } - ] - }], - "doc": "" - }, - { - "name": "configuration", - "type": [ "null", { - "namespace": "configuration", - "name": "type_configuration", - "type": "record", - "fields": [ - { - "name": "sub_type", - "type": [ "null", { - "type": "enum", - "name": "type_sub_type", - "symbols": - [ - "ST_UNKNOWN", - "ST_DECISION_CONFIGURATION" - ], - "doc": "sub-type" - }], - "doc": "" - }, + "ST_DECISION_CONFIGURATION" + ], + "doc": "sub-type" + } + ], + "doc": "" + }, + { + "name": "decision_configuration", + "type": [ + "null", + { + "type": "record", + "name": 
"type_decision_configuration", + "fields": [ { - "name": "decision_configuration", - "type": [ "null", { + "name": "choice_algorithm", + "type": [ + "null", + { + "type": "map", + "name": "type_choice_algorithm", + "values": { + "name": "type_map_choice_algorithm", "type": "record", - "name": "type_decision_configuration", "fields": [ - { - "name": "choice_algorithm", - "type": ["null", { - "type": "map", - "name": "type_choice_algorithm", - "values": { - "name": "type_map_choice_algorithm", + { + "name": "fixed_choice", + "type": [ + "null", + { + "type": "record", + "name": "type_fixed_choice", + "fields": [ + { + "name": "chosen_option", + "type": ["null", "string"], + "doc": "" + }, + { + "name": "chosen_parameters", + "type": [ + "null", + { + "type": "map", + "name": "type_chosen_parameters", + "values": ["null", "string"] + } + ], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "scikit_learn", + "type": [ + "null", + { + "type": "record", + "name": "type_scikit_learn", + "fields": [ + { + "name": "model_encoding", + "type": ["null", "bytes"], + "doc": "" + }, + { + "name": "input_fields", + "type": [ + "null", + { + "type": "array", + "name": "type_input_fields", + "items": "string" + } + ], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "tf_agents", + "type": [ + "null", + { + "type": "record", + "name": "type_tf_agents", + "fields": [ + { + "name": "model_encoding", + "type": ["null", "bytes"], + "doc": "" + }, + { + "name": "input_fields", + "type": [ + "null", + { + "type": "array", + "name": "type_input_fields", + "items": "string" + } + ], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "differentiable_surrogate", + "type": [ + "null", + { + "type": "record", + "name": "type_differentiable_surrogate", + "fields": [ + { + "name": "keras_model", + "type": [ + "null", + { "type": "record", + "name": "type_keras_model", "fields": [ - { - "name": "fixed_choice", - "type": [ "null", { - "type": "record", - "name": "type_fixed_choice", - "fields": [ - { - "name": "chosen_option", - "type": [ "null", "string" ], - "doc": "" - }, - { - "name": "chosen_parameters", - "type": ["null", { - "type": "map", - "name": "type_chosen_parameters", - "values": [ "null", "string" ] - }], - "doc": "" - } - ] - }], - "doc": "" - }, - { - "name": "scikit_learn", - "type": [ "null", { - "type": "record", - "name": "type_scikit_learn", - "fields": [ - { - "name": "model_encoding", - "type": [ "null", "bytes" ], - "doc": "" - }, - { - "name": "input_fields", - "type": [ "null", { - "type": "array", - "name": "type_input_fields", - "items": "string" - }], - "doc": "" - } - ] - }], - "doc": "" - }, - { - "name": "tf_agents", - "type": [ "null", { - "type": "record", - "name": "type_tf_agents", - "fields": [ - { - "name": "model_encoding", - "type": [ "null", "bytes" ], - "doc": "" - }, - { - "name": "input_fields", - "type": [ "null", { - "type": "array", - "name": "type_input_fields", - "items": "string" - }], - "doc": "" - } + { + "name": "structure_json", + "type": [ + "null", + "string" + ], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "min_max_input_normalizer", + "type": [ + "null", + { + "namespace": "min_max_input_normalizer", + "type": "record", + "name": "type_min_max_input_normalizer", + "fields": [ + { + "name": "attr_props", + "type": [ + "null", + { + "type": "array", + "name": "type_attr_props", + "items": { + "type": "record", + "name": "attr_props_schema", + "fields": [ + { + "name": "attr_name", + "type": [ + "null", + "string" + ], + 
"doc": "" + }, + { + "name": "attr_min", + "type": [ + "null", + "float" + ], + "doc": "" + }, + { + "name": "attr_max", + "type": [ + "null", + "float" + ], + "doc": "" + } ] - }], - "doc": "" - }, - { - "name": "differentiable_surrogate", - "type": [ "null", { - "type": "record", - "name": "type_differentiable_surrogate", - "fields": [ - { - "name": "keras_model", - "type": [ "null", { - "type": "record", - "name": "type_keras_model", - "fields": [ - { - "name": "structure_json", - "type": [ "null", "string" ], - "doc": "" - } - ] - }], - "doc": "" - }, - { - "name": "min_max_input_normalizer", - "type": [ "null", { - "namespace": "min_max_input_normalizer", - "type": "record", - "name": "type_min_max_input_normalizer", - "fields": [ - { - "name": "attr_props", - "type": [ "null", { - "type": "array", - "name": "type_attr_props", - "items": { - "type": "record", - "name": "attr_props_schema", - "fields": [ - { - "name": "attr_name", - "type": [ "null", "string" ], - "doc": "" - }, - { - "name": "attr_min", - "type": [ "null", "float" ], - "doc": "" - }, - { - "name": "attr_max", - "type": [ "null", "float" ], - "doc": "" - } - ] - } - }], - "doc": "The attributes that are in-force at the time this object was logged" - } - ] - }], - "doc": "" - }, - { - "name": "min_max_output_normalizer", - "type": [ "null", { - "namespace": "min_max_output_normalizer", - "type": "record", - "name": "type_min_max_output_normalizer", - "fields": [ - { - "name": "attr_props", - "type": [ "null", { - "type": "array", - "name": "type_attr_props", - "items": { - "type": "record", - "name": "attr_props_schema", - "fields": [ - { - "name": "attr_name", - "type": [ "null", "string" ], - "doc": "" - }, - { - "name": "attr_min", - "type": [ "null", "float" ], - "doc": "" - }, - { - "name": "attr_max", - "type": [ "null", "float" ], - "doc": "" - } - ] - } - }], - "doc": "The attributes that are in-force at the time this object was logged" - } - ] - }], - "doc": "" - } + } + } + ], + "doc": "The attributes that are in-force at the time this object was logged" + } + ] + } + ], + "doc": "" + }, + { + "name": "min_max_output_normalizer", + "type": [ + "null", + { + "namespace": "min_max_output_normalizer", + "type": "record", + "name": "type_min_max_output_normalizer", + "fields": [ + { + "name": "attr_props", + "type": [ + "null", + { + "type": "array", + "name": "type_attr_props", + "items": { + "type": "record", + "name": "attr_props_schema", + "fields": [ + { + "name": "attr_name", + "type": [ + "null", + "string" + ], + "doc": "" + }, + { + "name": "attr_min", + "type": [ + "null", + "float" + ], + "doc": "" + }, + { + "name": "attr_max", + "type": [ + "null", + "float" + ], + "doc": "" + } ] - }], - "doc": "" - } + } + } + ], + "doc": "The attributes that are in-force at the time this object was logged" + } ] - } - }], - "doc": "" - }, + } + ], + "doc": "" + } + ] + } + ], + "doc": "" + } + ] + } + } + ], + "doc": "" + }, + { + "name": "state_attrs", + "type": [ + "null", + { + "type": "map", + "name": "type_state_attrs", + "values": [ + "null", + { + "namespace": "state_attrs", + "type": "record", + "name": "StateProps", + "fields": [ { - "name": "state_attrs", - "type": ["null", { - "type": "map", - "name": "type_state_attrs", - "values": [ "null", { - "namespace": "state_attrs", - "type": "record", - "name": "StateProps", - "fields": [ - { - "name": "min_value", - "type": [ "float" , "null" ], - "doc": "" - }, - { - "name": "max_value", - "type": [ "float" , "null" ], - "doc": "" - } - ] - }] - }], - "doc": 
"" + "name": "min_value", + "type": ["float", "null"], + "doc": "" }, { - "name": "action_attrs", - "type": ["null", { - "type": "map", - "name": "type_action_attrs", - "values": [ "null", { - "namespace": "action_attrs", - "type": "record", - "name": "StateProps", - "fields": [ - { - "name": "min_value", - "type": [ "float" , "null" ], - "doc": "" - }, - { - "name": "max_value", - "type": [ "float" , "null" ], - "doc": "" - } - ] - }] - }], - "doc": "" + "name": "max_value", + "type": ["float", "null"], + "doc": "" } + ] + } ] - }], - "doc": "" - } - ] - }], - "doc": "" - } - ] - }], - "doc": "block_start from oneOf type sub_type_message" - }, - { - "name": "block_end", - "type": ["null", { - "namespace": "block_end", - "name": "block_end", - "type": "record", - "fields": [ - { - "name": "label", - "type": ["string", "null"], - "doc": "" - }, - { - "name": "sub_type", - "type": [ "null",{ - "type": "enum", - "name": "type_sub_type", - "symbols": - [ - "ST_UNKNOWN", - "ST_FLUME_DO_FN_COMPLETE", - "ST_FLUME_DO_FN_END_DO", - "ST_FLUME_COMPARE_FN_COMPLETE", - "ST_FLUME_COMPARE_FN_END_COMPARE", - "ST_NAMED_VALUE", - "ST_LIST", - "ST_TABLE", - "ST_TENSORFLOW_MODEL_APPLICATION", - "ST_TENSORFLOW_MODEL_TRAINING_EPOCH", - "ST_TENSORFLOW_MODEL_TRAINING", - "ST_SIMULATION", - "ST_SIMULATION_PARAMETERS", - "ST_SIMULATION_STATE", - "ST_SIMULATION_TIME_STEP", - "ST_SIMULATION_INITIAL_STATE", - "ST_SIMULATION_BOUNDARY_STATE", - "ST_CLUSTER_ASSIGNMENT" - ], - "doc": "sub-type" - }], - "doc": "" - }, - { - "name": "location_of_block_start", - "type": ["null", "string"], - "doc": "Sub-type of the message" - }, - { - "name": "num_direct_contents", - "type": ["null", "long"], - "doc": "Sub-type of the message" - }, - { - "name": "num_transitive_contents", - "type": ["null", "long"], - "doc": "Sub-type of the message" - }, - { - "name": "flume_do_fn_complete", - "type": [ "null", { - "type": "record", - "name": "type_flume_do_fn_complete", - "fields": [ - { - "name": "label", - "type": ["string", "null"], - "doc": "" - } - ] - }], - "doc": "" - }, - { - "name": "flume_do_fn_end_do", - "type": [ "null", { - "type": "record", - "name": "type_flume_do_fn_end_do", - "fields": [ - { - "name": "input_stage_id", - "type": ["long", "null"], - "doc": "" - } - ] - }], - "doc": "" - }, - { - "name": "flume_compare_fn_complete", - "type": [ "null", { - "type": "record", - "name": "type_flume_compare_fn_complete", - "fields": [ - { - "name": "label", - "type": ["string", "null"], - "doc": "" - } - ] - }], - "doc": "" - }, - { - "name": "flume_compare_fn_end_compare", - "type": [ "null", { - "type": "record", - "name": "type_flume_compare_fn_end_compare", - "fields": [ - { - "name": "input_stage_id", - "type": ["long", "null"], - "doc": "" - } - ] - }], - "doc": "" - }, - { - "name": "metrics", - "type": [ "null", { - "type": "record", - "name": "type_metrics", - "fields": [ - { - "name": "elapsed_time_ns", - "type": ["long", "null"], - "doc": "" + } + ], + "doc": "" }, { - "name": "exclusive_elapsed_time_ns", - "type": ["long", "null"], - "doc": "" + "name": "action_attrs", + "type": [ + "null", + { + "type": "map", + "name": "type_action_attrs", + "values": [ + "null", + { + "namespace": "action_attrs", + "type": "record", + "name": "StateProps", + "fields": [ + { + "name": "min_value", + "type": ["float", "null"], + "doc": "" + }, + { + "name": "max_value", + "type": ["float", "null"], + "doc": "" + } + ] + } + ] + } + ], + "doc": "" } - ] - }], - "doc": "Encapsulates meta-data that tracks the dynamic behavior of the 
system" + ] + } + ], + "doc": "" + } + ] + } + ], + "doc": "" + } + ] + } + ], + "doc": "block_start from oneOf type sub_type_message" + }, + { + "name": "block_end", + "type": [ + "null", + { + "namespace": "block_end", + "name": "block_end", + "type": "record", + "fields": [ + { + "name": "label", + "type": ["string", "null"], + "doc": "" + }, + { + "name": "sub_type", + "type": [ + "null", + { + "type": "enum", + "name": "type_sub_type", + "symbols": [ + "ST_UNKNOWN", + "ST_FLUME_DO_FN_COMPLETE", + "ST_FLUME_DO_FN_END_DO", + "ST_FLUME_COMPARE_FN_COMPLETE", + "ST_FLUME_COMPARE_FN_END_COMPARE", + "ST_NAMED_VALUE", + "ST_LIST", + "ST_TABLE", + "ST_TENSORFLOW_MODEL_APPLICATION", + "ST_TENSORFLOW_MODEL_TRAINING_EPOCH", + "ST_TENSORFLOW_MODEL_TRAINING", + "ST_SIMULATION", + "ST_SIMULATION_PARAMETERS", + "ST_SIMULATION_STATE", + "ST_SIMULATION_TIME_STEP", + "ST_SIMULATION_INITIAL_STATE", + "ST_SIMULATION_BOUNDARY_STATE", + "ST_CLUSTER_ASSIGNMENT" + ], + "doc": "sub-type" + } + ], + "doc": "" + }, + { + "name": "location_of_block_start", + "type": ["null", "string"], + "doc": "Sub-type of the message" + }, + { + "name": "num_direct_contents", + "type": ["null", "long"], + "doc": "Sub-type of the message" + }, + { + "name": "num_transitive_contents", + "type": ["null", "long"], + "doc": "Sub-type of the message" + }, + { + "name": "flume_do_fn_complete", + "type": [ + "null", + { + "type": "record", + "name": "type_flume_do_fn_complete", + "fields": [ + { + "name": "label", + "type": ["string", "null"], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "flume_do_fn_end_do", + "type": [ + "null", + { + "type": "record", + "name": "type_flume_do_fn_end_do", + "fields": [ + { + "name": "input_stage_id", + "type": ["long", "null"], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "flume_compare_fn_complete", + "type": [ + "null", + { + "type": "record", + "name": "type_flume_compare_fn_complete", + "fields": [ + { + "name": "label", + "type": ["string", "null"], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "flume_compare_fn_end_compare", + "type": [ + "null", + { + "type": "record", + "name": "type_flume_compare_fn_end_compare", + "fields": [ + { + "name": "input_stage_id", + "type": ["long", "null"], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "metrics", + "type": [ + "null", + { + "type": "record", + "name": "type_metrics", + "fields": [ + { + "name": "elapsed_time_ns", + "type": ["long", "null"], + "doc": "" + }, + { + "name": "exclusive_elapsed_time_ns", + "type": ["long", "null"], + "doc": "" + } + ] } - ] - }], - "doc": "block_end from oneOf type sub_type_message" - }, - { - "name": "attribute_start", - "type": ["null", { - "name": "type_attribute_start", - "type": "record", - "fields": [ - { - "name": "attribute", - "type": ["null", { - "name": "type_attribute", - "type": "record", - "fields": [ - { - "name": "key", - "type": ["string", "null"], - "doc": "" - }, + ], + "doc": "Encapsulates meta-data that tracks the dynamic behavior of the system" + } + ] + } + ], + "doc": "block_end from oneOf type sub_type_message" + }, + { + "name": "attribute_start", + "type": [ + "null", + { + "name": "type_attribute_start", + "type": "record", + "fields": [ + { + "name": "attribute", + "type": [ + "null", + { + "name": "type_attribute", + "type": "record", + "fields": [ + { + "name": "key", + "type": ["string", "null"], + "doc": "" + }, + { + "name": "value", + "type": ["string", "null"], + "doc": "" + } + ] + } + ], + "doc": "" + } + ] + } + ], + 
"doc": "Identifies the start of a region where a given key has a given value" + }, + { + "name": "attribute_end", + "type": [ + "null", + { + "name": "type_attribute_end", + "type": "record", + "fields": [ + { + "name": "key", + "type": ["string", "null"], + "doc": "" + } + ] + } + ], + "doc": "The ending point of a region where a given key has a given value" + }, + { + "name": "flume_do_fn_emit", + "type": [ + "null", + { + "name": "type_flume_do_fn_emit", + "type": "record", + "fields": [ + { + "name": "stage_id", + "type": ["long", "null"], + "doc": "" + }, + { + "name": "item_id", + "type": ["long", "null"], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "flume_depend", + "type": [ + "null", + { + "name": "type_flume_depend", + "type": "record", + "fields": [ + { + "name": "input_stage_id", + "type": ["long", "null"], + "doc": "" + }, + { + "name": "input_item_id", + "type": ["long", "null"], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "value", + "type": [ + "null", + { + "namespace": "value", + "name": "type_value", + "type": "record", + "fields": [ + { + "name": "sub_type", + "type": [ + "null", + { + "type": "enum", + "name": "type_sub_type", + "symbols": [ + "ST_UNKNOWN", + "ST_STRING", + "ST_HETEROGENEOUS", + "ST_BYTES", + "ST_INT64", + "ST_DOUBLE", + "ST_BOOL", + "ST_NONE" + ], + "doc": "sub-type" + } + ], + "doc": "" + }, + { + "name": "string_value", + "type": ["null", "string"], + "doc": "" + }, + { + "name": "bytes_value", + "type": ["null", "bytes"], + "doc": "" + }, + { + "name": "int64_value", + "type": ["null", "long"], + "doc": "" + }, + { + "name": "double_value", + "type": ["null", "double"], + "doc": "" + }, + { + "name": "bool_value", + "type": ["null", "boolean"], + "doc": "" + }, + { + "name": "none_value", + "type": ["null", "boolean"], + "doc": "" + }, + { + "name": "mime_type", + "type": ["null", "string"], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "exception", + "type": [ + "null", + { + "name": "type_exception", + "type": "record", + "fields": [ + { + "name": "type", + "type": ["string", "null"], + "doc": "" + }, + { + "name": "value", + "type": ["string", "null"], + "doc": "" + }, + { + "name": "traceback", + "type": ["string", "null"], + "doc": "" + } + ] + } + ], + "doc": "The ending point of a region where a given key has a given value" + }, + { + "name": "tensor", + "type": [ + "null", + { + "namespace": "tensor", + "name": "type_tensor", + "type": "record", + "fields": [ + { + "name": "sub_type", + "type": [ + "null", + { + "type": "enum", + "name": "type_sub_type", + "symbols": [ + "ST_UNKNOWN", + "ST_STRING", + "ST_BYTES", + "ST_INT64", + "ST_DOUBLE", + "ST_BOOL" + ], + "doc": "sub-type" + } + ], + "doc": "" + }, + { + "name": "label", + "type": ["string", "null"], + "doc": "" + }, + { + "name": "shape", + "type": { + "type": "array", + "name": "type_shape", + "items": ["long", "null"] + }, + "doc": "" + }, + { + "name": "dim_label", + "type": [ + "null", + { + "type": "array", + "name": "type_dim_label", + "items": ["string", "null"] + } + ], + "doc": "" + }, + { + "name": "dim_axis_values", + "type": [ + "null", + { + "type": "array", + "name": "type_dim_axis_values", + "items": [ + "null", + { + "type": "record", + "name": "type_string_values", + "fields": [ + { + "name": "value", + "type": [ + "null", { - "name": "value", - "type": ["string", "null"], - "doc": "" + "type": "array", + "name": "type_value", + "items": "string" } - ] - }], - "doc": "" + ], + "doc": "" + } + ] + } + ] } - ] - }], - "doc": 
"Identifies the start of a region where a given key has a given value" - }, - { - "name": "attribute_end", - "type": ["null", { - "name": "type_attribute_end", - "type": "record", - "fields": [ - { - "name": "key", - "type": ["string", "null"], - "doc": "" + ], + "doc": "The attributes that are in-force at the time this object was logged" + }, + { + "name": "string_values", + "type": [ + "null", + { + "namespace": "value_type", + "type": "record", + "name": "type_string_values", + "fields": [ + { + "name": "value", + "type": [ + "null", + { + "type": "array", + "name": "type_value", + "items": ["string", "null"] + } + ], + "doc": "" + } + ] } - ] - }], - "doc": "The ending point of a region where a given key has a given value" - }, - { - "name": "flume_do_fn_emit", - "type": ["null", { - "name": "type_flume_do_fn_emit", - "type": "record", - "fields": [ - { - "name": "stage_id", - "type": ["long", "null"], - "doc": "" - }, - { - "name": "item_id", - "type": ["long", "null"], - "doc": "" + ], + "doc": "The attributes that are in-force at the time this object was logged" + }, + { + "name": "bytes_values", + "type": [ + "null", + { + "namespace": "value_type", + "type": "record", + "name": "type_bytes_values", + "fields": [ + { + "name": "value", + "type": [ + "null", + { + "type": "array", + "name": "type_value", + "items": ["bytes", "null"] + } + ], + "doc": "" + } + ] } - ] - }], - "doc": "" - }, - { - "name": "flume_depend", - "type": [ "null", { - "name": "type_flume_depend", - "type": "record", - "fields": [ - { - "name": "input_stage_id", - "type": ["long", "null"], - "doc": "" - }, - { - "name": "input_item_id", - "type": ["long", "null"], - "doc": "" + ], + "doc": "The attributes that are in-force at the time this object was logged" + }, + { + "name": "int64_values", + "type": [ + "null", + { + "namespace": "value_type", + "type": "record", + "name": "type_int64_values", + "fields": [ + { + "name": "value", + "type": [ + "null", + { + "type": "array", + "name": "type_value", + "items": ["long", "null"] + } + ], + "doc": "" + } + ] } - ] - }], - "doc": "" - }, - { - "name": "value", - "type": [ "null", { - "namespace": "value", - "name": "type_value", - "type": "record", - "fields": [ - { - "name": "sub_type", - "type": [ "null", { - "type": "enum", - "name": "type_sub_type", - "symbols": - [ - "ST_UNKNOWN", - "ST_STRING", - "ST_HETEROGENEOUS", - "ST_BYTES", - "ST_INT64", - "ST_DOUBLE", - "ST_BOOL", - "ST_NONE" - ], - "doc": "sub-type" - }], - "doc": "" - }, - { - "name": "string_value", - "type": [ "null", "string" ], - "doc": "" - }, - { - "name": "bytes_value", - "type": [ "null", "bytes" ], - "doc": "" - }, - { - "name": "int64_value", - "type": [ "null", "long" ], - "doc": "" - }, - { - "name": "double_value", - "type": [ "null", "double" ], - "doc": "" - }, - { - "name": "bool_value", - "type": [ "null", "boolean" ], - "doc": "" - }, - { - "name": "none_value", - "type": [ "null", "boolean" ], - "doc": "" - }, - { - "name": "mime_type", - "type": [ "null", "string" ], - "doc": "" + ], + "doc": "The attributes that are in-force at the time this object was logged" + }, + { + "name": "double_values", + "type": [ + "null", + { + "namespace": "value_type", + "type": "record", + "name": "type_double_values", + "fields": [ + { + "name": "value", + "type": [ + "null", + { + "type": "array", + "name": "type_value", + "items": ["double", "null"] + } + ], + "doc": "" + } + ] } - ] - }], - "doc": "" - }, - { - "name": "exception", - "type": ["null", { - "name": "type_exception", - "type": 
"record", - "fields": [ - { - "name": "type", - "type": ["string", "null"], - "doc": "" - }, - { - "name": "value", - "type": ["string", "null"], - "doc": "" - }, - { - "name": "traceback", - "type": ["string", "null"], - "doc": "" + ], + "doc": "The attributes that are in-force at the time this object was logged" + }, + { + "name": "bool_values", + "type": [ + "null", + { + "namespace": "value_type", + "type": "record", + "name": "type_bool_values", + "fields": [ + { + "name": "value", + "type": [ + "null", + { + "type": "array", + "name": "type_value", + "items": ["boolean", "null"] + } + ], + "doc": "" + } + ] } - ] - }], - "doc": "The ending point of a region where a given key has a given value" - }, - { - "name": "tensor", - "type": [ "null", { - "namespace": "tensor", - "name": "type_tensor", - "type": "record", - "fields": [ - { - "name": "sub_type", - "type": [ "null", { - "type": "enum", - "name": "type_sub_type", - "symbols": - [ - "ST_UNKNOWN", - "ST_STRING", - "ST_BYTES", - "ST_INT64", - "ST_DOUBLE", - "ST_BOOL" - ], - "doc": "sub-type" - }], - "doc": "" - }, - { - "name": "label", - "type": ["string", "null"], - "doc": "" - }, - { - "name": "shape", - "type": { - "type": "array", - "name": "type_shape", - "items": ["long", "null"] - }, - "doc": "" - }, - { - "name": "dim_label", - "type": [ "null", { - "type": "array", - "name": "type_dim_label", - "items": ["string", "null"] - }], - "doc": "" - }, - { - "name": "dim_axis_values", - "type": [ "null", { - "type": "array", - "name": "type_dim_axis_values", - "items": [ "null", { - "type": "record", - "name": "type_string_values", - "fields": [ + ], + "doc": "The attributes that are in-force at the time this object was logged" + } + ] + } + ], + "doc": "" + }, + { + "name": "tensor_flow_example", + "type": [ + "null", + { + "namespace": "tensor_flow_example", + "name": "type_tensor_flow_example", + "type": "record", + "fields": [ + { + "name": "input_example", + "type": [ + "null", + { + "namespace": "input_example", + "type": "record", + "name": "type_input_example", + "fields": [ + { + "name": "features", + "namespace": "features", + "type": [ + "null", + { + "name": "type_features", + "type": "record", + "fields": [ + { + "name": "feature", + "type": [ + "null", + { + "type": "map", + "name": "type_feature", + "values": [ + "null", { - "name": "value", - "type": [ "null", { - "type": "array", - "name": "type_value", - "items": "string" - }], - "doc": "" + "name": "type_map_features", + "type": "record", + "fields": [ + { + "name": "bytes_list", + "type": [ + "null", + { + "type": "record", + "name": "type_bytes_list", + "fields": [ + { + "name": "value", + "type": [ + "null", + { + "type": "array", + "name": "type_value", + "items": ["bytes", "null"] + } + ], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "float_list", + "type": [ + "null", + { + "type": "record", + "name": "type_float_list", + "fields": [ + { + "name": "value", + "type": [ + "null", + { + "type": "array", + "name": "type_value", + "items": ["float", "null"] + } + ], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "int64_list", + "type": [ + "null", + { + "type": "record", + "name": "type_int64_list", + "fields": [ + { + "name": "value", + "type": [ + "null", + { + "type": "array", + "name": "type_value", + "items": ["long", "null"] + } + ], + "doc": "" + } + ] + } + ], + "doc": "" + } + ] } - ] - }] - }], - "doc": "The attributes that are in-force at the time this object was logged" - }, - { - "name": "string_values", - "type": [ 
"null", { - "namespace": "value_type", - "type": "record", - "name": "type_string_values", - "fields": [ - { - "name": "value", - "type": [ "null", { - "type": "array", - "name": "type_value", - "items": ["string", "null"] - }], - "doc": "" - } - ] - }], - "doc": "The attributes that are in-force at the time this object was logged" - }, - { - "name": "bytes_values", - "type": [ "null", { - "namespace": "value_type", - "type": "record", - "name": "type_bytes_values", - "fields": [ - { - "name": "value", - "type": [ "null", { - "type": "array", - "name": "type_value", - "items": ["bytes", "null"] - }], - "doc": "" - } - ] - }], - "doc": "The attributes that are in-force at the time this object was logged" - }, - { - "name": "int64_values", - "type": [ "null", { - "namespace": "value_type", - "type": "record", - "name": "type_int64_values", - "fields": [ - { - "name": "value", - "type": [ "null", { - "type": "array", - "name": "type_value", - "items": ["long", "null"] - }], - "doc": "" - } - ] - }], - "doc": "The attributes that are in-force at the time this object was logged" - }, - { - "name": "double_values", - "type": [ "null", { - "namespace": "value_type", - "type": "record", - "name": "type_double_values", - "fields": [ - { - "name": "value", - "type": [ "null", { - "type": "array", - "name": "type_value", - "items": ["double", "null"] - }], - "doc": "" - } - ] - }], - "doc": "The attributes that are in-force at the time this object was logged" - }, - { - "name": "bool_values", - "type": [ "null", { - "namespace": "value_type", - "type": "record", - "name": "type_bool_values", - "fields": [ - { - "name": "value", - "type": [ "null", { - "type": "array", - "name": "type_value", - "items": ["boolean", "null"] - }], - "doc": "" + ] + } + ], + "doc": "" } - ] - }], - "doc": "The attributes that are in-force at the time this object was logged" + ] + } + ], + "doc": "" + } + ] } - ] - }], - "doc": "" - }, - { - "name": "tensor_flow_example", - "type": [ "null", { - "namespace": "tensor_flow_example", - "name": "type_tensor_flow_example", - "type": "record", - "fields": [ - { - "name": "input_example", - "type": [ "null", { - "namespace": "input_example", - "type": "record", - "name": "type_input_example", - "fields": [ + ], + "doc": "" + }, + { + "name": "input_sequence_example", + "type": [ + "null", + { + "namespace": "input_sequence_example", + "type": "record", + "name": "type_input_sequence_example", + "fields": [ + { + "name": "context", + "type": [ + "null", + { + "namespace": "input_sequence_example.context", + "name": "type_context", + "type": "record", + "fields": [ { - "name": "features", - "namespace": "features", - "type": ["null", { - "name": "type_features", - "type": "record", - "fields": [ + "name": "feature", + "type": [ + "null", + { + "type": "map", + "name": "type_feature", + "values": [ + "null", + { + "name": "type_map_features", + "type": "record", + "fields": [ { - "name": "feature", - "type": ["null", { - "type": "map", - "name": "type_feature", - "values": [ "null", { - "name": "type_map_features", - "type": "record", - "fields": [ - { - "name": "bytes_list", - "type": [ "null", { - "type": "record", - "name": "type_bytes_list", - "fields": [ - { - "name": "value", - "type": [ "null", { - "type": "array", - "name": "type_value", - "items": ["bytes", "null"] - }], - "doc": "" - } - ] - }], - "doc": "" - }, - { - "name": "float_list", - "type": [ "null", { - "type": "record", - "name": "type_float_list", - "fields": [ - { - "name": "value", - "type": [ "null", { - 
"type": "array", - "name": "type_value", - "items": ["float", "null"] - }], - "doc": "" - } - ] - }], - "doc": "" - }, - { - "name": "int64_list", - "type": [ "null", { - "type": "record", - "name": "type_int64_list", - "fields": [ - { - "name": "value", - "type": [ "null", { - "type": "array", - "name": "type_value", - "items": ["long", "null"] - }], - "doc": "" - } - ] - }], - "doc": "" - } - ] - }] - }], - "doc": "" + "name": "bytes_list", + "type": [ + "null", + { + "type": "record", + "name": "type_bytes_list", + "fields": [ + { + "name": "value", + "type": [ + "null", + { + "type": "array", + "name": "type_value", + "items": ["bytes", "null"] + } + ], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "float_list", + "type": [ + "null", + { + "type": "record", + "name": "type_float_list", + "fields": [ + { + "name": "value", + "type": [ + "null", + { + "type": "array", + "name": "type_value", + "items": ["float", "null"] + } + ], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "int64_list", + "type": [ + "null", + { + "type": "record", + "name": "type_int64_list", + "fields": [ + { + "name": "value", + "type": [ + "null", + { + "type": "array", + "name": "type_value", + "items": ["long", "null"] + } + ], + "doc": "" + } + ] + } + ], + "doc": "" } - ] - }], - "doc": "" + ] + } + ] + } + ], + "doc": "" } - ] - }], - "doc": "" - }, - { - "name": "input_sequence_example", - "type": [ "null", { - "namespace": "input_sequence_example", - "type": "record", - "name": "type_input_sequence_example", - "fields": [ + ] + } + ], + "doc": "" + }, + { + "name": "feature_lists", + "type": [ + "null", + { + "namespace": "input_sequence_example.feature_lists", + "name": "type_feature_lists", + "type": "record", + "fields": [ { - - "name": "context", - "type": ["null", { - "namespace": "input_sequence_example.context", - "name": "type_context", - "type": "record", - "fields": [ + "name": "feature_list", + "type": [ + "null", + { + "type": "map", + "name": "type_feature_list", + "values": [ + "null", + { + "name": "type_map_feature_list", + "type": "record", + "fields": [ { - "name": "feature", - "type": ["null", { - "type": "map", - "name": "type_feature", - "values": [ "null", { - "name": "type_map_features", - "type": "record", - "fields": [ + "name": "feature", + "type": { + "type": "array", + "name": "type_feature", + "items": { + "name": "type_item_feature", + "type": "record", + "fields": [ + { + "name": "bytes_list", + "type": [ + "null", + { + "type": "record", + "name": "type_bytes_list", + "fields": [ { - "name": "bytes_list", - "type": [ "null", { - "type": "record", - "name": "type_bytes_list", - "fields": [ - { - "name": "value", - "type": [ "null", { - "type": "array", - "name": "type_value", - "items": ["bytes", "null"] - }], - "doc": "" - } + "name": "value", + "type": [ + "null", + { + "type": "array", + "name": "type_value", + "items": [ + "bytes", + "null" ] - }], - "doc": "" - }, + } + ], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "float_list", + "type": [ + "null", + { + "type": "record", + "name": "type_float_list", + "fields": [ { - "name": "float_list", - "type": [ "null", { - "type": "record", - "name": "type_float_list", - "fields": [ - { - "name": "value", - "type": [ "null", { - "type": "array", - "name": "type_value", - "items": ["float", "null"] - }], - "doc": "" - } + "name": "value", + "type": [ + "null", + { + "type": "array", + "name": "type_value", + "items": [ + "float", + "null" ] - }], - "doc": "" - }, + } + ], + "doc": "" + } 
+ ] + } + ], + "doc": "" + }, + { + "name": "int64_list", + "type": [ + "null", + { + "type": "record", + "name": "type_int64_list", + "fields": [ { - "name": "int64_list", - "type": [ "null", { - "type": "record", - "name": "type_int64_list", - "fields": [ - { - "name": "value", - "type": [ "null", { - "type": "array", - "name": "type_value", - "items": ["long", "null"] - }], - "doc": "" - } + "name": "value", + "type": [ + "null", + { + "type": "array", + "name": "type_value", + "items": [ + "long", + "null" ] - }], - "doc": "" + } + ], + "doc": "" } - ] - }] - }], - "doc": "" + ] + } + ], + "doc": "" + } + ] + } + }, + "doc": "" } - ] - }], - "doc": "" - }, + ] + } + ] + } + ], + "doc": "" + } + ] + } + ], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "output_example", + "type": [ + "null", + { + "namespace": "output_example", + "type": "record", + "name": "type_output_example", + "fields": [ + { + "name": "features", + "namespace": "features", + "type": [ + "null", + { + "name": "type_features", + "type": "record", + "fields": [ { - "name": "feature_lists", - "type": ["null", { - "namespace": "input_sequence_example.feature_lists", - "name": "type_feature_lists", - "type": "record", - "fields": [ + "name": "feature", + "type": [ + "null", + { + "type": "map", + "name": "type_feature", + "values": [ + "null", + { + "name": "type_map_features", + "type": "record", + "fields": [ { - "name": "feature_list", - "type": ["null", { - "type": "map", - "name": "type_feature_list", - "values": [ "null", { - "name": "type_map_feature_list", - "type": "record", - "fields": [ - { - "name": "feature", - "type": { - "type": "array", - "name": "type_feature", - "items": - { - "name": "type_item_feature", - "type": "record", - "fields": [ - { - "name": "bytes_list", - "type": [ "null", { - "type": "record", - "name": "type_bytes_list", - "fields": [ - { - "name": "value", - "type": [ "null", { - "type": "array", - "name": "type_value", - "items": ["bytes", "null"] - }], - "doc": "" - } - ] - }], - "doc": "" - }, - { - "name": "float_list", - "type": [ "null", { - "type": "record", - "name": "type_float_list", - "fields": [ - { - "name": "value", - "type": [ "null", { - "type": "array", - "name": "type_value", - "items": ["float", "null"] - }], - "doc": "" - } - ] - }], - "doc": "" - }, - { - "name": "int64_list", - "type": [ "null", { - "type": "record", - "name": "type_int64_list", - "fields": [ - { - "name": "value", - "type": [ "null", { - "type": "array", - "name": "type_value", - "items": ["long", "null"] - }], - "doc": "" - } - ] - }], - "doc": "" - } - ] - } - }, - "doc": "" - } - ] - }] - }], - "doc": "" + "name": "bytes_list", + "type": [ + "null", + { + "type": "record", + "name": "type_bytes_list", + "fields": [ + { + "name": "value", + "type": [ + "null", + { + "type": "array", + "name": "type_value", + "items": ["bytes", "null"] + } + ], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "float_list", + "type": [ + "null", + { + "type": "record", + "name": "type_float_list", + "fields": [ + { + "name": "value", + "type": [ + "null", + { + "type": "array", + "name": "type_value", + "items": ["float", "null"] + } + ], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "int64_list", + "type": [ + "null", + { + "type": "record", + "name": "type_int64_list", + "fields": [ + { + "name": "value", + "type": [ + "null", + { + "type": "array", + "name": "type_value", + "items": ["long", "null"] + } + ], + "doc": "" + } + ] + } + ], + "doc": "" } - ] - }], - "doc": "" + 
] + } + ] + } + ], + "doc": "" } - ] - }], - "doc": "" - }, - { - "name": "output_example", - "type": [ "null", { - "namespace": "output_example", - "type": "record", - "name": "type_output_example", - "fields": [ + ] + } + ], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "output_sequence_example", + "type": [ + "null", + { + "namespace": "output_sequence_example", + "type": "record", + "name": "type_output_sequence_example", + "fields": [ + { + "name": "context", + "type": [ + "null", + { + "namespace": "output_sequence_example.context", + "name": "type_context", + "type": "record", + "fields": [ { - "name": "features", - "namespace": "features", - "type": ["null", { - "name": "type_features", - "type": "record", - "fields": [ + "name": "feature", + "type": [ + "null", + { + "type": "map", + "name": "type_feature", + "values": [ + "null", + { + "name": "type_map_features", + "type": "record", + "fields": [ { - "name": "feature", - "type": ["null", { - "type": "map", - "name": "type_feature", - "values": [ "null", { - "name": "type_map_features", - "type": "record", - "fields": [ - { - "name": "bytes_list", - "type": [ "null", { - "type": "record", - "name": "type_bytes_list", - "fields": [ - { - "name": "value", - "type": [ "null", { - "type": "array", - "name": "type_value", - "items": ["bytes", "null"] - }], - "doc": "" - } - ] - }], - "doc": "" - }, - { - "name": "float_list", - "type": [ "null", { - "type": "record", - "name": "type_float_list", - "fields": [ - { - "name": "value", - "type": [ "null", { - "type": "array", - "name": "type_value", - "items": ["float", "null"] - }], - "doc": "" - } - ] - }], - "doc": "" - }, - { - "name": "int64_list", - "type": [ "null", { - "type": "record", - "name": "type_int64_list", - "fields": [ - { - "name": "value", - "type": [ "null", { - "type": "array", - "name": "type_value", - "items": ["long", "null"] - }], - "doc": "" - } - ] - }], - "doc": "" - } - ] - }] - }], - "doc": "" + "name": "bytes_list", + "type": [ + "null", + { + "type": "record", + "name": "type_bytes_list", + "fields": [ + { + "name": "value", + "type": [ + "null", + { + "type": "array", + "name": "type_value", + "items": ["bytes", "null"] + } + ], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "float_list", + "type": [ + "null", + { + "type": "record", + "name": "type_float_list", + "fields": [ + { + "name": "value", + "type": [ + "null", + { + "type": "array", + "name": "type_value", + "items": ["float", "null"] + } + ], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "int64_list", + "type": [ + "null", + { + "type": "record", + "name": "type_int64_list", + "fields": [ + { + "name": "value", + "type": [ + "null", + { + "type": "array", + "name": "type_value", + "items": ["long", "null"] + } + ], + "doc": "" + } + ] + } + ], + "doc": "" } - ] - }], - "doc": "" + ] + } + ] + } + ], + "doc": "" } - ] - }], - "doc": "" - }, - { - "name": "output_sequence_example", - "type": [ "null", { - "namespace": "output_sequence_example", - "type": "record", - "name": "type_output_sequence_example", - "fields": [ + ] + } + ], + "doc": "" + }, + { + "name": "feature_lists", + "type": [ + "null", + { + "namespace": "output_sequence_example.feature_lists", + "name": "type_feature_lists", + "type": "record", + "fields": [ { - - "name": "context", - "type": ["null", { - "namespace": "output_sequence_example.context", - "name": "type_context", - "type": "record", - "fields": [ + "name": "feature_list", + "type": [ + "null", + { + "type": "map", + "name": 
"type_feature_list", + "values": [ + "null", + { + "name": "type_map_feature_list", + "type": "record", + "fields": [ { - "name": "feature", - "type": ["null", { - "type": "map", - "name": "type_feature", - "values": [ "null", { - "name": "type_map_features", - "type": "record", - "fields": [ - { - "name": "bytes_list", - "type": [ "null", { - "type": "record", - "name": "type_bytes_list", - "fields": [ - { - "name": "value", - "type": [ "null", { - "type": "array", - "name": "type_value", - "items": ["bytes", "null"] - }], - "doc": "" - } - ] - }], - "doc": "" - }, + "name": "feature", + "type": { + "type": "array", + "name": "type_feature", + "items": { + "name": "type_item_feature", + "type": "record", + "fields": [ + { + "name": "bytes_list", + "type": [ + "null", + { + "type": "record", + "name": "type_bytes_list", + "fields": [ { - "name": "float_list", - "type": [ "null", { - "type": "record", - "name": "type_float_list", - "fields": [ - { - "name": "value", - "type": [ "null", { - "type": "array", - "name": "type_value", - "items": ["float", "null"] - }], - "doc": "" - } + "name": "value", + "type": [ + "null", + { + "type": "array", + "name": "type_value", + "items": [ + "bytes", + "null" ] - }], - "doc": "" - }, + } + ], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "float_list", + "type": [ + "null", + { + "type": "record", + "name": "type_float_list", + "fields": [ { - "name": "int64_list", - "type": [ "null", { - "type": "record", - "name": "type_int64_list", - "fields": [ - { - "name": "value", - "type": [ "null", { - "type": "array", - "name": "type_value", - "items": ["long", "null"] - }], - "doc": "" - } + "name": "value", + "type": [ + "null", + { + "type": "array", + "name": "type_value", + "items": [ + "float", + "null" ] - }], - "doc": "" + } + ], + "doc": "" } - ] - }] - }], - "doc": "" - } - ] - }], - "doc": "" - }, - { - "name": "feature_lists", - "type": ["null", { - "namespace": "output_sequence_example.feature_lists", - "name": "type_feature_lists", - "type": "record", - "fields": [ - { - "name": "feature_list", - "type": ["null", { - "type": "map", - "name": "type_feature_list", - "values": [ "null", { - "name": "type_map_feature_list", - "type": "record", - "fields": [ + ] + } + ], + "doc": "" + }, + { + "name": "int64_list", + "type": [ + "null", + { + "type": "record", + "name": "type_int64_list", + "fields": [ { - "name": "feature", - "type": { - "type": "array", - "name": "type_feature", - "items": - { - "name": "type_item_feature", - "type": "record", - "fields": [ - { - "name": "bytes_list", - "type": [ "null", { - "type": "record", - "name": "type_bytes_list", - "fields": [ - { - "name": "value", - "type": [ "null", { - "type": "array", - "name": "type_value", - "items": ["bytes", "null"] - }], - "doc": "" - } - ] - }], - "doc": "" - }, - { - "name": "float_list", - "type": [ "null", { - "type": "record", - "name": "type_float_list", - "fields": [ - { - "name": "value", - "type": [ "null", { - "type": "array", - "name": "type_value", - "items": ["float", "null"] - }], - "doc": "" - } - ] - }], - "doc": "" - }, - { - "name": "int64_list", - "type": [ "null", { - "type": "record", - "name": "type_int64_list", - "fields": [ - { - "name": "value", - "type": [ "null", { - "type": "array", - "name": "type_value", - "items": ["long", "null"] - }], - "doc": "" - } - ] - }], - "doc": "" - } - ] - } - }, - "doc": "" + "name": "value", + "type": [ + "null", + { + "type": "array", + "name": "type_value", + "items": [ + "long", + "null" + ] + } + ], + 
"doc": "" } - ] - }] - }], - "doc": "" + ] + } + ], + "doc": "" + } + ] + } + }, + "doc": "" } - ] - }], - "doc": "" + ] + } + ] + } + ], + "doc": "" } - ] - }], - "doc": "" + ] + } + ], + "doc": "" + } + ] } - ] - }], - "doc": "" - }, - { - "name": "decision_point", - "type": ["null", { - "name": "type_decision_point", - "type": "record", - "fields": [ - { - "name": "choice_label", - "type": ["string", "null"], - "doc": "" - }, - { - "name": "chosen_option", - "type": ["string", "null"], - "doc": "" - }, - { - "name": "choice_params", - "type": [ "null", { - "type": "array", - "name": "type_decision_param", - "items": [ "null", { + ], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "decision_point", + "type": [ + "null", + { + "name": "type_decision_point", + "type": "record", + "fields": [ + { + "name": "choice_label", + "type": ["string", "null"], + "doc": "" + }, + { + "name": "chosen_option", + "type": ["string", "null"], + "doc": "" + }, + { + "name": "choice_params", + "type": [ + "null", + { + "name": "type_params_map_01", + "type": "record", + "fields": [ + { + "name": "params", + "type": [ + "null", + { + "type": "map", + "values": { "type": "record", - "name": "decision_param_schema", + "name": "type_params_value_01", "fields": [ - { - "name": "key", - "type": ["string", "null"], - "doc": "key of attribute" - }, - { - "name": "value", - "type": [ "null", { - "namespace": "value", - "name": "type_value2", - "type": "record", - "fields": [ - { - "name": "sub_type", - "type": [ "null", { - "type": "enum", - "name": "type_sub_type2", - "symbols": - [ - "ST_UNKNOWN", - "ST_STRING", - "ST_HETEROGENEOUS", - "ST_BYTES", - "ST_INT64", - "ST_DOUBLE", - "ST_BOOL", - "ST_NONE" - ], - "doc": "sub-type" - }], - "doc": "" - }, - { - "name": "string_value", - "type": [ "null", "string" ], - "doc": "" - }, - { - "name": "bytes_value", - "type": [ "null", "bytes" ], - "doc": "" - }, - { - "name": "int64_value", - "type": [ "null", "long" ], - "doc": "" - }, - { - "name": "double_value", - "type": [ "null", "double" ], - "doc": "" - }, - { - "name": "bool_value", - "type": [ "null", "boolean" ], - "doc": "" - }, - { - "name": "none_value", - "type": [ "null", "boolean" ], - "doc": "" - } - ] - }], - "doc": "" - } + { + "name": "sub_type", + "type": [ + "null", + { + "type": "enum", + "name": "type_sub_type_01", + "symbols": [ + "ST_UNKNOWN", + "ST_STRING", + "ST_HETEROGENEOUS", + "ST_BYTES", + "ST_INT64", + "ST_DOUBLE", + "ST_BOOL", + "ST_NONE" + ], + "doc": "sub-type" + } + ], + "doc": "" + }, + { + "name": "string_value", + "type": ["null", "string"], + "doc": "" + }, + { + "name": "bytes_value", + "type": ["null", "bytes"], + "doc": "" + }, + { + "name": "int64_value", + "type": ["null", "long"], + "doc": "" + }, + { + "name": "double_value", + "type": ["null", "double"], + "doc": "" + }, + { + "name": "bool_value", + "type": ["null", "boolean"], + "doc": "" + }, + { + "name": "none_value", + "type": ["null", "boolean"], + "doc": "" + } ] - }] - }], - "doc": "DecisionPoint choice parameters" - } - ] - }], - "doc": "" - }, - { - "name": "decision_outcome", - "type": ["null", { - "name": "type_decision_outcome", - "type": "record", - "fields": [ - { - "name": "outcome_label", - "type": ["string", "null"], - "doc": "" - }, - { - "name": "outcome_value", - "type": ["float", "null"], - "doc": "" + } + } + ], + "doc": "" + } + ] } - ] - }], - "doc": "" - }, - { - "name": "link", - "type": [ "null", { - "namespace": "link", - "name": "type_link", - "type": "record", - "fields": [ - { - 
"name": "linked_sight_id", - "type": [ "null", "string" ], - "doc": "" - }, - { - "name": "link_type", - "type": [ "null", { - "type": "enum", - "name": "type_link_type", - "symbols": - [ - "LT_UNKNOWN", - "LT_PARENT_TO_CHILD", - "LT_CHILD_TO_PARENT" - ], - "doc": "link-type" - }], - "doc": "" + ], + "doc": "DecisionPoint choice parameters" + } + ] + } + ], + "doc": "" + }, + { + "name": "decision_outcome", + "type": [ + "null", + { + "name": "type_decision_outcome", + "type": "record", + "fields": [ + { + "name": "outcome_label", + "type": ["string", "null"], + "doc": "" + }, + { + "name": "outcome_value", + "type": ["float", "null"], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "link", + "type": [ + "null", + { + "namespace": "link", + "name": "type_link", + "type": "record", + "fields": [ + { + "name": "linked_sight_id", + "type": ["null", "string"], + "doc": "" + }, + { + "name": "link_type", + "type": [ + "null", + { + "type": "enum", + "name": "type_link_type", + "symbols": [ + "LT_UNKNOWN", + "LT_PARENT_TO_CHILD", + "LT_CHILD_TO_PARENT" + ], + "doc": "link-type" } - ] - }], - "doc": "" - }, - { - "name": "propose_action", - "type": ["null", { - "name": "type_propose_action", - "type": "record", - "fields": [ - { - "name": "action_id", - "type": ["string", "null"], - "doc": "" - }, - { - "name": "action_attrs", - "type": [ "null", { - "type": "array", - "name": "type_decision_param", - "items": [ "null", { + ], + "doc": "" + } + ] + } + ], + "doc": "" + }, + { + "name": "propose_action", + "type": [ + "null", + { + "name": "type_propose_action", + "type": "record", + "fields": [ + { + "name": "action_id", + "type": ["string", "null"], + "doc": "" + }, + { + "name": "action_attrs", + "type": [ + "null", + { + "name": "type_params_map_02", + "type": "record", + "fields": [ + { + "name": "params", + "type": [ + "null", + { + "type": "map", + "values": { "type": "record", - "name": "propose_action_action_attrs_schema", + "name": "type_params_value_02", "fields": [ - { - "name": "key", - "type": ["string", "null"], - "doc": "key of attribute" - }, - { - "name": "value", - "type": [ "null", { - "namespace": "value", - "name": "type_value3", - "type": "record", - "fields": [ - { - "name": "sub_type", - "type": [ "null", { - "type": "enum", - "name": "type_sub_type3", - "symbols": - [ - "ST_UNKNOWN", - "ST_STRING", - "ST_HETEROGENEOUS", - "ST_BYTES", - "ST_INT64", - "ST_DOUBLE", - "ST_BOOL", - "ST_NONE" - ], - "doc": "sub-type" - }], - "doc": "" - }, - { - "name": "string_value", - "type": [ "null", "string" ], - "doc": "" - }, - { - "name": "bytes_value", - "type": [ "null", "bytes" ], - "doc": "" - }, - { - "name": "int64_value", - "type": [ "null", "long" ], - "doc": "" - }, - { - "name": "double_value", - "type": [ "null", "double" ], - "doc": "" - }, - { - "name": "bool_value", - "type": [ "null", "boolean" ], - "doc": "" - }, - { - "name": "none_value", - "type": [ "null", "boolean" ], - "doc": "" - } - ] - }], - "doc": "" - } + { + "name": "sub_type", + "type": [ + "null", + { + "type": "enum", + "name": "type_sub_type_02", + "symbols": [ + "ST_UNKNOWN", + "ST_STRING", + "ST_HETEROGENEOUS", + "ST_BYTES", + "ST_INT64", + "ST_DOUBLE", + "ST_BOOL", + "ST_NONE", + "ST_JSON" + ], + "doc": "sub-type" + } + ], + "doc": "" + }, + { + "name": "json_value", + "type": ["null", "string", "int"], + "doc": "" + }, + { + "name": "string_value", + "type": ["null", "string"], + "doc": "" + }, + { + "name": "bytes_value", + "type": ["null", "bytes"], + "doc": "" + }, + { + "name": 
"int64_value", + "type": ["null", "long"], + "doc": "" + }, + { + "name": "double_value", + "type": ["null", "double"], + "doc": "" + }, + { + "name": "bool_value", + "type": ["null", "boolean"], + "doc": "" + }, + { + "name": "none_value", + "type": ["null", "boolean"], + "doc": "" + } ] - }] - }], - "doc": "propose action action attributes" - }, - { - "name": "attributes", - "type": [ "null", { - "type": "array", - "name": "type_attributes", - "items": [ "null", { - "type": "record", - "name": "propose_action_attribute_schema", - "fields": [ + } + } + ], + "doc": "" + } + ] + } + ], + "doc": "propose action action attributes" + }, + { + "name": "attributes", + "type": [ + "null", + { + "name": "type_params_map_03", + "type": "record", + "fields": [ + { + "name": "params", + "type": [ + "null", + { + "type": "map", + "values": { + "type": "record", + "name": "type_params_value_03", + "fields": [ { - "name": "key", - "type": ["string", "null"], - "doc": "key of attribute" + "name": "sub_type", + "type": [ + "null", + { + "type": "enum", + "name": "type_sub_type_03", + "symbols": [ + "ST_UNKNOWN", + "ST_STRING", + "ST_HETEROGENEOUS", + "ST_BYTES", + "ST_INT64", + "ST_DOUBLE", + "ST_BOOL", + "ST_NONE", + "ST_JSON" + ], + "doc": "sub-type" + } + ], + "doc": "" }, { - "name": "value", - "type": [ "null", { - "namespace": "value", - "name": "type_value4", - "type": "record", - "fields": [ - { - "name": "sub_type", - "type": [ "null", { - "type": "enum", - "name": "type_sub_type4", - "symbols": - [ - "ST_UNKNOWN", - "ST_STRING", - "ST_HETEROGENEOUS", - "ST_BYTES", - "ST_INT64", - "ST_DOUBLE", - "ST_BOOL", - "ST_NONE" - ], - "doc": "sub-type" - }], - "doc": "" - }, - { - "name": "string_value", - "type": [ "null", "string" ], - "doc": "" - }, - { - "name": "bytes_value", - "type": [ "null", "bytes" ], - "doc": "" - }, - { - "name": "int64_value", - "type": [ "null", "long" ], - "doc": "" - }, - { - "name": "double_value", - "type": [ "null", "double" ], - "doc": "" - }, - { - "name": "bool_value", - "type": [ "null", "boolean" ], - "doc": "" - }, - { - "name": "none_value", - "type": [ "null", "boolean" ], - "doc": "" - } - ] - }], + "name": "json_value", + "type": ["null", "string", "int"], + "doc": "" + }, + { + "name": "string_value", + "type": ["null", "string"], + "doc": "" + }, + { + "name": "bytes_value", + "type": ["null", "bytes"], + "doc": "" + }, + { + "name": "int64_value", + "type": ["null", "long"], + "doc": "" + }, + { + "name": "double_value", + "type": ["null", "double"], + "doc": "" + }, + { + "name": "bool_value", + "type": ["null", "boolean"], + "doc": "" + }, + { + "name": "none_value", + "type": ["null", "boolean"], "doc": "" } - ] - }] - }], - "doc": "propose_action attributes" - } - ] - }], - "doc": "" - }, - { - "name": "file", - "type": ["string", "null"], - "doc": "File name" - }, - { - "name": "line", - "type": ["int", "null"], - "doc": "Line name" - }, - { - "name": "func", - "type": ["string", "null"], - "doc": "Function name" - }, - { - "name": "ancestor_start_location", - "type": [ "null", { - "type": "array", - "name": "type_ancestor_start_location", - "items": ["string", "null"] - }], - "doc": "The locations of the starting points of all the blocks that contain this object" - }, - { - "name": "metrics", - "type": [ "null", { - "type": "record", - "name": "type_metrics", - "fields": [ - { - "name": "process_free_swap_space_bytes", - "type": ["long", "null"], - "doc": "" - }, - { - "name": "process_total_swap_space_bytes", - "type": ["long", "null"], - "doc": "" + 
] + } + } + ], + "doc": "" + } + ] } + ], + "doc": "propose_action attributes" + } ] - }], - "doc": "Encapsulates meta-data that tracks the dynamic behavior of the system" - }, - { - "name": "order", - "type": [ "null", { - "type": "record", - "name": "type_order", - "fields": [ - { - "name": "timestamp_ns", - "type": ["long", "null"], - "doc": "timestamp" - } + } + ], + "doc": "" + }, + { + "name": "file", + "type": ["string", "null"], + "doc": "File name" + }, + { + "name": "line", + "type": ["int", "null"], + "doc": "Line name" + }, + { + "name": "func", + "type": ["string", "null"], + "doc": "Function name" + }, + { + "name": "ancestor_start_location", + "type": [ + "null", + { + "type": "array", + "name": "type_ancestor_start_location", + "items": ["string", "null"] + } + ], + "doc": "The locations of the starting points of all the blocks that contain this object" + }, + { + "name": "metrics", + "type": [ + "null", + { + "type": "record", + "name": "type_metrics", + "fields": [ + { + "name": "process_free_swap_space_bytes", + "type": ["long", "null"], + "doc": "" + }, + { + "name": "process_total_swap_space_bytes", + "type": ["long", "null"], + "doc": "" + } + ] + } + ], + "doc": "Encapsulates meta-data that tracks the dynamic behavior of the system" + }, + { + "name": "order", + "type": [ + "null", + { + "type": "record", + "name": "type_order", + "fields": [ + { + "name": "timestamp_ns", + "type": ["long", "null"], + "doc": "timestamp" + } ] - }], - "doc": "The attributes that are in-force at the time this object was logged" - } - ] + } + ], + "doc": "The attributes that are in-force at the time this object was logged" + } + ] } diff --git a/py/sight/demo/propose_action.py b/py/sight/demo/propose_action.py index e5df312..8fc6882 100644 --- a/py/sight/demo/propose_action.py +++ b/py/sight/demo/propose_action.py @@ -40,12 +40,12 @@ def warn(*args, **kwargs): from sight.block import Block from sight.proto import sight_pb2 from sight.sight import Sight +from sight.utils.proto_conversion import convert_proto_to_dict from sight.widgets.decision import decision from sight.widgets.decision import trials from sight.widgets.decision.single_action_optimizer_client import ( SingleActionOptimizerClient ) -from sight_service.optimizer_instance import param_proto_to_dict from sight_service.proto import service_pb2 _RUN_MODE = flags.DEFINE_enum( @@ -194,11 +194,14 @@ def main(argv: Sequence[str]) -> None: outcome_dict = {} outcome_dict['action_id'] = outcome.action_id outcome_dict['reward'] = outcome.reward - outcome_dict['action'] = param_proto_to_dict(outcome.action_attrs) - outcome_dict['outcome'] = param_proto_to_dict(outcome.outcome_attrs) + outcome_dict['action'] = convert_proto_to_dict( + proto=outcome.action_attrs) + outcome_dict['outcome'] = convert_proto_to_dict( + proto=outcome.outcome_attrs) print('here : ', outcome_dict['outcome']['time_series'], type(outcome_dict['outcome']['time_series'])) - outcome_dict['attributes'] = param_proto_to_dict(outcome.attributes) + outcome_dict['attributes'] = convert_proto_to_dict( + proto=outcome.attributes) outcome_list.append(outcome_dict) id += 1 diff --git a/py/sight/proto/sight.proto b/py/sight/proto/sight.proto index e32b2e6..3723801 100644 --- a/py/sight/proto/sight.proto +++ b/py/sight/proto/sight.proto @@ -114,8 +114,8 @@ message Object { } message ProposeAction { - repeated DecisionParam action_attrs = 1; - repeated DecisionParam attributes = 2; + DecisionParam action_attrs = 1; + DecisionParam attributes = 2; string action_id = 3; } @@ 
-743,8 +743,7 @@ message DecisionConfigurationStart { } message DecisionParam { - string key = 1; - Value value = 2; + map params = 1; } message DecisionPoint { @@ -755,8 +754,8 @@ message DecisionPoint { string chosen_option = 2; // need or not? // The parameters that characterize the chosen option. - repeated DecisionParam choice_params = 3; - repeated DecisionParam state_params = 4; + DecisionParam choice_params = 3; + DecisionParam state_params = 4; } message DecisionOutcome { @@ -764,5 +763,5 @@ message DecisionOutcome { // The numeric value of this outcome, with higher values being more desirable. float reward = 2; float discount = 3; - repeated DecisionParam outcome_params = 4; + DecisionParam outcome_params = 4; } diff --git a/py/sight/proto/sight_pb2.py b/py/sight/proto/sight_pb2.py index 123dd8f..91b0028 100644 --- a/py/sight/proto/sight_pb2.py +++ b/py/sight/proto/sight_pb2.py @@ -4,497 +4,25 @@ """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() - from sight.proto import example_pb2 as sight_dot_proto_dot_example__pb2 -from sight.proto.widgets.pipeline.flume import flume_pb2 as sight_dot_proto_dot_widgets_dot_pipeline_dot_flume_dot_flume__pb2 - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17sight/proto/sight.proto\x12\rsight.x.proto\x1a\x19sight/proto/example.proto\x1a.sight/proto/widgets/pipeline/flume/flume.proto\"\'\n\tAttribute\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xb1\x0c\n\x06Object\x12\x10\n\x08location\x18\x01 \x01(\t\x12\r\n\x05index\x18\x02 \x01(\x03\x12\x0f\n\x07log_uid\x18\x18 \x01(\t\x12+\n\tattribute\x18\x03 \x03(\x0b\x32\x18.sight.x.proto.Attribute\x12/\n\x08sub_type\x18\x04 \x01(\x0e\x32\x1d.sight.x.proto.Object.SubType\x12#\n\x04text\x18\x05 \x01(\x0b\x32\x13.sight.x.proto.TextH\x00\x12\x30\n\x0b\x62lock_start\x18\x06 \x01(\x0b\x32\x19.sight.x.proto.BlockStartH\x00\x12,\n\tblock_end\x18\x07 \x01(\x0b\x32\x17.sight.x.proto.BlockEndH\x00\x12\x38\n\x0f\x61ttribute_start\x18\x08 \x01(\x0b\x32\x1d.sight.x.proto.AttributeStartH\x00\x12\x34\n\rattribute_end\x18\t \x01(\x0b\x32\x1b.sight.x.proto.AttributeEndH\x00\x12\x46\n\x10\x66lume_do_fn_emit\x18\x0e \x01(\x0b\x32*.sight.x.widgets.flume.proto.FlumeDoFnEmitH\x00\x12@\n\x0c\x66lume_depend\x18\x0f \x01(\x0b\x32(.sight.x.widgets.flume.proto.FlumeDependH\x00\x12%\n\x05value\x18\x10 \x01(\x0b\x32\x14.sight.x.proto.ValueH\x00\x12-\n\texception\x18\x11 \x01(\x0b\x32\x18.sight.x.proto.ExceptionH\x00\x12\'\n\x06tensor\x18\x14 \x01(\x0b\x32\x15.sight.x.proto.TensorH\x00\x12?\n\x13tensor_flow_example\x18\x15 \x01(\x0b\x32 .sight.x.proto.TensorFlowExampleH\x00\x12\x36\n\x0e\x64\x65\x63ision_point\x18\x16 \x01(\x0b\x32\x1c.sight.x.proto.DecisionPointH\x00\x12:\n\x10\x64\x65\x63ision_outcome\x18\x17 \x01(\x0b\x32\x1e.sight.x.proto.DecisionOutcomeH\x00\x12#\n\x04link\x18\x19 \x01(\x0b\x32\x13.sight.x.proto.LinkH\x00\x12\x36\n\x0epropose_action\x18\x1a \x01(\x0b\x32\x1c.sight.x.proto.ProposeActionH\x00\x12\x0c\n\x04\x66ile\x18\n \x01(\t\x12\x0c\n\x04line\x18\x0b \x01(\x05\x12\x0c\n\x04\x66unc\x18\x0c \x01(\t\x12\x1f\n\x17\x61ncestor_start_location\x18\r 
\x03(\t\x12.\n\x07metrics\x18\x12 \x01(\x0b\x32\x1d.sight.x.proto.Object.Metrics\x12*\n\x05order\x18\x13 \x01(\x0b\x32\x1b.sight.x.proto.Object.Order\x1aX\n\x07Metrics\x12%\n\x1dprocess_free_swap_space_bytes\x18\x01 \x01(\x03\x12&\n\x1eprocess_total_swap_space_bytes\x18\x02 \x01(\x03\x1a\x1d\n\x05Order\x12\x14\n\x0ctimestamp_ns\x18\x01 \x01(\x03\"\xd2\x02\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x0b\n\x07ST_TEXT\x10\x01\x12\x12\n\x0eST_BLOCK_START\x10\x02\x12\x10\n\x0cST_BLOCK_END\x10\x03\x12\x16\n\x12ST_ATTRIBUTE_START\x10\x04\x12\x14\n\x10ST_ATTRIBUTE_END\x10\x05\x12\x17\n\x13ST_FLUME_DO_FN_EMIT\x10\x06\x12\x13\n\x0fST_FLUME_DEPEND\x10\x07\x12\x0c\n\x08ST_VALUE\x10\x08\x12\x10\n\x0cST_EXCEPTION\x10\t\x12\r\n\tST_TENSOR\x10\n\x12\x19\n\x15ST_TENSORFLOW_EXAMPLE\x10\x0c\x12\x15\n\x11ST_DECISION_POINT\x10\r\x12\x17\n\x13ST_DECISION_OUTCOME\x10\x0e\x12\n\n\x06ST_GAP\x10\x0b\x12\x0b\n\x07ST_LINK\x10\x0f\x12\x15\n\x11ST_PROPOSE_ACTION\x10\x10\x42\x12\n\x10sub_type_message\"\x88\x01\n\rProposeAction\x12\x32\n\x0c\x61\x63tion_attrs\x18\x01 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x30\n\nattributes\x18\x02 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x11\n\taction_id\x18\x03 \x01(\t\"\xec\x01\n\x12\x43onfigurationStart\x12;\n\x08sub_type\x18\x01 \x01(\x0e\x32).sight.x.proto.ConfigurationStart.SubType\x12K\n\x16\x64\x65\x63ision_configuration\x18\x02 \x01(\x0b\x32).sight.x.proto.DecisionConfigurationStartH\x00\"8\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x1d\n\x19ST_DECISION_CONFIGURATION\x10\x01\x42\x12\n\x10sub_type_message\";\n\tException\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\x12\x11\n\ttraceback\x18\x03 \x01(\t\"\xd7\x05\n\x06Tensor\x12/\n\x08sub_type\x18\x01 \x01(\x0e\x32\x1d.sight.x.proto.Tensor.SubType\x12\r\n\x05label\x18\x02 \x01(\t\x12\r\n\x05shape\x18\x03 \x03(\x03\x12\x11\n\tdim_label\x18\t \x03(\t\x12;\n\x0f\x64im_axis_values\x18\n \x03(\x0b\x32\".sight.x.proto.Tensor.StringValues\x12;\n\rstring_values\x18\x04 \x01(\x0b\x32\".sight.x.proto.Tensor.StringValuesH\x00\x12\x39\n\x0c\x62ytes_values\x18\x05 \x01(\x0b\x32!.sight.x.proto.Tensor.BytesValuesH\x00\x12\x39\n\x0cint64_values\x18\x06 \x01(\x0b\x32!.sight.x.proto.Tensor.Int64ValuesH\x00\x12;\n\rdouble_values\x18\x07 \x01(\x0b\x32\".sight.x.proto.Tensor.DoubleValuesH\x00\x12\x37\n\x0b\x62ool_values\x18\x08 \x01(\x0b\x32 .sight.x.proto.Tensor.BoolValuesH\x00\x1a\x1d\n\x0cStringValues\x12\r\n\x05value\x18\x01 \x03(\t\x1a\x1c\n\x0b\x42ytesValues\x12\r\n\x05value\x18\x01 \x03(\x0c\x1a\x1c\n\x0bInt64Values\x12\r\n\x05value\x18\x01 \x03(\x03\x1a\x1d\n\x0c\x44oubleValues\x12\r\n\x05value\x18\x01 \x03(\x01\x1a\x1b\n\nBoolValues\x12\r\n\x05value\x18\x01 \x03(\x08\"`\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\r\n\tST_STRING\x10\x01\x12\x0c\n\x08ST_BYTES\x10\x02\x12\x0c\n\x08ST_INT64\x10\x03\x12\r\n\tST_DOUBLE\x10\x04\x12\x0b\n\x07ST_BOOL\x10\x05\x42\x0c\n\nvalue_type\"\x9c\x01\n\x04Link\x12\x17\n\x0flinked_sight_id\x18\x01 \x01(\t\x12/\n\tlink_type\x18\x02 \x01(\x0e\x32\x1c.sight.x.proto.Link.LinkType\"J\n\x08LinkType\x12\x0e\n\nLT_UNKNOWN\x10\x00\x12\x16\n\x12LT_PARENT_TO_CHILD\x10\x01\x12\x16\n\x12LT_CHILD_TO_PARENT\x10\x02\"\x8e\x02\n\x11TensorFlowExample\x12/\n\rinput_example\x18\x01 \x01(\x0b\x32\x16.sight.x.proto.ExampleH\x00\x12@\n\x16input_sequence_example\x18\x02 \x01(\x0b\x32\x1e.sight.x.proto.SequenceExampleH\x00\x12\x30\n\x0eoutput_example\x18\x03 \x01(\x0b\x32\x16.sight.x.proto.ExampleH\x01\x12\x41\n\x17output_sequence_example\x18\x04 
\x01(\x0b\x32\x1e.sight.x.proto.SequenceExampleH\x01\x42\x07\n\x05inputB\x08\n\x06output\")\n\x03Log\x12\"\n\x03obj\x18\x01 \x03(\x0b\x32\x15.sight.x.proto.Object\"x\n\x04Text\x12\x0c\n\x04text\x18\x01 \x01(\t\x12-\n\x08sub_type\x18\x02 \x01(\x0e\x32\x1b.sight.x.proto.Text.SubType\"3\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x0b\n\x07ST_TEXT\x10\x01\x12\x0b\n\x07ST_HTML\x10\x02\"\xf4\x02\n\x05Value\x12.\n\x08sub_type\x18\x01 \x01(\x0e\x32\x1c.sight.x.proto.Value.SubType\x12\x16\n\x0cstring_value\x18\x02 \x01(\tH\x00\x12\x15\n\x0b\x62ytes_value\x18\x03 \x01(\x0cH\x00\x12\x15\n\x0bint64_value\x18\x04 \x01(\x03H\x00\x12\x16\n\x0c\x64ouble_value\x18\x05 \x01(\x01H\x00\x12\x14\n\nbool_value\x18\x06 \x01(\x08H\x00\x12\x14\n\nnone_value\x18\x07 \x01(\x08H\x00\x12\x14\n\njson_value\x18\x08 \x01(\tH\x00\x12\x11\n\tmime_type\x18\t \x01(\t\"z\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\r\n\tST_STRING\x10\x01\x12\x0c\n\x08ST_BYTES\x10\x02\x12\x0c\n\x08ST_INT64\x10\x03\x12\r\n\tST_DOUBLE\x10\x04\x12\x0b\n\x07ST_BOOL\x10\x05\x12\x0b\n\x07ST_NONE\x10\x06\x12\x0b\n\x07ST_JSON\x10\x07\x42\x0c\n\nvalue_type\"\xeb\n\n\nBlockStart\x12\r\n\x05label\x18\x01 \x01(\t\x12\x33\n\x08sub_type\x18\x02 \x01(\x0e\x32!.sight.x.proto.BlockStart.SubType\x12J\n\x12\x66lume_do_fn_create\x18\x03 \x01(\x0b\x32,.sight.x.widgets.flume.proto.FlumeDoFnCreateH\x00\x12M\n\x14\x66lume_do_fn_start_do\x18\x04 \x01(\x0b\x32-.sight.x.widgets.flume.proto.FlumeDoFnStartDoH\x00\x12T\n\x17\x66lume_compare_fn_create\x18\x05 \x01(\x0b\x32\x31.sight.x.widgets.flume.proto.FlumeCompareFnCreateH\x00\x12\x61\n\x1e\x66lume_compare_fn_start_compare\x18\x06 \x01(\x0b\x32\x37.sight.x.widgets.flume.proto.FlumeCompareFnStartCompareH\x00\x12(\n\x04list\x18\x07 \x01(\x0b\x32\x18.sight.x.proto.ListStartH\x00\x12\\\n tensor_flow_model_training_epoch\x18\x08 \x01(\x0b\x32\x30.sight.x.proto.TensorFlowModelTrainingEpochStartH\x00\x12:\n\x10simulation_start\x18\t \x01(\x0b\x32\x1e.sight.x.proto.SimulationStartH\x00\x12O\n\x1bsimulation_parameters_start\x18\n \x01(\x0b\x32(.sight.x.proto.SimulationParametersStartH\x00\x12L\n\x1asimulation_time_step_start\x18\x0b \x01(\x0b\x32&.sight.x.proto.SimulationTimeStepStartH\x00\x12:\n\rconfiguration\x18\x0c \x01(\x0b\x32!.sight.x.proto.ConfigurationStartH\x00\"\x91\x04\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x19\n\x15ST_FLUME_DO_FN_CREATE\x10\x01\x12\x1b\n\x17ST_FLUME_DO_FN_START_DO\x10\x02\x12\x1e\n\x1aST_FLUME_COMPARE_FN_CREATE\x10\x03\x12%\n!ST_FLUME_COMPARE_FN_START_COMPARE\x10\x04\x12\x12\n\x0eST_NAMED_VALUE\x10\x05\x12\x0b\n\x07ST_LIST\x10\x06\x12\x0c\n\x08ST_TABLE\x10\x07\x12#\n\x1fST_TENSORFLOW_MODEL_APPLICATION\x10\x08\x12&\n\"ST_TENSORFLOW_MODEL_TRAINING_EPOCH\x10\t\x12 \n\x1cST_TENSORFLOW_MODEL_TRAINING\x10\n\x12\x11\n\rST_SIMULATION\x10\x0b\x12\x1c\n\x18ST_SIMULATION_PARAMETERS\x10\x0c\x12\x17\n\x13ST_SIMULATION_STATE\x10\r\x12\x1b\n\x17ST_SIMULATION_TIME_STEP\x10\x0e\x12\x1f\n\x1bST_SIMULATION_INITIAL_STATE\x10\x11\x12 \n\x1cST_SIMULATION_BOUNDARY_STATE\x10\x12\x12\x19\n\x15ST_CLUSTER_ASSIGNMENT\x10\x0f\x12\x14\n\x10ST_CONFIGURATION\x10\x10\x42\x12\n\x10sub_type_message\"\x8a\t\n\x08\x42lockEnd\x12\r\n\x05label\x18\x01 \x01(\t\x12\x31\n\x08sub_type\x18\x06 \x01(\x0e\x32\x1f.sight.x.proto.BlockEnd.SubType\x12\x1f\n\x17location_of_block_start\x18\x02 \x01(\t\x12\x1b\n\x13num_direct_contents\x18\x03 \x01(\x03\x12\x1f\n\x17num_transitive_contents\x18\x04 \x01(\x03\x12N\n\x14\x66lume_do_fn_complete\x18\x07 
\x01(\x0b\x32..sight.x.widgets.flume.proto.FlumeDoFnCompleteH\x00\x12I\n\x12\x66lume_do_fn_end_do\x18\x08 \x01(\x0b\x32+.sight.x.widgets.flume.proto.FlumeDoFnEndDoH\x00\x12X\n\x19\x66lume_compare_fn_complete\x18\t \x01(\x0b\x32\x33.sight.x.widgets.flume.proto.FlumeCompareFnCompleteH\x00\x12]\n\x1c\x66lume_compare_fn_end_compare\x18\n \x01(\x0b\x32\x35.sight.x.widgets.flume.proto.FlumeCompareFnEndCompareH\x00\x12\x30\n\x07metrics\x18\x0c \x01(\x0b\x32\x1f.sight.x.proto.BlockEnd.Metrics\x1a\x45\n\x07Metrics\x12\x17\n\x0f\x65lapsed_time_ns\x18\x01 \x01(\x03\x12!\n\x19\x65xclusive_elapsed_time_ns\x18\x02 \x01(\x03\"\xfb\x03\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x1b\n\x17ST_FLUME_DO_FN_COMPLETE\x10\x01\x12\x19\n\x15ST_FLUME_DO_FN_END_DO\x10\x02\x12 \n\x1cST_FLUME_COMPARE_FN_COMPLETE\x10\x03\x12#\n\x1fST_FLUME_COMPARE_FN_END_COMPARE\x10\x04\x12\x12\n\x0eST_NAMED_VALUE\x10\x05\x12\x0b\n\x07ST_LIST\x10\x06\x12\x0c\n\x08ST_TABLE\x10\x07\x12#\n\x1fST_TENSORFLOW_MODEL_APPLICATION\x10\x08\x12&\n\"ST_TENSORFLOW_MODEL_TRAINING_EPOCH\x10\t\x12 \n\x1cST_TENSORFLOW_MODEL_TRAINING\x10\n\x12\x11\n\rST_SIMULATION\x10\x0b\x12\x1c\n\x18ST_SIMULATION_PARAMETERS\x10\x0c\x12\x17\n\x13ST_SIMULATION_STATE\x10\r\x12\x1b\n\x17ST_SIMULATION_TIME_STEP\x10\x0e\x12\x1f\n\x1bST_SIMULATION_INITIAL_STATE\x10\x11\x12 \n\x1cST_SIMULATION_BOUNDARY_STATE\x10\x12\x12\x19\n\x15ST_CLUSTER_ASSIGNMENT\x10\x0f\x42\x12\n\x10sub_type_message\"\xaf\x01\n\tListStart\x12\x32\n\x08sub_type\x18\x01 \x01(\x0e\x32 .sight.x.proto.ListStart.SubType\"n\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x12\n\x0eST_HOMOGENEOUS\x10\x01\x12\x14\n\x10ST_HETEROGENEOUS\x10\x02\x12\n\n\x06ST_MAP\x10\x03\x12\x10\n\x0cST_MAP_ENTRY\x10\x04\x12\x0b\n\x07ST_DICT\x10\x05\"J\n!TensorFlowModelTrainingEpochStart\x12\x11\n\tepoch_num\x18\x01 \x01(\x03\x12\x12\n\nbatch_size\x18\x02 \x01(\x03\"=\n\x0e\x41ttributeStart\x12+\n\tattribute\x18\x01 \x01(\x0b\x32\x18.sight.x.proto.Attribute\"\x1b\n\x0c\x41ttributeEnd\x12\x0b\n\x03key\x18\x01 \x01(\t\"\xb2\x03\n\x06Params\x12\r\n\x05local\x18\x01 \x01(\x08\x12\x14\n\x0clog_dir_path\x18\x02 \x01(\t\x12\r\n\x05label\x18\x03 \x01(\t\x12\x13\n\x0btext_output\x18\x04 \x01(\x08\x12\x17\n\x0f\x63olumnio_output\x18\x05 \x01(\x08\x12\x18\n\x10\x63\x61pacitor_output\x18\x06 \x01(\x08\x12\x11\n\tlog_owner\x18\x07 \x01(\t\x12\x13\n\x0bpath_prefix\x18\x08 \x01(\t\x12\x1a\n\x12\x63ontainer_location\x18\t \x01(\t\x12\n\n\x02id\x18\n \x01(\x03\x12\x15\n\rsilent_logger\x18\x0b \x01(\x08\x12\x11\n\tin_memory\x18\x0c \x01(\x08\x12\x13\n\x0b\x61vro_output\x18\r \x01(\x08\x12\x12\n\nproject_id\x18\x0e \x01(\t\x12\x13\n\x0b\x62ucket_name\x18\x0f \x01(\t\x12\x10\n\x08gcp_path\x18\x10 \x01(\t\x12\x13\n\x0b\x66ile_format\x18\x11 \x01(\t\x12\x14\n\x0c\x64\x61taset_name\x18\x12 \x01(\t\x12\x1c\n\x14\x65xternal_file_format\x18\x13 \x01(\t\x12\x19\n\x11\x65xternal_file_uri\x18\x14 \x01(\t\"\x11\n\x0fSimulationStart\"\x1b\n\x19SimulationParametersStart\"\xb8\x02\n\x17SimulationTimeStepStart\x12\x17\n\x0ftime_step_index\x18\x01 \x03(\x03\x12\x11\n\ttime_step\x18\x02 \x01(\x02\x12\x16\n\x0etime_step_size\x18\x03 \x01(\x02\x12M\n\x0ftime_step_units\x18\x04 
\x01(\x0e\x32\x34.sight.x.proto.SimulationTimeStepStart.TimeStepUnits\"\x89\x01\n\rTimeStepUnits\x12\x0f\n\x0bTSU_UNKNOWN\x10\x00\x12\x0e\n\nTSU_SECOND\x10\x01\x12\x0e\n\nTSU_MINUTE\x10\x02\x12\x0c\n\x08TSU_HOUR\x10\x03\x12\x0b\n\x07TSU_DAY\x10\x04\x12\r\n\tTSU_MONTH\x10\x05\x12\x0f\n\x0bTSU_QUARTER\x10\x06\x12\x0c\n\x08TSU_YEAR\x10\x07\"\xf0\x01\n\x12\x43ontinuousProbDist\x12>\n\x08gaussian\x18\x01 \x01(\x0b\x32*.sight.x.proto.ContinuousProbDist.GaussianH\x00\x12<\n\x07uniform\x18\x02 \x01(\x0b\x32).sight.x.proto.ContinuousProbDist.UniformH\x00\x1a\'\n\x08Gaussian\x12\x0c\n\x04mean\x18\x01 \x01(\x02\x12\r\n\x05stdev\x18\x02 \x01(\x02\x1a+\n\x07Uniform\x12\x0f\n\x07min_val\x18\x01 \x01(\x02\x12\x0f\n\x07max_val\x18\x02 \x01(\x02\x42\x06\n\x04\x64ist\"\x83\x01\n\x10\x44iscreteProbDist\x12:\n\x07uniform\x18\x01 \x01(\x0b\x32\'.sight.x.proto.DiscreteProbDist.UniformH\x00\x1a+\n\x07Uniform\x12\x0f\n\x07min_val\x18\x01 \x01(\x03\x12\x0f\n\x07max_val\x18\x02 \x01(\x03\x42\x06\n\x04\x64ist\"\xa6\x1c\n\x1a\x44\x65\x63isionConfigurationStart\x12O\n\x0eoptimizer_type\x18\x01 \x01(\x0e\x32\x37.sight.x.proto.DecisionConfigurationStart.OptimizerType\x12R\n\rchoice_config\x18\x02 \x03(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.ChoiceConfigEntry\x12N\n\x0bstate_attrs\x18\x03 \x03(\x0b\x32\x39.sight.x.proto.DecisionConfigurationStart.StateAttrsEntry\x12P\n\x0c\x61\x63tion_attrs\x18\x04 \x03(\x0b\x32:.sight.x.proto.DecisionConfigurationStart.ActionAttrsEntry\x12R\n\routcome_attrs\x18\x05 \x03(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.OutcomeAttrsEntry\x12\x12\n\nnum_trials\x18\x06 \x01(\x03\x1a\x0e\n\x0cVizierConfig\x1a\xf1\x01\n\nAcmeConfig\x12R\n\nacme_agent\x18\x01 \x01(\x0e\x32>.sight.x.proto.DecisionConfigurationStart.AcmeConfig.AcmeAgent\"\x8e\x01\n\tAcmeAgent\x12\x0e\n\nAA_UNKNOWN\x10\x00\x12\n\n\x06\x41\x41_DQN\x10\x01\x12\x0b\n\x07\x41\x41_D4PG\x10\x02\x12\r\n\tAA_IMPALA\x10\x03\x12\x0b\n\x07\x41\x41_MDQN\x10\x04\x12\x0c\n\x08\x41\x41_QRDQN\x10\x05\x12\n\n\x06\x41\x41_PPO\x10\x06\x12\n\n\x06\x41\x41_MPO\x10\x07\x12\n\n\x06\x41\x41_SAC\x10\x08\x12\n\n\x06\x41\x41_TD3\x10\t\x1a\x35\n\x16GeneticAlgorithmConfig\x12\x1b\n\x13max_population_size\x18\x01 \x01(\x03\x1a\x18\n\x16\x45xhaustiveSearchConfig\x1a\xeb\x02\n\tLLMConfig\x12S\n\talgorithm\x18\x01 \x01(\x0e\x32@.sight.x.proto.DecisionConfigurationStart.LLMConfig.LLMAlgorithm\x12I\n\x04goal\x18\x02 \x01(\x0e\x32;.sight.x.proto.DecisionConfigurationStart.LLMConfig.LLMGoal\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\"W\n\x0cLLMAlgorithm\x12\x0e\n\nLA_UNKNOWN\x10\x00\x12\x11\n\rLA_TEXT_BISON\x10\x01\x12\x11\n\rLA_CHAT_BISON\x10\x02\x12\x11\n\rLA_GEMINI_PRO\x10\x03\"P\n\x07LLMGoal\x12\x0e\n\nLM_UNKNOWN\x10\x00\x12\x0f\n\x0bLM_OPTIMIZE\x10\x01\x12\x10\n\x0cLM_RECOMMEND\x10\x02\x12\x12\n\x0eLM_INTERACTIVE\x10\x03\x1a\x13\n\x11\x42\x61yesianOptConfig\x1a\x1b\n\x19SensitivityAnalysisConfig\x1a\x8e\x03\n\x0fNeverGradConfig\x12_\n\talgorithm\x18\x01 
\x01(\x0e\x32L.sight.x.proto.DecisionConfigurationStart.NeverGradConfig.NeverGradAlgorithm\"\x99\x02\n\x12NeverGradAlgorithm\x12\x0e\n\nNG_UNKNOWN\x10\x00\x12\x0b\n\x07NG_AUTO\x10\x01\x12\t\n\x05NG_BO\x10\x02\x12\n\n\x06NG_CMA\x10\x03\x12\x12\n\x0eNG_TwoPointsDE\x10\x04\x12\x13\n\x0fNG_RandomSearch\x10\x05\x12\n\n\x06NG_PSO\x10\x06\x12\x1a\n\x16NG_ScrHammersleySearch\x10\x07\x12\t\n\x05NG_DE\x10\x08\x12\n\n\x06NG_CGA\x10\t\x12\t\n\x05NG_ES\x10\n\x12\r\n\tNG_DL_OPO\x10\x0b\x12\n\n\x06NG_DDE\x10\x0c\x12\n\n\x06NG_NMM\x10\r\x12\x10\n\x0cNG_TINY_SPSA\x10\x0e\x12\x11\n\rNG_VORONOI_DE\x10\x0f\x12\x10\n\x0cNG_CMA_SMALL\x10\x10\x1a\r\n\x0bSMCPyConfig\x1a\x19\n\x17WorklistSchedulerConfig\x1a\xac\x07\n\x0c\x43hoiceConfig\x12O\n\rvizier_config\x18\x01 \x01(\x0b\x32\x36.sight.x.proto.DecisionConfigurationStart.VizierConfigH\x00\x12K\n\x0b\x61\x63me_config\x18\x02 \x01(\x0b\x32\x34.sight.x.proto.DecisionConfigurationStart.AcmeConfigH\x00\x12\x64\n\x18genetic_algorithm_config\x18\x03 \x01(\x0b\x32@.sight.x.proto.DecisionConfigurationStart.GeneticAlgorithmConfigH\x00\x12\x64\n\x18\x65xhaustive_search_config\x18\x04 \x01(\x0b\x32@.sight.x.proto.DecisionConfigurationStart.ExhaustiveSearchConfigH\x00\x12I\n\nllm_config\x18\x05 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.LLMConfigH\x00\x12Z\n\x13\x62\x61yesian_opt_config\x18\x06 \x01(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.BayesianOptConfigH\x00\x12j\n\x1bsensitivity_analysis_config\x18\x07 \x01(\x0b\x32\x43.sight.x.proto.DecisionConfigurationStart.SensitivityAnalysisConfigH\x00\x12V\n\x11never_grad_config\x18\x08 \x01(\x0b\x32\x39.sight.x.proto.DecisionConfigurationStart.NeverGradConfigH\x00\x12N\n\rsmc_py_config\x18\t \x01(\x0b\x32\x35.sight.x.proto.DecisionConfigurationStart.SMCPyConfigH\x00\x12\x66\n\x19worklist_scheduler_config\x18\n \x01(\x0b\x32\x41.sight.x.proto.DecisionConfigurationStart.WorklistSchedulerConfigH\x00\x42\x0f\n\rchoice_config\x1ak\n\x11\x43hoiceConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x45\n\x05value\x18\x02 \x01(\x0b\x32\x36.sight.x.proto.DecisionConfigurationStart.ChoiceConfig:\x02\x38\x01\x1a\x8d\x02\n\tAttrProps\x12\x11\n\tmin_value\x18\x01 \x01(\x02\x12\x11\n\tmax_value\x18\x02 \x01(\x02\x12\x11\n\tstep_size\x18\x03 \x01(\x02\x12\x1a\n\x12valid_float_values\x18\x04 \x03(\x02\x12\x18\n\x10valid_int_values\x18\x08 \x03(\x03\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\x12?\n\x14\x63ontinuous_prob_dist\x18\x06 \x01(\x0b\x32!.sight.x.proto.ContinuousProbDist\x12;\n\x12\x64iscrete_prob_dist\x18\x07 \x01(\x0b\x32\x1f.sight.x.proto.DiscreteProbDist\x1a\x66\n\x0fStateAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\x1ag\n\x10\x41\x63tionAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\x1ah\n\x11OutcomeAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 
\x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\"\xea\x01\n\rOptimizerType\x12\x0e\n\nOT_UNKNOWN\x10\x00\x12\r\n\tOT_VIZIER\x10\x01\x12\x0b\n\x07OT_ACME\x10\x02\x12\x18\n\x14OT_GENETIC_ALGORITHM\x10\x03\x12\x18\n\x14OT_EXHAUSTIVE_SEARCH\x10\x04\x12\n\n\x06OT_LLM\x10\x05\x12\x13\n\x0fOT_BAYESIAN_OPT\x10\x06\x12\x1b\n\x17OT_SENSITIVITY_ANALYSIS\x10\x07\x12\x11\n\rOT_NEVER_GRAD\x10\x08\x12\r\n\tOT_SMC_PY\x10\t\x12\x19\n\x15OT_WORKLIST_SCHEDULER\x10\n\"U\n\x08\x44\x61taType\x12\r\n\tDT_UNKOWN\x10\x00\x12\x0c\n\x08\x44T_INT32\x10\x01\x12\x0c\n\x08\x44T_INT64\x10\x02\x12\x0e\n\nDT_FLOAT32\x10\x03\x12\x0e\n\nDT_FLOAT64\x10\x04\"A\n\rDecisionParam\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.sight.x.proto.Value\"\xa5\x01\n\rDecisionPoint\x12\x14\n\x0c\x63hoice_label\x18\x01 \x01(\t\x12\x15\n\rchosen_option\x18\x02 \x01(\t\x12\x33\n\rchoice_params\x18\x03 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x32\n\x0cstate_params\x18\x04 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParam\"\x80\x01\n\x0f\x44\x65\x63isionOutcome\x12\x15\n\routcome_label\x18\x01 \x01(\t\x12\x0e\n\x06reward\x18\x02 \x01(\x02\x12\x10\n\x08\x64iscount\x18\x03 \x01(\x02\x12\x34\n\x0eoutcome_params\x18\x04 \x03(\x0b\x32\x1c.sight.x.proto.DecisionParamb\x06proto3') - - - -_ATTRIBUTE = DESCRIPTOR.message_types_by_name['Attribute'] -_OBJECT = DESCRIPTOR.message_types_by_name['Object'] -_OBJECT_METRICS = _OBJECT.nested_types_by_name['Metrics'] -_OBJECT_ORDER = _OBJECT.nested_types_by_name['Order'] -_PROPOSEACTION = DESCRIPTOR.message_types_by_name['ProposeAction'] -_CONFIGURATIONSTART = DESCRIPTOR.message_types_by_name['ConfigurationStart'] -_EXCEPTION = DESCRIPTOR.message_types_by_name['Exception'] -_TENSOR = DESCRIPTOR.message_types_by_name['Tensor'] -_TENSOR_STRINGVALUES = _TENSOR.nested_types_by_name['StringValues'] -_TENSOR_BYTESVALUES = _TENSOR.nested_types_by_name['BytesValues'] -_TENSOR_INT64VALUES = _TENSOR.nested_types_by_name['Int64Values'] -_TENSOR_DOUBLEVALUES = _TENSOR.nested_types_by_name['DoubleValues'] -_TENSOR_BOOLVALUES = _TENSOR.nested_types_by_name['BoolValues'] -_LINK = DESCRIPTOR.message_types_by_name['Link'] -_TENSORFLOWEXAMPLE = DESCRIPTOR.message_types_by_name['TensorFlowExample'] -_LOG = DESCRIPTOR.message_types_by_name['Log'] -_TEXT = DESCRIPTOR.message_types_by_name['Text'] -_VALUE = DESCRIPTOR.message_types_by_name['Value'] -_BLOCKSTART = DESCRIPTOR.message_types_by_name['BlockStart'] -_BLOCKEND = DESCRIPTOR.message_types_by_name['BlockEnd'] -_BLOCKEND_METRICS = _BLOCKEND.nested_types_by_name['Metrics'] -_LISTSTART = DESCRIPTOR.message_types_by_name['ListStart'] -_TENSORFLOWMODELTRAININGEPOCHSTART = DESCRIPTOR.message_types_by_name['TensorFlowModelTrainingEpochStart'] -_ATTRIBUTESTART = DESCRIPTOR.message_types_by_name['AttributeStart'] -_ATTRIBUTEEND = DESCRIPTOR.message_types_by_name['AttributeEnd'] -_PARAMS = DESCRIPTOR.message_types_by_name['Params'] -_SIMULATIONSTART = DESCRIPTOR.message_types_by_name['SimulationStart'] -_SIMULATIONPARAMETERSSTART = DESCRIPTOR.message_types_by_name['SimulationParametersStart'] -_SIMULATIONTIMESTEPSTART = DESCRIPTOR.message_types_by_name['SimulationTimeStepStart'] -_CONTINUOUSPROBDIST = DESCRIPTOR.message_types_by_name['ContinuousProbDist'] -_CONTINUOUSPROBDIST_GAUSSIAN = _CONTINUOUSPROBDIST.nested_types_by_name['Gaussian'] -_CONTINUOUSPROBDIST_UNIFORM = _CONTINUOUSPROBDIST.nested_types_by_name['Uniform'] -_DISCRETEPROBDIST = DESCRIPTOR.message_types_by_name['DiscreteProbDist'] -_DISCRETEPROBDIST_UNIFORM = 
_DISCRETEPROBDIST.nested_types_by_name['Uniform'] -_DECISIONCONFIGURATIONSTART = DESCRIPTOR.message_types_by_name['DecisionConfigurationStart'] -_DECISIONCONFIGURATIONSTART_VIZIERCONFIG = _DECISIONCONFIGURATIONSTART.nested_types_by_name['VizierConfig'] -_DECISIONCONFIGURATIONSTART_ACMECONFIG = _DECISIONCONFIGURATIONSTART.nested_types_by_name['AcmeConfig'] -_DECISIONCONFIGURATIONSTART_GENETICALGORITHMCONFIG = _DECISIONCONFIGURATIONSTART.nested_types_by_name['GeneticAlgorithmConfig'] -_DECISIONCONFIGURATIONSTART_EXHAUSTIVESEARCHCONFIG = _DECISIONCONFIGURATIONSTART.nested_types_by_name['ExhaustiveSearchConfig'] -_DECISIONCONFIGURATIONSTART_LLMCONFIG = _DECISIONCONFIGURATIONSTART.nested_types_by_name['LLMConfig'] -_DECISIONCONFIGURATIONSTART_BAYESIANOPTCONFIG = _DECISIONCONFIGURATIONSTART.nested_types_by_name['BayesianOptConfig'] -_DECISIONCONFIGURATIONSTART_SENSITIVITYANALYSISCONFIG = _DECISIONCONFIGURATIONSTART.nested_types_by_name['SensitivityAnalysisConfig'] -_DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG = _DECISIONCONFIGURATIONSTART.nested_types_by_name['NeverGradConfig'] -_DECISIONCONFIGURATIONSTART_SMCPYCONFIG = _DECISIONCONFIGURATIONSTART.nested_types_by_name['SMCPyConfig'] -_DECISIONCONFIGURATIONSTART_WORKLISTSCHEDULERCONFIG = _DECISIONCONFIGURATIONSTART.nested_types_by_name['WorklistSchedulerConfig'] -_DECISIONCONFIGURATIONSTART_CHOICECONFIG = _DECISIONCONFIGURATIONSTART.nested_types_by_name['ChoiceConfig'] -_DECISIONCONFIGURATIONSTART_CHOICECONFIGENTRY = _DECISIONCONFIGURATIONSTART.nested_types_by_name['ChoiceConfigEntry'] -_DECISIONCONFIGURATIONSTART_ATTRPROPS = _DECISIONCONFIGURATIONSTART.nested_types_by_name['AttrProps'] -_DECISIONCONFIGURATIONSTART_STATEATTRSENTRY = _DECISIONCONFIGURATIONSTART.nested_types_by_name['StateAttrsEntry'] -_DECISIONCONFIGURATIONSTART_ACTIONATTRSENTRY = _DECISIONCONFIGURATIONSTART.nested_types_by_name['ActionAttrsEntry'] -_DECISIONCONFIGURATIONSTART_OUTCOMEATTRSENTRY = _DECISIONCONFIGURATIONSTART.nested_types_by_name['OutcomeAttrsEntry'] -_DECISIONPARAM = DESCRIPTOR.message_types_by_name['DecisionParam'] -_DECISIONPOINT = DESCRIPTOR.message_types_by_name['DecisionPoint'] -_DECISIONOUTCOME = DESCRIPTOR.message_types_by_name['DecisionOutcome'] -_OBJECT_SUBTYPE = _OBJECT.enum_types_by_name['SubType'] -_CONFIGURATIONSTART_SUBTYPE = _CONFIGURATIONSTART.enum_types_by_name['SubType'] -_TENSOR_SUBTYPE = _TENSOR.enum_types_by_name['SubType'] -_LINK_LINKTYPE = _LINK.enum_types_by_name['LinkType'] -_TEXT_SUBTYPE = _TEXT.enum_types_by_name['SubType'] -_VALUE_SUBTYPE = _VALUE.enum_types_by_name['SubType'] -_BLOCKSTART_SUBTYPE = _BLOCKSTART.enum_types_by_name['SubType'] -_BLOCKEND_SUBTYPE = _BLOCKEND.enum_types_by_name['SubType'] -_LISTSTART_SUBTYPE = _LISTSTART.enum_types_by_name['SubType'] -_SIMULATIONTIMESTEPSTART_TIMESTEPUNITS = _SIMULATIONTIMESTEPSTART.enum_types_by_name['TimeStepUnits'] -_DECISIONCONFIGURATIONSTART_ACMECONFIG_ACMEAGENT = _DECISIONCONFIGURATIONSTART_ACMECONFIG.enum_types_by_name['AcmeAgent'] -_DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMALGORITHM = _DECISIONCONFIGURATIONSTART_LLMCONFIG.enum_types_by_name['LLMAlgorithm'] -_DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMGOAL = _DECISIONCONFIGURATIONSTART_LLMCONFIG.enum_types_by_name['LLMGoal'] -_DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG_NEVERGRADALGORITHM = _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG.enum_types_by_name['NeverGradAlgorithm'] -_DECISIONCONFIGURATIONSTART_OPTIMIZERTYPE = _DECISIONCONFIGURATIONSTART.enum_types_by_name['OptimizerType'] -_DECISIONCONFIGURATIONSTART_DATATYPE = 
_DECISIONCONFIGURATIONSTART.enum_types_by_name['DataType'] -Attribute = _reflection.GeneratedProtocolMessageType('Attribute', (_message.Message,), { - 'DESCRIPTOR' : _ATTRIBUTE, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.Attribute) - }) -_sym_db.RegisterMessage(Attribute) - -Object = _reflection.GeneratedProtocolMessageType('Object', (_message.Message,), { - - 'Metrics' : _reflection.GeneratedProtocolMessageType('Metrics', (_message.Message,), { - 'DESCRIPTOR' : _OBJECT_METRICS, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.Object.Metrics) - }) - , - - 'Order' : _reflection.GeneratedProtocolMessageType('Order', (_message.Message,), { - 'DESCRIPTOR' : _OBJECT_ORDER, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.Object.Order) - }) - , - 'DESCRIPTOR' : _OBJECT, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.Object) - }) -_sym_db.RegisterMessage(Object) -_sym_db.RegisterMessage(Object.Metrics) -_sym_db.RegisterMessage(Object.Order) - -ProposeAction = _reflection.GeneratedProtocolMessageType('ProposeAction', (_message.Message,), { - 'DESCRIPTOR' : _PROPOSEACTION, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.ProposeAction) - }) -_sym_db.RegisterMessage(ProposeAction) - -ConfigurationStart = _reflection.GeneratedProtocolMessageType('ConfigurationStart', (_message.Message,), { - 'DESCRIPTOR' : _CONFIGURATIONSTART, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.ConfigurationStart) - }) -_sym_db.RegisterMessage(ConfigurationStart) - -Exception = _reflection.GeneratedProtocolMessageType('Exception', (_message.Message,), { - 'DESCRIPTOR' : _EXCEPTION, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.Exception) - }) -_sym_db.RegisterMessage(Exception) - -Tensor = _reflection.GeneratedProtocolMessageType('Tensor', (_message.Message,), { - - 'StringValues' : _reflection.GeneratedProtocolMessageType('StringValues', (_message.Message,), { - 'DESCRIPTOR' : _TENSOR_STRINGVALUES, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.Tensor.StringValues) - }) - , - - 'BytesValues' : _reflection.GeneratedProtocolMessageType('BytesValues', (_message.Message,), { - 'DESCRIPTOR' : _TENSOR_BYTESVALUES, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.Tensor.BytesValues) - }) - , - - 'Int64Values' : _reflection.GeneratedProtocolMessageType('Int64Values', (_message.Message,), { - 'DESCRIPTOR' : _TENSOR_INT64VALUES, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.Tensor.Int64Values) - }) - , - - 'DoubleValues' : _reflection.GeneratedProtocolMessageType('DoubleValues', (_message.Message,), { - 'DESCRIPTOR' : _TENSOR_DOUBLEVALUES, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.Tensor.DoubleValues) - }) - , - - 'BoolValues' : _reflection.GeneratedProtocolMessageType('BoolValues', (_message.Message,), { - 'DESCRIPTOR' : _TENSOR_BOOLVALUES, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.Tensor.BoolValues) - }) - , - 'DESCRIPTOR' : _TENSOR, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.Tensor) - }) 
-_sym_db.RegisterMessage(Tensor) -_sym_db.RegisterMessage(Tensor.StringValues) -_sym_db.RegisterMessage(Tensor.BytesValues) -_sym_db.RegisterMessage(Tensor.Int64Values) -_sym_db.RegisterMessage(Tensor.DoubleValues) -_sym_db.RegisterMessage(Tensor.BoolValues) - -Link = _reflection.GeneratedProtocolMessageType('Link', (_message.Message,), { - 'DESCRIPTOR' : _LINK, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.Link) - }) -_sym_db.RegisterMessage(Link) - -TensorFlowExample = _reflection.GeneratedProtocolMessageType('TensorFlowExample', (_message.Message,), { - 'DESCRIPTOR' : _TENSORFLOWEXAMPLE, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.TensorFlowExample) - }) -_sym_db.RegisterMessage(TensorFlowExample) - -Log = _reflection.GeneratedProtocolMessageType('Log', (_message.Message,), { - 'DESCRIPTOR' : _LOG, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.Log) - }) -_sym_db.RegisterMessage(Log) - -Text = _reflection.GeneratedProtocolMessageType('Text', (_message.Message,), { - 'DESCRIPTOR' : _TEXT, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.Text) - }) -_sym_db.RegisterMessage(Text) - -Value = _reflection.GeneratedProtocolMessageType('Value', (_message.Message,), { - 'DESCRIPTOR' : _VALUE, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.Value) - }) -_sym_db.RegisterMessage(Value) - -BlockStart = _reflection.GeneratedProtocolMessageType('BlockStart', (_message.Message,), { - 'DESCRIPTOR' : _BLOCKSTART, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.BlockStart) - }) -_sym_db.RegisterMessage(BlockStart) - -BlockEnd = _reflection.GeneratedProtocolMessageType('BlockEnd', (_message.Message,), { - - 'Metrics' : _reflection.GeneratedProtocolMessageType('Metrics', (_message.Message,), { - 'DESCRIPTOR' : _BLOCKEND_METRICS, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.BlockEnd.Metrics) - }) - , - 'DESCRIPTOR' : _BLOCKEND, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.BlockEnd) - }) -_sym_db.RegisterMessage(BlockEnd) -_sym_db.RegisterMessage(BlockEnd.Metrics) - -ListStart = _reflection.GeneratedProtocolMessageType('ListStart', (_message.Message,), { - 'DESCRIPTOR' : _LISTSTART, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.ListStart) - }) -_sym_db.RegisterMessage(ListStart) - -TensorFlowModelTrainingEpochStart = _reflection.GeneratedProtocolMessageType('TensorFlowModelTrainingEpochStart', (_message.Message,), { - 'DESCRIPTOR' : _TENSORFLOWMODELTRAININGEPOCHSTART, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.TensorFlowModelTrainingEpochStart) - }) -_sym_db.RegisterMessage(TensorFlowModelTrainingEpochStart) - -AttributeStart = _reflection.GeneratedProtocolMessageType('AttributeStart', (_message.Message,), { - 'DESCRIPTOR' : _ATTRIBUTESTART, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.AttributeStart) - }) -_sym_db.RegisterMessage(AttributeStart) - -AttributeEnd = _reflection.GeneratedProtocolMessageType('AttributeEnd', (_message.Message,), { - 'DESCRIPTOR' : _ATTRIBUTEEND, - '__module__' : 'sight.proto.sight_pb2' - # 
@@protoc_insertion_point(class_scope:sight.x.proto.AttributeEnd) - }) -_sym_db.RegisterMessage(AttributeEnd) - -Params = _reflection.GeneratedProtocolMessageType('Params', (_message.Message,), { - 'DESCRIPTOR' : _PARAMS, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.Params) - }) -_sym_db.RegisterMessage(Params) - -SimulationStart = _reflection.GeneratedProtocolMessageType('SimulationStart', (_message.Message,), { - 'DESCRIPTOR' : _SIMULATIONSTART, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.SimulationStart) - }) -_sym_db.RegisterMessage(SimulationStart) - -SimulationParametersStart = _reflection.GeneratedProtocolMessageType('SimulationParametersStart', (_message.Message,), { - 'DESCRIPTOR' : _SIMULATIONPARAMETERSSTART, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.SimulationParametersStart) - }) -_sym_db.RegisterMessage(SimulationParametersStart) - -SimulationTimeStepStart = _reflection.GeneratedProtocolMessageType('SimulationTimeStepStart', (_message.Message,), { - 'DESCRIPTOR' : _SIMULATIONTIMESTEPSTART, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.SimulationTimeStepStart) - }) -_sym_db.RegisterMessage(SimulationTimeStepStart) - -ContinuousProbDist = _reflection.GeneratedProtocolMessageType('ContinuousProbDist', (_message.Message,), { - - 'Gaussian' : _reflection.GeneratedProtocolMessageType('Gaussian', (_message.Message,), { - 'DESCRIPTOR' : _CONTINUOUSPROBDIST_GAUSSIAN, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.ContinuousProbDist.Gaussian) - }) - , - - 'Uniform' : _reflection.GeneratedProtocolMessageType('Uniform', (_message.Message,), { - 'DESCRIPTOR' : _CONTINUOUSPROBDIST_UNIFORM, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.ContinuousProbDist.Uniform) - }) - , - 'DESCRIPTOR' : _CONTINUOUSPROBDIST, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.ContinuousProbDist) - }) -_sym_db.RegisterMessage(ContinuousProbDist) -_sym_db.RegisterMessage(ContinuousProbDist.Gaussian) -_sym_db.RegisterMessage(ContinuousProbDist.Uniform) - -DiscreteProbDist = _reflection.GeneratedProtocolMessageType('DiscreteProbDist', (_message.Message,), { - - 'Uniform' : _reflection.GeneratedProtocolMessageType('Uniform', (_message.Message,), { - 'DESCRIPTOR' : _DISCRETEPROBDIST_UNIFORM, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.DiscreteProbDist.Uniform) - }) - , - 'DESCRIPTOR' : _DISCRETEPROBDIST, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.DiscreteProbDist) - }) -_sym_db.RegisterMessage(DiscreteProbDist) -_sym_db.RegisterMessage(DiscreteProbDist.Uniform) - -DecisionConfigurationStart = _reflection.GeneratedProtocolMessageType('DecisionConfigurationStart', (_message.Message,), { - - 'VizierConfig' : _reflection.GeneratedProtocolMessageType('VizierConfig', (_message.Message,), { - 'DESCRIPTOR' : _DECISIONCONFIGURATIONSTART_VIZIERCONFIG, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.DecisionConfigurationStart.VizierConfig) - }) - , - - 'AcmeConfig' : _reflection.GeneratedProtocolMessageType('AcmeConfig', (_message.Message,), { - 'DESCRIPTOR' : _DECISIONCONFIGURATIONSTART_ACMECONFIG, - '__module__' : 
'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.DecisionConfigurationStart.AcmeConfig) - }) - , - - 'GeneticAlgorithmConfig' : _reflection.GeneratedProtocolMessageType('GeneticAlgorithmConfig', (_message.Message,), { - 'DESCRIPTOR' : _DECISIONCONFIGURATIONSTART_GENETICALGORITHMCONFIG, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.DecisionConfigurationStart.GeneticAlgorithmConfig) - }) - , - - 'ExhaustiveSearchConfig' : _reflection.GeneratedProtocolMessageType('ExhaustiveSearchConfig', (_message.Message,), { - 'DESCRIPTOR' : _DECISIONCONFIGURATIONSTART_EXHAUSTIVESEARCHCONFIG, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.DecisionConfigurationStart.ExhaustiveSearchConfig) - }) - , - - 'LLMConfig' : _reflection.GeneratedProtocolMessageType('LLMConfig', (_message.Message,), { - 'DESCRIPTOR' : _DECISIONCONFIGURATIONSTART_LLMCONFIG, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.DecisionConfigurationStart.LLMConfig) - }) - , - - 'BayesianOptConfig' : _reflection.GeneratedProtocolMessageType('BayesianOptConfig', (_message.Message,), { - 'DESCRIPTOR' : _DECISIONCONFIGURATIONSTART_BAYESIANOPTCONFIG, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.DecisionConfigurationStart.BayesianOptConfig) - }) - , - - 'SensitivityAnalysisConfig' : _reflection.GeneratedProtocolMessageType('SensitivityAnalysisConfig', (_message.Message,), { - 'DESCRIPTOR' : _DECISIONCONFIGURATIONSTART_SENSITIVITYANALYSISCONFIG, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.DecisionConfigurationStart.SensitivityAnalysisConfig) - }) - , - - 'NeverGradConfig' : _reflection.GeneratedProtocolMessageType('NeverGradConfig', (_message.Message,), { - 'DESCRIPTOR' : _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.DecisionConfigurationStart.NeverGradConfig) - }) - , - - 'SMCPyConfig' : _reflection.GeneratedProtocolMessageType('SMCPyConfig', (_message.Message,), { - 'DESCRIPTOR' : _DECISIONCONFIGURATIONSTART_SMCPYCONFIG, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.DecisionConfigurationStart.SMCPyConfig) - }) - , - - 'WorklistSchedulerConfig' : _reflection.GeneratedProtocolMessageType('WorklistSchedulerConfig', (_message.Message,), { - 'DESCRIPTOR' : _DECISIONCONFIGURATIONSTART_WORKLISTSCHEDULERCONFIG, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.DecisionConfigurationStart.WorklistSchedulerConfig) - }) - , - - 'ChoiceConfig' : _reflection.GeneratedProtocolMessageType('ChoiceConfig', (_message.Message,), { - 'DESCRIPTOR' : _DECISIONCONFIGURATIONSTART_CHOICECONFIG, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.DecisionConfigurationStart.ChoiceConfig) - }) - , - - 'ChoiceConfigEntry' : _reflection.GeneratedProtocolMessageType('ChoiceConfigEntry', (_message.Message,), { - 'DESCRIPTOR' : _DECISIONCONFIGURATIONSTART_CHOICECONFIGENTRY, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.DecisionConfigurationStart.ChoiceConfigEntry) - }) - , - - 'AttrProps' : _reflection.GeneratedProtocolMessageType('AttrProps', (_message.Message,), { - 'DESCRIPTOR' : _DECISIONCONFIGURATIONSTART_ATTRPROPS, - 
'__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.DecisionConfigurationStart.AttrProps) - }) - , - - 'StateAttrsEntry' : _reflection.GeneratedProtocolMessageType('StateAttrsEntry', (_message.Message,), { - 'DESCRIPTOR' : _DECISIONCONFIGURATIONSTART_STATEATTRSENTRY, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.DecisionConfigurationStart.StateAttrsEntry) - }) - , - - 'ActionAttrsEntry' : _reflection.GeneratedProtocolMessageType('ActionAttrsEntry', (_message.Message,), { - 'DESCRIPTOR' : _DECISIONCONFIGURATIONSTART_ACTIONATTRSENTRY, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.DecisionConfigurationStart.ActionAttrsEntry) - }) - , - - 'OutcomeAttrsEntry' : _reflection.GeneratedProtocolMessageType('OutcomeAttrsEntry', (_message.Message,), { - 'DESCRIPTOR' : _DECISIONCONFIGURATIONSTART_OUTCOMEATTRSENTRY, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.DecisionConfigurationStart.OutcomeAttrsEntry) - }) - , - 'DESCRIPTOR' : _DECISIONCONFIGURATIONSTART, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.DecisionConfigurationStart) - }) -_sym_db.RegisterMessage(DecisionConfigurationStart) -_sym_db.RegisterMessage(DecisionConfigurationStart.VizierConfig) -_sym_db.RegisterMessage(DecisionConfigurationStart.AcmeConfig) -_sym_db.RegisterMessage(DecisionConfigurationStart.GeneticAlgorithmConfig) -_sym_db.RegisterMessage(DecisionConfigurationStart.ExhaustiveSearchConfig) -_sym_db.RegisterMessage(DecisionConfigurationStart.LLMConfig) -_sym_db.RegisterMessage(DecisionConfigurationStart.BayesianOptConfig) -_sym_db.RegisterMessage(DecisionConfigurationStart.SensitivityAnalysisConfig) -_sym_db.RegisterMessage(DecisionConfigurationStart.NeverGradConfig) -_sym_db.RegisterMessage(DecisionConfigurationStart.SMCPyConfig) -_sym_db.RegisterMessage(DecisionConfigurationStart.WorklistSchedulerConfig) -_sym_db.RegisterMessage(DecisionConfigurationStart.ChoiceConfig) -_sym_db.RegisterMessage(DecisionConfigurationStart.ChoiceConfigEntry) -_sym_db.RegisterMessage(DecisionConfigurationStart.AttrProps) -_sym_db.RegisterMessage(DecisionConfigurationStart.StateAttrsEntry) -_sym_db.RegisterMessage(DecisionConfigurationStart.ActionAttrsEntry) -_sym_db.RegisterMessage(DecisionConfigurationStart.OutcomeAttrsEntry) - -DecisionParam = _reflection.GeneratedProtocolMessageType('DecisionParam', (_message.Message,), { - 'DESCRIPTOR' : _DECISIONPARAM, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.DecisionParam) - }) -_sym_db.RegisterMessage(DecisionParam) - -DecisionPoint = _reflection.GeneratedProtocolMessageType('DecisionPoint', (_message.Message,), { - 'DESCRIPTOR' : _DECISIONPOINT, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.DecisionPoint) - }) -_sym_db.RegisterMessage(DecisionPoint) +from sight.proto.widgets.pipeline.flume import ( + flume_pb2 as sight_dot_proto_dot_widgets_dot_pipeline_dot_flume_dot_flume__pb2 +) -DecisionOutcome = _reflection.GeneratedProtocolMessageType('DecisionOutcome', (_message.Message,), { - 'DESCRIPTOR' : _DECISIONOUTCOME, - '__module__' : 'sight.proto.sight_pb2' - # @@protoc_insertion_point(class_scope:sight.x.proto.DecisionOutcome) - }) -_sym_db.RegisterMessage(DecisionOutcome) +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( + 
b'\n\x17sight/proto/sight.proto\x12\rsight.x.proto\x1a\x19sight/proto/example.proto\x1a.sight/proto/widgets/pipeline/flume/flume.proto\"\'\n\tAttribute\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xb1\x0c\n\x06Object\x12\x10\n\x08location\x18\x01 \x01(\t\x12\r\n\x05index\x18\x02 \x01(\x03\x12\x0f\n\x07log_uid\x18\x18 \x01(\t\x12+\n\tattribute\x18\x03 \x03(\x0b\x32\x18.sight.x.proto.Attribute\x12/\n\x08sub_type\x18\x04 \x01(\x0e\x32\x1d.sight.x.proto.Object.SubType\x12#\n\x04text\x18\x05 \x01(\x0b\x32\x13.sight.x.proto.TextH\x00\x12\x30\n\x0b\x62lock_start\x18\x06 \x01(\x0b\x32\x19.sight.x.proto.BlockStartH\x00\x12,\n\tblock_end\x18\x07 \x01(\x0b\x32\x17.sight.x.proto.BlockEndH\x00\x12\x38\n\x0f\x61ttribute_start\x18\x08 \x01(\x0b\x32\x1d.sight.x.proto.AttributeStartH\x00\x12\x34\n\rattribute_end\x18\t \x01(\x0b\x32\x1b.sight.x.proto.AttributeEndH\x00\x12\x46\n\x10\x66lume_do_fn_emit\x18\x0e \x01(\x0b\x32*.sight.x.widgets.flume.proto.FlumeDoFnEmitH\x00\x12@\n\x0c\x66lume_depend\x18\x0f \x01(\x0b\x32(.sight.x.widgets.flume.proto.FlumeDependH\x00\x12%\n\x05value\x18\x10 \x01(\x0b\x32\x14.sight.x.proto.ValueH\x00\x12-\n\texception\x18\x11 \x01(\x0b\x32\x18.sight.x.proto.ExceptionH\x00\x12\'\n\x06tensor\x18\x14 \x01(\x0b\x32\x15.sight.x.proto.TensorH\x00\x12?\n\x13tensor_flow_example\x18\x15 \x01(\x0b\x32 .sight.x.proto.TensorFlowExampleH\x00\x12\x36\n\x0e\x64\x65\x63ision_point\x18\x16 \x01(\x0b\x32\x1c.sight.x.proto.DecisionPointH\x00\x12:\n\x10\x64\x65\x63ision_outcome\x18\x17 \x01(\x0b\x32\x1e.sight.x.proto.DecisionOutcomeH\x00\x12#\n\x04link\x18\x19 \x01(\x0b\x32\x13.sight.x.proto.LinkH\x00\x12\x36\n\x0epropose_action\x18\x1a \x01(\x0b\x32\x1c.sight.x.proto.ProposeActionH\x00\x12\x0c\n\x04\x66ile\x18\n \x01(\t\x12\x0c\n\x04line\x18\x0b \x01(\x05\x12\x0c\n\x04\x66unc\x18\x0c \x01(\t\x12\x1f\n\x17\x61ncestor_start_location\x18\r \x03(\t\x12.\n\x07metrics\x18\x12 \x01(\x0b\x32\x1d.sight.x.proto.Object.Metrics\x12*\n\x05order\x18\x13 \x01(\x0b\x32\x1b.sight.x.proto.Object.Order\x1aX\n\x07Metrics\x12%\n\x1dprocess_free_swap_space_bytes\x18\x01 \x01(\x03\x12&\n\x1eprocess_total_swap_space_bytes\x18\x02 \x01(\x03\x1a\x1d\n\x05Order\x12\x14\n\x0ctimestamp_ns\x18\x01 \x01(\x03\"\xd2\x02\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x0b\n\x07ST_TEXT\x10\x01\x12\x12\n\x0eST_BLOCK_START\x10\x02\x12\x10\n\x0cST_BLOCK_END\x10\x03\x12\x16\n\x12ST_ATTRIBUTE_START\x10\x04\x12\x14\n\x10ST_ATTRIBUTE_END\x10\x05\x12\x17\n\x13ST_FLUME_DO_FN_EMIT\x10\x06\x12\x13\n\x0fST_FLUME_DEPEND\x10\x07\x12\x0c\n\x08ST_VALUE\x10\x08\x12\x10\n\x0cST_EXCEPTION\x10\t\x12\r\n\tST_TENSOR\x10\n\x12\x19\n\x15ST_TENSORFLOW_EXAMPLE\x10\x0c\x12\x15\n\x11ST_DECISION_POINT\x10\r\x12\x17\n\x13ST_DECISION_OUTCOME\x10\x0e\x12\n\n\x06ST_GAP\x10\x0b\x12\x0b\n\x07ST_LINK\x10\x0f\x12\x15\n\x11ST_PROPOSE_ACTION\x10\x10\x42\x12\n\x10sub_type_message\"\x88\x01\n\rProposeAction\x12\x32\n\x0c\x61\x63tion_attrs\x18\x01 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x30\n\nattributes\x18\x02 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x11\n\taction_id\x18\x03 \x01(\t\"\xec\x01\n\x12\x43onfigurationStart\x12;\n\x08sub_type\x18\x01 \x01(\x0e\x32).sight.x.proto.ConfigurationStart.SubType\x12K\n\x16\x64\x65\x63ision_configuration\x18\x02 \x01(\x0b\x32).sight.x.proto.DecisionConfigurationStartH\x00\"8\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x1d\n\x19ST_DECISION_CONFIGURATION\x10\x01\x42\x12\n\x10sub_type_message\";\n\tException\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t\x12\x11\n\ttraceback\x18\x03 \x01(\t\"\xd7\x05\n\x06Tensor\x12/\n\x08sub_type\x18\x01 \x01(\x0e\x32\x1d.sight.x.proto.Tensor.SubType\x12\r\n\x05label\x18\x02 \x01(\t\x12\r\n\x05shape\x18\x03 \x03(\x03\x12\x11\n\tdim_label\x18\t \x03(\t\x12;\n\x0f\x64im_axis_values\x18\n \x03(\x0b\x32\".sight.x.proto.Tensor.StringValues\x12;\n\rstring_values\x18\x04 \x01(\x0b\x32\".sight.x.proto.Tensor.StringValuesH\x00\x12\x39\n\x0c\x62ytes_values\x18\x05 \x01(\x0b\x32!.sight.x.proto.Tensor.BytesValuesH\x00\x12\x39\n\x0cint64_values\x18\x06 \x01(\x0b\x32!.sight.x.proto.Tensor.Int64ValuesH\x00\x12;\n\rdouble_values\x18\x07 \x01(\x0b\x32\".sight.x.proto.Tensor.DoubleValuesH\x00\x12\x37\n\x0b\x62ool_values\x18\x08 \x01(\x0b\x32 .sight.x.proto.Tensor.BoolValuesH\x00\x1a\x1d\n\x0cStringValues\x12\r\n\x05value\x18\x01 \x03(\t\x1a\x1c\n\x0b\x42ytesValues\x12\r\n\x05value\x18\x01 \x03(\x0c\x1a\x1c\n\x0bInt64Values\x12\r\n\x05value\x18\x01 \x03(\x03\x1a\x1d\n\x0c\x44oubleValues\x12\r\n\x05value\x18\x01 \x03(\x01\x1a\x1b\n\nBoolValues\x12\r\n\x05value\x18\x01 \x03(\x08\"`\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\r\n\tST_STRING\x10\x01\x12\x0c\n\x08ST_BYTES\x10\x02\x12\x0c\n\x08ST_INT64\x10\x03\x12\r\n\tST_DOUBLE\x10\x04\x12\x0b\n\x07ST_BOOL\x10\x05\x42\x0c\n\nvalue_type\"\x9c\x01\n\x04Link\x12\x17\n\x0flinked_sight_id\x18\x01 \x01(\t\x12/\n\tlink_type\x18\x02 \x01(\x0e\x32\x1c.sight.x.proto.Link.LinkType\"J\n\x08LinkType\x12\x0e\n\nLT_UNKNOWN\x10\x00\x12\x16\n\x12LT_PARENT_TO_CHILD\x10\x01\x12\x16\n\x12LT_CHILD_TO_PARENT\x10\x02\"\x8e\x02\n\x11TensorFlowExample\x12/\n\rinput_example\x18\x01 \x01(\x0b\x32\x16.sight.x.proto.ExampleH\x00\x12@\n\x16input_sequence_example\x18\x02 \x01(\x0b\x32\x1e.sight.x.proto.SequenceExampleH\x00\x12\x30\n\x0eoutput_example\x18\x03 \x01(\x0b\x32\x16.sight.x.proto.ExampleH\x01\x12\x41\n\x17output_sequence_example\x18\x04 \x01(\x0b\x32\x1e.sight.x.proto.SequenceExampleH\x01\x42\x07\n\x05inputB\x08\n\x06output\")\n\x03Log\x12\"\n\x03obj\x18\x01 \x03(\x0b\x32\x15.sight.x.proto.Object\"x\n\x04Text\x12\x0c\n\x04text\x18\x01 \x01(\t\x12-\n\x08sub_type\x18\x02 \x01(\x0e\x32\x1b.sight.x.proto.Text.SubType\"3\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x0b\n\x07ST_TEXT\x10\x01\x12\x0b\n\x07ST_HTML\x10\x02\"\xf4\x02\n\x05Value\x12.\n\x08sub_type\x18\x01 \x01(\x0e\x32\x1c.sight.x.proto.Value.SubType\x12\x16\n\x0cstring_value\x18\x02 \x01(\tH\x00\x12\x15\n\x0b\x62ytes_value\x18\x03 \x01(\x0cH\x00\x12\x15\n\x0bint64_value\x18\x04 \x01(\x03H\x00\x12\x16\n\x0c\x64ouble_value\x18\x05 \x01(\x01H\x00\x12\x14\n\nbool_value\x18\x06 \x01(\x08H\x00\x12\x14\n\nnone_value\x18\x07 \x01(\x08H\x00\x12\x14\n\njson_value\x18\x08 \x01(\tH\x00\x12\x11\n\tmime_type\x18\t \x01(\t\"z\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\r\n\tST_STRING\x10\x01\x12\x0c\n\x08ST_BYTES\x10\x02\x12\x0c\n\x08ST_INT64\x10\x03\x12\r\n\tST_DOUBLE\x10\x04\x12\x0b\n\x07ST_BOOL\x10\x05\x12\x0b\n\x07ST_NONE\x10\x06\x12\x0b\n\x07ST_JSON\x10\x07\x42\x0c\n\nvalue_type\"\xeb\n\n\nBlockStart\x12\r\n\x05label\x18\x01 \x01(\t\x12\x33\n\x08sub_type\x18\x02 \x01(\x0e\x32!.sight.x.proto.BlockStart.SubType\x12J\n\x12\x66lume_do_fn_create\x18\x03 \x01(\x0b\x32,.sight.x.widgets.flume.proto.FlumeDoFnCreateH\x00\x12M\n\x14\x66lume_do_fn_start_do\x18\x04 \x01(\x0b\x32-.sight.x.widgets.flume.proto.FlumeDoFnStartDoH\x00\x12T\n\x17\x66lume_compare_fn_create\x18\x05 \x01(\x0b\x32\x31.sight.x.widgets.flume.proto.FlumeCompareFnCreateH\x00\x12\x61\n\x1e\x66lume_compare_fn_start_compare\x18\x06 
\x01(\x0b\x32\x37.sight.x.widgets.flume.proto.FlumeCompareFnStartCompareH\x00\x12(\n\x04list\x18\x07 \x01(\x0b\x32\x18.sight.x.proto.ListStartH\x00\x12\\\n tensor_flow_model_training_epoch\x18\x08 \x01(\x0b\x32\x30.sight.x.proto.TensorFlowModelTrainingEpochStartH\x00\x12:\n\x10simulation_start\x18\t \x01(\x0b\x32\x1e.sight.x.proto.SimulationStartH\x00\x12O\n\x1bsimulation_parameters_start\x18\n \x01(\x0b\x32(.sight.x.proto.SimulationParametersStartH\x00\x12L\n\x1asimulation_time_step_start\x18\x0b \x01(\x0b\x32&.sight.x.proto.SimulationTimeStepStartH\x00\x12:\n\rconfiguration\x18\x0c \x01(\x0b\x32!.sight.x.proto.ConfigurationStartH\x00\"\x91\x04\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x19\n\x15ST_FLUME_DO_FN_CREATE\x10\x01\x12\x1b\n\x17ST_FLUME_DO_FN_START_DO\x10\x02\x12\x1e\n\x1aST_FLUME_COMPARE_FN_CREATE\x10\x03\x12%\n!ST_FLUME_COMPARE_FN_START_COMPARE\x10\x04\x12\x12\n\x0eST_NAMED_VALUE\x10\x05\x12\x0b\n\x07ST_LIST\x10\x06\x12\x0c\n\x08ST_TABLE\x10\x07\x12#\n\x1fST_TENSORFLOW_MODEL_APPLICATION\x10\x08\x12&\n\"ST_TENSORFLOW_MODEL_TRAINING_EPOCH\x10\t\x12 \n\x1cST_TENSORFLOW_MODEL_TRAINING\x10\n\x12\x11\n\rST_SIMULATION\x10\x0b\x12\x1c\n\x18ST_SIMULATION_PARAMETERS\x10\x0c\x12\x17\n\x13ST_SIMULATION_STATE\x10\r\x12\x1b\n\x17ST_SIMULATION_TIME_STEP\x10\x0e\x12\x1f\n\x1bST_SIMULATION_INITIAL_STATE\x10\x11\x12 \n\x1cST_SIMULATION_BOUNDARY_STATE\x10\x12\x12\x19\n\x15ST_CLUSTER_ASSIGNMENT\x10\x0f\x12\x14\n\x10ST_CONFIGURATION\x10\x10\x42\x12\n\x10sub_type_message\"\x8a\t\n\x08\x42lockEnd\x12\r\n\x05label\x18\x01 \x01(\t\x12\x31\n\x08sub_type\x18\x06 \x01(\x0e\x32\x1f.sight.x.proto.BlockEnd.SubType\x12\x1f\n\x17location_of_block_start\x18\x02 \x01(\t\x12\x1b\n\x13num_direct_contents\x18\x03 \x01(\x03\x12\x1f\n\x17num_transitive_contents\x18\x04 \x01(\x03\x12N\n\x14\x66lume_do_fn_complete\x18\x07 \x01(\x0b\x32..sight.x.widgets.flume.proto.FlumeDoFnCompleteH\x00\x12I\n\x12\x66lume_do_fn_end_do\x18\x08 \x01(\x0b\x32+.sight.x.widgets.flume.proto.FlumeDoFnEndDoH\x00\x12X\n\x19\x66lume_compare_fn_complete\x18\t \x01(\x0b\x32\x33.sight.x.widgets.flume.proto.FlumeCompareFnCompleteH\x00\x12]\n\x1c\x66lume_compare_fn_end_compare\x18\n \x01(\x0b\x32\x35.sight.x.widgets.flume.proto.FlumeCompareFnEndCompareH\x00\x12\x30\n\x07metrics\x18\x0c \x01(\x0b\x32\x1f.sight.x.proto.BlockEnd.Metrics\x1a\x45\n\x07Metrics\x12\x17\n\x0f\x65lapsed_time_ns\x18\x01 \x01(\x03\x12!\n\x19\x65xclusive_elapsed_time_ns\x18\x02 \x01(\x03\"\xfb\x03\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x1b\n\x17ST_FLUME_DO_FN_COMPLETE\x10\x01\x12\x19\n\x15ST_FLUME_DO_FN_END_DO\x10\x02\x12 \n\x1cST_FLUME_COMPARE_FN_COMPLETE\x10\x03\x12#\n\x1fST_FLUME_COMPARE_FN_END_COMPARE\x10\x04\x12\x12\n\x0eST_NAMED_VALUE\x10\x05\x12\x0b\n\x07ST_LIST\x10\x06\x12\x0c\n\x08ST_TABLE\x10\x07\x12#\n\x1fST_TENSORFLOW_MODEL_APPLICATION\x10\x08\x12&\n\"ST_TENSORFLOW_MODEL_TRAINING_EPOCH\x10\t\x12 \n\x1cST_TENSORFLOW_MODEL_TRAINING\x10\n\x12\x11\n\rST_SIMULATION\x10\x0b\x12\x1c\n\x18ST_SIMULATION_PARAMETERS\x10\x0c\x12\x17\n\x13ST_SIMULATION_STATE\x10\r\x12\x1b\n\x17ST_SIMULATION_TIME_STEP\x10\x0e\x12\x1f\n\x1bST_SIMULATION_INITIAL_STATE\x10\x11\x12 \n\x1cST_SIMULATION_BOUNDARY_STATE\x10\x12\x12\x19\n\x15ST_CLUSTER_ASSIGNMENT\x10\x0f\x42\x12\n\x10sub_type_message\"\xaf\x01\n\tListStart\x12\x32\n\x08sub_type\x18\x01 \x01(\x0e\x32 
.sight.x.proto.ListStart.SubType\"n\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x12\n\x0eST_HOMOGENEOUS\x10\x01\x12\x14\n\x10ST_HETEROGENEOUS\x10\x02\x12\n\n\x06ST_MAP\x10\x03\x12\x10\n\x0cST_MAP_ENTRY\x10\x04\x12\x0b\n\x07ST_DICT\x10\x05\"J\n!TensorFlowModelTrainingEpochStart\x12\x11\n\tepoch_num\x18\x01 \x01(\x03\x12\x12\n\nbatch_size\x18\x02 \x01(\x03\"=\n\x0e\x41ttributeStart\x12+\n\tattribute\x18\x01 \x01(\x0b\x32\x18.sight.x.proto.Attribute\"\x1b\n\x0c\x41ttributeEnd\x12\x0b\n\x03key\x18\x01 \x01(\t\"\xb2\x03\n\x06Params\x12\r\n\x05local\x18\x01 \x01(\x08\x12\x14\n\x0clog_dir_path\x18\x02 \x01(\t\x12\r\n\x05label\x18\x03 \x01(\t\x12\x13\n\x0btext_output\x18\x04 \x01(\x08\x12\x17\n\x0f\x63olumnio_output\x18\x05 \x01(\x08\x12\x18\n\x10\x63\x61pacitor_output\x18\x06 \x01(\x08\x12\x11\n\tlog_owner\x18\x07 \x01(\t\x12\x13\n\x0bpath_prefix\x18\x08 \x01(\t\x12\x1a\n\x12\x63ontainer_location\x18\t \x01(\t\x12\n\n\x02id\x18\n \x01(\x03\x12\x15\n\rsilent_logger\x18\x0b \x01(\x08\x12\x11\n\tin_memory\x18\x0c \x01(\x08\x12\x13\n\x0b\x61vro_output\x18\r \x01(\x08\x12\x12\n\nproject_id\x18\x0e \x01(\t\x12\x13\n\x0b\x62ucket_name\x18\x0f \x01(\t\x12\x10\n\x08gcp_path\x18\x10 \x01(\t\x12\x13\n\x0b\x66ile_format\x18\x11 \x01(\t\x12\x14\n\x0c\x64\x61taset_name\x18\x12 \x01(\t\x12\x1c\n\x14\x65xternal_file_format\x18\x13 \x01(\t\x12\x19\n\x11\x65xternal_file_uri\x18\x14 \x01(\t\"\x11\n\x0fSimulationStart\"\x1b\n\x19SimulationParametersStart\"\xb8\x02\n\x17SimulationTimeStepStart\x12\x17\n\x0ftime_step_index\x18\x01 \x03(\x03\x12\x11\n\ttime_step\x18\x02 \x01(\x02\x12\x16\n\x0etime_step_size\x18\x03 \x01(\x02\x12M\n\x0ftime_step_units\x18\x04 \x01(\x0e\x32\x34.sight.x.proto.SimulationTimeStepStart.TimeStepUnits\"\x89\x01\n\rTimeStepUnits\x12\x0f\n\x0bTSU_UNKNOWN\x10\x00\x12\x0e\n\nTSU_SECOND\x10\x01\x12\x0e\n\nTSU_MINUTE\x10\x02\x12\x0c\n\x08TSU_HOUR\x10\x03\x12\x0b\n\x07TSU_DAY\x10\x04\x12\r\n\tTSU_MONTH\x10\x05\x12\x0f\n\x0bTSU_QUARTER\x10\x06\x12\x0c\n\x08TSU_YEAR\x10\x07\"\xf0\x01\n\x12\x43ontinuousProbDist\x12>\n\x08gaussian\x18\x01 \x01(\x0b\x32*.sight.x.proto.ContinuousProbDist.GaussianH\x00\x12<\n\x07uniform\x18\x02 \x01(\x0b\x32).sight.x.proto.ContinuousProbDist.UniformH\x00\x1a\'\n\x08Gaussian\x12\x0c\n\x04mean\x18\x01 \x01(\x02\x12\r\n\x05stdev\x18\x02 \x01(\x02\x1a+\n\x07Uniform\x12\x0f\n\x07min_val\x18\x01 \x01(\x02\x12\x0f\n\x07max_val\x18\x02 \x01(\x02\x42\x06\n\x04\x64ist\"\x83\x01\n\x10\x44iscreteProbDist\x12:\n\x07uniform\x18\x01 \x01(\x0b\x32\'.sight.x.proto.DiscreteProbDist.UniformH\x00\x1a+\n\x07Uniform\x12\x0f\n\x07min_val\x18\x01 \x01(\x03\x12\x0f\n\x07max_val\x18\x02 \x01(\x03\x42\x06\n\x04\x64ist\"\xa6\x1c\n\x1a\x44\x65\x63isionConfigurationStart\x12O\n\x0eoptimizer_type\x18\x01 \x01(\x0e\x32\x37.sight.x.proto.DecisionConfigurationStart.OptimizerType\x12R\n\rchoice_config\x18\x02 \x03(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.ChoiceConfigEntry\x12N\n\x0bstate_attrs\x18\x03 \x03(\x0b\x32\x39.sight.x.proto.DecisionConfigurationStart.StateAttrsEntry\x12P\n\x0c\x61\x63tion_attrs\x18\x04 \x03(\x0b\x32:.sight.x.proto.DecisionConfigurationStart.ActionAttrsEntry\x12R\n\routcome_attrs\x18\x05 \x03(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.OutcomeAttrsEntry\x12\x12\n\nnum_trials\x18\x06 \x01(\x03\x1a\x0e\n\x0cVizierConfig\x1a\xf1\x01\n\nAcmeConfig\x12R\n\nacme_agent\x18\x01 
\x01(\x0e\x32>.sight.x.proto.DecisionConfigurationStart.AcmeConfig.AcmeAgent\"\x8e\x01\n\tAcmeAgent\x12\x0e\n\nAA_UNKNOWN\x10\x00\x12\n\n\x06\x41\x41_DQN\x10\x01\x12\x0b\n\x07\x41\x41_D4PG\x10\x02\x12\r\n\tAA_IMPALA\x10\x03\x12\x0b\n\x07\x41\x41_MDQN\x10\x04\x12\x0c\n\x08\x41\x41_QRDQN\x10\x05\x12\n\n\x06\x41\x41_PPO\x10\x06\x12\n\n\x06\x41\x41_MPO\x10\x07\x12\n\n\x06\x41\x41_SAC\x10\x08\x12\n\n\x06\x41\x41_TD3\x10\t\x1a\x35\n\x16GeneticAlgorithmConfig\x12\x1b\n\x13max_population_size\x18\x01 \x01(\x03\x1a\x18\n\x16\x45xhaustiveSearchConfig\x1a\xeb\x02\n\tLLMConfig\x12S\n\talgorithm\x18\x01 \x01(\x0e\x32@.sight.x.proto.DecisionConfigurationStart.LLMConfig.LLMAlgorithm\x12I\n\x04goal\x18\x02 \x01(\x0e\x32;.sight.x.proto.DecisionConfigurationStart.LLMConfig.LLMGoal\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\"W\n\x0cLLMAlgorithm\x12\x0e\n\nLA_UNKNOWN\x10\x00\x12\x11\n\rLA_TEXT_BISON\x10\x01\x12\x11\n\rLA_CHAT_BISON\x10\x02\x12\x11\n\rLA_GEMINI_PRO\x10\x03\"P\n\x07LLMGoal\x12\x0e\n\nLM_UNKNOWN\x10\x00\x12\x0f\n\x0bLM_OPTIMIZE\x10\x01\x12\x10\n\x0cLM_RECOMMEND\x10\x02\x12\x12\n\x0eLM_INTERACTIVE\x10\x03\x1a\x13\n\x11\x42\x61yesianOptConfig\x1a\x1b\n\x19SensitivityAnalysisConfig\x1a\x8e\x03\n\x0fNeverGradConfig\x12_\n\talgorithm\x18\x01 \x01(\x0e\x32L.sight.x.proto.DecisionConfigurationStart.NeverGradConfig.NeverGradAlgorithm\"\x99\x02\n\x12NeverGradAlgorithm\x12\x0e\n\nNG_UNKNOWN\x10\x00\x12\x0b\n\x07NG_AUTO\x10\x01\x12\t\n\x05NG_BO\x10\x02\x12\n\n\x06NG_CMA\x10\x03\x12\x12\n\x0eNG_TwoPointsDE\x10\x04\x12\x13\n\x0fNG_RandomSearch\x10\x05\x12\n\n\x06NG_PSO\x10\x06\x12\x1a\n\x16NG_ScrHammersleySearch\x10\x07\x12\t\n\x05NG_DE\x10\x08\x12\n\n\x06NG_CGA\x10\t\x12\t\n\x05NG_ES\x10\n\x12\r\n\tNG_DL_OPO\x10\x0b\x12\n\n\x06NG_DDE\x10\x0c\x12\n\n\x06NG_NMM\x10\r\x12\x10\n\x0cNG_TINY_SPSA\x10\x0e\x12\x11\n\rNG_VORONOI_DE\x10\x0f\x12\x10\n\x0cNG_CMA_SMALL\x10\x10\x1a\r\n\x0bSMCPyConfig\x1a\x19\n\x17WorklistSchedulerConfig\x1a\xac\x07\n\x0c\x43hoiceConfig\x12O\n\rvizier_config\x18\x01 \x01(\x0b\x32\x36.sight.x.proto.DecisionConfigurationStart.VizierConfigH\x00\x12K\n\x0b\x61\x63me_config\x18\x02 \x01(\x0b\x32\x34.sight.x.proto.DecisionConfigurationStart.AcmeConfigH\x00\x12\x64\n\x18genetic_algorithm_config\x18\x03 \x01(\x0b\x32@.sight.x.proto.DecisionConfigurationStart.GeneticAlgorithmConfigH\x00\x12\x64\n\x18\x65xhaustive_search_config\x18\x04 \x01(\x0b\x32@.sight.x.proto.DecisionConfigurationStart.ExhaustiveSearchConfigH\x00\x12I\n\nllm_config\x18\x05 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.LLMConfigH\x00\x12Z\n\x13\x62\x61yesian_opt_config\x18\x06 \x01(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.BayesianOptConfigH\x00\x12j\n\x1bsensitivity_analysis_config\x18\x07 \x01(\x0b\x32\x43.sight.x.proto.DecisionConfigurationStart.SensitivityAnalysisConfigH\x00\x12V\n\x11never_grad_config\x18\x08 \x01(\x0b\x32\x39.sight.x.proto.DecisionConfigurationStart.NeverGradConfigH\x00\x12N\n\rsmc_py_config\x18\t \x01(\x0b\x32\x35.sight.x.proto.DecisionConfigurationStart.SMCPyConfigH\x00\x12\x66\n\x19worklist_scheduler_config\x18\n \x01(\x0b\x32\x41.sight.x.proto.DecisionConfigurationStart.WorklistSchedulerConfigH\x00\x42\x0f\n\rchoice_config\x1ak\n\x11\x43hoiceConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x45\n\x05value\x18\x02 \x01(\x0b\x32\x36.sight.x.proto.DecisionConfigurationStart.ChoiceConfig:\x02\x38\x01\x1a\x8d\x02\n\tAttrProps\x12\x11\n\tmin_value\x18\x01 \x01(\x02\x12\x11\n\tmax_value\x18\x02 \x01(\x02\x12\x11\n\tstep_size\x18\x03 \x01(\x02\x12\x1a\n\x12valid_float_values\x18\x04 
\x03(\x02\x12\x18\n\x10valid_int_values\x18\x08 \x03(\x03\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\x12?\n\x14\x63ontinuous_prob_dist\x18\x06 \x01(\x0b\x32!.sight.x.proto.ContinuousProbDist\x12;\n\x12\x64iscrete_prob_dist\x18\x07 \x01(\x0b\x32\x1f.sight.x.proto.DiscreteProbDist\x1a\x66\n\x0fStateAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\x1ag\n\x10\x41\x63tionAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\x1ah\n\x11OutcomeAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\"\xea\x01\n\rOptimizerType\x12\x0e\n\nOT_UNKNOWN\x10\x00\x12\r\n\tOT_VIZIER\x10\x01\x12\x0b\n\x07OT_ACME\x10\x02\x12\x18\n\x14OT_GENETIC_ALGORITHM\x10\x03\x12\x18\n\x14OT_EXHAUSTIVE_SEARCH\x10\x04\x12\n\n\x06OT_LLM\x10\x05\x12\x13\n\x0fOT_BAYESIAN_OPT\x10\x06\x12\x1b\n\x17OT_SENSITIVITY_ANALYSIS\x10\x07\x12\x11\n\rOT_NEVER_GRAD\x10\x08\x12\r\n\tOT_SMC_PY\x10\t\x12\x19\n\x15OT_WORKLIST_SCHEDULER\x10\n\"U\n\x08\x44\x61taType\x12\r\n\tDT_UNKOWN\x10\x00\x12\x0c\n\x08\x44T_INT32\x10\x01\x12\x0c\n\x08\x44T_INT64\x10\x02\x12\x0e\n\nDT_FLOAT32\x10\x03\x12\x0e\n\nDT_FLOAT64\x10\x04\"\x8e\x01\n\rDecisionParam\x12\x38\n\x06params\x18\x01 \x03(\x0b\x32(.sight.x.proto.DecisionParam.ParamsEntry\x1a\x43\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.sight.x.proto.Value:\x02\x38\x01\"\xa5\x01\n\rDecisionPoint\x12\x14\n\x0c\x63hoice_label\x18\x01 \x01(\t\x12\x15\n\rchosen_option\x18\x02 \x01(\t\x12\x33\n\rchoice_params\x18\x03 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x32\n\x0cstate_params\x18\x04 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\"\x80\x01\n\x0f\x44\x65\x63isionOutcome\x12\x15\n\routcome_label\x18\x01 \x01(\t\x12\x0e\n\x06reward\x18\x02 \x01(\x02\x12\x10\n\x08\x64iscount\x18\x03 \x01(\x02\x12\x34\n\x0eoutcome_params\x18\x04 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParamb\x06proto3' +) +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sight.proto.sight_pb2', + globals()) if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None @@ -506,144 +34,148 @@ _DECISIONCONFIGURATIONSTART_ACTIONATTRSENTRY._serialized_options = b'8\001' _DECISIONCONFIGURATIONSTART_OUTCOMEATTRSENTRY._options = None _DECISIONCONFIGURATIONSTART_OUTCOMEATTRSENTRY._serialized_options = b'8\001' - _ATTRIBUTE._serialized_start=117 - _ATTRIBUTE._serialized_end=156 - _OBJECT._serialized_start=159 - _OBJECT._serialized_end=1744 - _OBJECT_METRICS._serialized_start=1264 - _OBJECT_METRICS._serialized_end=1352 - _OBJECT_ORDER._serialized_start=1354 - _OBJECT_ORDER._serialized_end=1383 - _OBJECT_SUBTYPE._serialized_start=1386 - _OBJECT_SUBTYPE._serialized_end=1724 - _PROPOSEACTION._serialized_start=1747 - _PROPOSEACTION._serialized_end=1883 - _CONFIGURATIONSTART._serialized_start=1886 - _CONFIGURATIONSTART._serialized_end=2122 - _CONFIGURATIONSTART_SUBTYPE._serialized_start=2046 - _CONFIGURATIONSTART_SUBTYPE._serialized_end=2102 - _EXCEPTION._serialized_start=2124 - _EXCEPTION._serialized_end=2183 - _TENSOR._serialized_start=2186 - _TENSOR._serialized_end=2913 - _TENSOR_STRINGVALUES._serialized_start=2652 - _TENSOR_STRINGVALUES._serialized_end=2681 - _TENSOR_BYTESVALUES._serialized_start=2683 - 
_TENSOR_BYTESVALUES._serialized_end=2711 - _TENSOR_INT64VALUES._serialized_start=2713 - _TENSOR_INT64VALUES._serialized_end=2741 - _TENSOR_DOUBLEVALUES._serialized_start=2743 - _TENSOR_DOUBLEVALUES._serialized_end=2772 - _TENSOR_BOOLVALUES._serialized_start=2774 - _TENSOR_BOOLVALUES._serialized_end=2801 - _TENSOR_SUBTYPE._serialized_start=2803 - _TENSOR_SUBTYPE._serialized_end=2899 - _LINK._serialized_start=2916 - _LINK._serialized_end=3072 - _LINK_LINKTYPE._serialized_start=2998 - _LINK_LINKTYPE._serialized_end=3072 - _TENSORFLOWEXAMPLE._serialized_start=3075 - _TENSORFLOWEXAMPLE._serialized_end=3345 - _LOG._serialized_start=3347 - _LOG._serialized_end=3388 - _TEXT._serialized_start=3390 - _TEXT._serialized_end=3510 - _TEXT_SUBTYPE._serialized_start=3459 - _TEXT_SUBTYPE._serialized_end=3510 - _VALUE._serialized_start=3513 - _VALUE._serialized_end=3885 - _VALUE_SUBTYPE._serialized_start=3749 - _VALUE_SUBTYPE._serialized_end=3871 - _BLOCKSTART._serialized_start=3888 - _BLOCKSTART._serialized_end=5275 - _BLOCKSTART_SUBTYPE._serialized_start=4726 - _BLOCKSTART_SUBTYPE._serialized_end=5255 - _BLOCKEND._serialized_start=5278 - _BLOCKEND._serialized_end=6440 - _BLOCKEND_METRICS._serialized_start=5841 - _BLOCKEND_METRICS._serialized_end=5910 - _BLOCKEND_SUBTYPE._serialized_start=5913 - _BLOCKEND_SUBTYPE._serialized_end=6420 - _LISTSTART._serialized_start=6443 - _LISTSTART._serialized_end=6618 - _LISTSTART_SUBTYPE._serialized_start=6508 - _LISTSTART_SUBTYPE._serialized_end=6618 - _TENSORFLOWMODELTRAININGEPOCHSTART._serialized_start=6620 - _TENSORFLOWMODELTRAININGEPOCHSTART._serialized_end=6694 - _ATTRIBUTESTART._serialized_start=6696 - _ATTRIBUTESTART._serialized_end=6757 - _ATTRIBUTEEND._serialized_start=6759 - _ATTRIBUTEEND._serialized_end=6786 - _PARAMS._serialized_start=6789 - _PARAMS._serialized_end=7223 - _SIMULATIONSTART._serialized_start=7225 - _SIMULATIONSTART._serialized_end=7242 - _SIMULATIONPARAMETERSSTART._serialized_start=7244 - _SIMULATIONPARAMETERSSTART._serialized_end=7271 - _SIMULATIONTIMESTEPSTART._serialized_start=7274 - _SIMULATIONTIMESTEPSTART._serialized_end=7586 - _SIMULATIONTIMESTEPSTART_TIMESTEPUNITS._serialized_start=7449 - _SIMULATIONTIMESTEPSTART_TIMESTEPUNITS._serialized_end=7586 - _CONTINUOUSPROBDIST._serialized_start=7589 - _CONTINUOUSPROBDIST._serialized_end=7829 - _CONTINUOUSPROBDIST_GAUSSIAN._serialized_start=7737 - _CONTINUOUSPROBDIST_GAUSSIAN._serialized_end=7776 - _CONTINUOUSPROBDIST_UNIFORM._serialized_start=7778 - _CONTINUOUSPROBDIST_UNIFORM._serialized_end=7821 - _DISCRETEPROBDIST._serialized_start=7832 - _DISCRETEPROBDIST._serialized_end=7963 - _DISCRETEPROBDIST_UNIFORM._serialized_start=7912 - _DISCRETEPROBDIST_UNIFORM._serialized_end=7955 - _DECISIONCONFIGURATIONSTART._serialized_start=7966 - _DECISIONCONFIGURATIONSTART._serialized_end=11588 - _DECISIONCONFIGURATIONSTART_VIZIERCONFIG._serialized_start=8427 - _DECISIONCONFIGURATIONSTART_VIZIERCONFIG._serialized_end=8441 - _DECISIONCONFIGURATIONSTART_ACMECONFIG._serialized_start=8444 - _DECISIONCONFIGURATIONSTART_ACMECONFIG._serialized_end=8685 - _DECISIONCONFIGURATIONSTART_ACMECONFIG_ACMEAGENT._serialized_start=8543 - _DECISIONCONFIGURATIONSTART_ACMECONFIG_ACMEAGENT._serialized_end=8685 - _DECISIONCONFIGURATIONSTART_GENETICALGORITHMCONFIG._serialized_start=8687 - _DECISIONCONFIGURATIONSTART_GENETICALGORITHMCONFIG._serialized_end=8740 - _DECISIONCONFIGURATIONSTART_EXHAUSTIVESEARCHCONFIG._serialized_start=8742 - _DECISIONCONFIGURATIONSTART_EXHAUSTIVESEARCHCONFIG._serialized_end=8766 - 
_DECISIONCONFIGURATIONSTART_LLMCONFIG._serialized_start=8769 - _DECISIONCONFIGURATIONSTART_LLMCONFIG._serialized_end=9132 - _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMALGORITHM._serialized_start=8963 - _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMALGORITHM._serialized_end=9050 - _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMGOAL._serialized_start=9052 - _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMGOAL._serialized_end=9132 - _DECISIONCONFIGURATIONSTART_BAYESIANOPTCONFIG._serialized_start=9134 - _DECISIONCONFIGURATIONSTART_BAYESIANOPTCONFIG._serialized_end=9153 - _DECISIONCONFIGURATIONSTART_SENSITIVITYANALYSISCONFIG._serialized_start=9155 - _DECISIONCONFIGURATIONSTART_SENSITIVITYANALYSISCONFIG._serialized_end=9182 - _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG._serialized_start=9185 - _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG._serialized_end=9583 - _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG_NEVERGRADALGORITHM._serialized_start=9302 - _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG_NEVERGRADALGORITHM._serialized_end=9583 - _DECISIONCONFIGURATIONSTART_SMCPYCONFIG._serialized_start=9585 - _DECISIONCONFIGURATIONSTART_SMCPYCONFIG._serialized_end=9598 - _DECISIONCONFIGURATIONSTART_WORKLISTSCHEDULERCONFIG._serialized_start=9600 - _DECISIONCONFIGURATIONSTART_WORKLISTSCHEDULERCONFIG._serialized_end=9625 - _DECISIONCONFIGURATIONSTART_CHOICECONFIG._serialized_start=9628 - _DECISIONCONFIGURATIONSTART_CHOICECONFIG._serialized_end=10568 - _DECISIONCONFIGURATIONSTART_CHOICECONFIGENTRY._serialized_start=10570 - _DECISIONCONFIGURATIONSTART_CHOICECONFIGENTRY._serialized_end=10677 - _DECISIONCONFIGURATIONSTART_ATTRPROPS._serialized_start=10680 - _DECISIONCONFIGURATIONSTART_ATTRPROPS._serialized_end=10949 - _DECISIONCONFIGURATIONSTART_STATEATTRSENTRY._serialized_start=10951 - _DECISIONCONFIGURATIONSTART_STATEATTRSENTRY._serialized_end=11053 - _DECISIONCONFIGURATIONSTART_ACTIONATTRSENTRY._serialized_start=11055 - _DECISIONCONFIGURATIONSTART_ACTIONATTRSENTRY._serialized_end=11158 - _DECISIONCONFIGURATIONSTART_OUTCOMEATTRSENTRY._serialized_start=11160 - _DECISIONCONFIGURATIONSTART_OUTCOMEATTRSENTRY._serialized_end=11264 - _DECISIONCONFIGURATIONSTART_OPTIMIZERTYPE._serialized_start=11267 - _DECISIONCONFIGURATIONSTART_OPTIMIZERTYPE._serialized_end=11501 - _DECISIONCONFIGURATIONSTART_DATATYPE._serialized_start=11503 - _DECISIONCONFIGURATIONSTART_DATATYPE._serialized_end=11588 - _DECISIONPARAM._serialized_start=11590 - _DECISIONPARAM._serialized_end=11655 - _DECISIONPOINT._serialized_start=11658 - _DECISIONPOINT._serialized_end=11823 - _DECISIONOUTCOME._serialized_start=11826 - _DECISIONOUTCOME._serialized_end=11954 + _DECISIONPARAM_PARAMSENTRY._options = None + _DECISIONPARAM_PARAMSENTRY._serialized_options = b'8\001' + _ATTRIBUTE._serialized_start = 117 + _ATTRIBUTE._serialized_end = 156 + _OBJECT._serialized_start = 159 + _OBJECT._serialized_end = 1744 + _OBJECT_METRICS._serialized_start = 1264 + _OBJECT_METRICS._serialized_end = 1352 + _OBJECT_ORDER._serialized_start = 1354 + _OBJECT_ORDER._serialized_end = 1383 + _OBJECT_SUBTYPE._serialized_start = 1386 + _OBJECT_SUBTYPE._serialized_end = 1724 + _PROPOSEACTION._serialized_start = 1747 + _PROPOSEACTION._serialized_end = 1883 + _CONFIGURATIONSTART._serialized_start = 1886 + _CONFIGURATIONSTART._serialized_end = 2122 + _CONFIGURATIONSTART_SUBTYPE._serialized_start = 2046 + _CONFIGURATIONSTART_SUBTYPE._serialized_end = 2102 + _EXCEPTION._serialized_start = 2124 + _EXCEPTION._serialized_end = 2183 + _TENSOR._serialized_start = 2186 + _TENSOR._serialized_end = 2913 + 
_TENSOR_STRINGVALUES._serialized_start = 2652 + _TENSOR_STRINGVALUES._serialized_end = 2681 + _TENSOR_BYTESVALUES._serialized_start = 2683 + _TENSOR_BYTESVALUES._serialized_end = 2711 + _TENSOR_INT64VALUES._serialized_start = 2713 + _TENSOR_INT64VALUES._serialized_end = 2741 + _TENSOR_DOUBLEVALUES._serialized_start = 2743 + _TENSOR_DOUBLEVALUES._serialized_end = 2772 + _TENSOR_BOOLVALUES._serialized_start = 2774 + _TENSOR_BOOLVALUES._serialized_end = 2801 + _TENSOR_SUBTYPE._serialized_start = 2803 + _TENSOR_SUBTYPE._serialized_end = 2899 + _LINK._serialized_start = 2916 + _LINK._serialized_end = 3072 + _LINK_LINKTYPE._serialized_start = 2998 + _LINK_LINKTYPE._serialized_end = 3072 + _TENSORFLOWEXAMPLE._serialized_start = 3075 + _TENSORFLOWEXAMPLE._serialized_end = 3345 + _LOG._serialized_start = 3347 + _LOG._serialized_end = 3388 + _TEXT._serialized_start = 3390 + _TEXT._serialized_end = 3510 + _TEXT_SUBTYPE._serialized_start = 3459 + _TEXT_SUBTYPE._serialized_end = 3510 + _VALUE._serialized_start = 3513 + _VALUE._serialized_end = 3885 + _VALUE_SUBTYPE._serialized_start = 3749 + _VALUE_SUBTYPE._serialized_end = 3871 + _BLOCKSTART._serialized_start = 3888 + _BLOCKSTART._serialized_end = 5275 + _BLOCKSTART_SUBTYPE._serialized_start = 4726 + _BLOCKSTART_SUBTYPE._serialized_end = 5255 + _BLOCKEND._serialized_start = 5278 + _BLOCKEND._serialized_end = 6440 + _BLOCKEND_METRICS._serialized_start = 5841 + _BLOCKEND_METRICS._serialized_end = 5910 + _BLOCKEND_SUBTYPE._serialized_start = 5913 + _BLOCKEND_SUBTYPE._serialized_end = 6420 + _LISTSTART._serialized_start = 6443 + _LISTSTART._serialized_end = 6618 + _LISTSTART_SUBTYPE._serialized_start = 6508 + _LISTSTART_SUBTYPE._serialized_end = 6618 + _TENSORFLOWMODELTRAININGEPOCHSTART._serialized_start = 6620 + _TENSORFLOWMODELTRAININGEPOCHSTART._serialized_end = 6694 + _ATTRIBUTESTART._serialized_start = 6696 + _ATTRIBUTESTART._serialized_end = 6757 + _ATTRIBUTEEND._serialized_start = 6759 + _ATTRIBUTEEND._serialized_end = 6786 + _PARAMS._serialized_start = 6789 + _PARAMS._serialized_end = 7223 + _SIMULATIONSTART._serialized_start = 7225 + _SIMULATIONSTART._serialized_end = 7242 + _SIMULATIONPARAMETERSSTART._serialized_start = 7244 + _SIMULATIONPARAMETERSSTART._serialized_end = 7271 + _SIMULATIONTIMESTEPSTART._serialized_start = 7274 + _SIMULATIONTIMESTEPSTART._serialized_end = 7586 + _SIMULATIONTIMESTEPSTART_TIMESTEPUNITS._serialized_start = 7449 + _SIMULATIONTIMESTEPSTART_TIMESTEPUNITS._serialized_end = 7586 + _CONTINUOUSPROBDIST._serialized_start = 7589 + _CONTINUOUSPROBDIST._serialized_end = 7829 + _CONTINUOUSPROBDIST_GAUSSIAN._serialized_start = 7737 + _CONTINUOUSPROBDIST_GAUSSIAN._serialized_end = 7776 + _CONTINUOUSPROBDIST_UNIFORM._serialized_start = 7778 + _CONTINUOUSPROBDIST_UNIFORM._serialized_end = 7821 + _DISCRETEPROBDIST._serialized_start = 7832 + _DISCRETEPROBDIST._serialized_end = 7963 + _DISCRETEPROBDIST_UNIFORM._serialized_start = 7912 + _DISCRETEPROBDIST_UNIFORM._serialized_end = 7955 + _DECISIONCONFIGURATIONSTART._serialized_start = 7966 + _DECISIONCONFIGURATIONSTART._serialized_end = 11588 + _DECISIONCONFIGURATIONSTART_VIZIERCONFIG._serialized_start = 8427 + _DECISIONCONFIGURATIONSTART_VIZIERCONFIG._serialized_end = 8441 + _DECISIONCONFIGURATIONSTART_ACMECONFIG._serialized_start = 8444 + _DECISIONCONFIGURATIONSTART_ACMECONFIG._serialized_end = 8685 + _DECISIONCONFIGURATIONSTART_ACMECONFIG_ACMEAGENT._serialized_start = 8543 + _DECISIONCONFIGURATIONSTART_ACMECONFIG_ACMEAGENT._serialized_end = 8685 + 
_DECISIONCONFIGURATIONSTART_GENETICALGORITHMCONFIG._serialized_start = 8687 + _DECISIONCONFIGURATIONSTART_GENETICALGORITHMCONFIG._serialized_end = 8740 + _DECISIONCONFIGURATIONSTART_EXHAUSTIVESEARCHCONFIG._serialized_start = 8742 + _DECISIONCONFIGURATIONSTART_EXHAUSTIVESEARCHCONFIG._serialized_end = 8766 + _DECISIONCONFIGURATIONSTART_LLMCONFIG._serialized_start = 8769 + _DECISIONCONFIGURATIONSTART_LLMCONFIG._serialized_end = 9132 + _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMALGORITHM._serialized_start = 8963 + _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMALGORITHM._serialized_end = 9050 + _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMGOAL._serialized_start = 9052 + _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMGOAL._serialized_end = 9132 + _DECISIONCONFIGURATIONSTART_BAYESIANOPTCONFIG._serialized_start = 9134 + _DECISIONCONFIGURATIONSTART_BAYESIANOPTCONFIG._serialized_end = 9153 + _DECISIONCONFIGURATIONSTART_SENSITIVITYANALYSISCONFIG._serialized_start = 9155 + _DECISIONCONFIGURATIONSTART_SENSITIVITYANALYSISCONFIG._serialized_end = 9182 + _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG._serialized_start = 9185 + _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG._serialized_end = 9583 + _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG_NEVERGRADALGORITHM._serialized_start = 9302 + _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG_NEVERGRADALGORITHM._serialized_end = 9583 + _DECISIONCONFIGURATIONSTART_SMCPYCONFIG._serialized_start = 9585 + _DECISIONCONFIGURATIONSTART_SMCPYCONFIG._serialized_end = 9598 + _DECISIONCONFIGURATIONSTART_WORKLISTSCHEDULERCONFIG._serialized_start = 9600 + _DECISIONCONFIGURATIONSTART_WORKLISTSCHEDULERCONFIG._serialized_end = 9625 + _DECISIONCONFIGURATIONSTART_CHOICECONFIG._serialized_start = 9628 + _DECISIONCONFIGURATIONSTART_CHOICECONFIG._serialized_end = 10568 + _DECISIONCONFIGURATIONSTART_CHOICECONFIGENTRY._serialized_start = 10570 + _DECISIONCONFIGURATIONSTART_CHOICECONFIGENTRY._serialized_end = 10677 + _DECISIONCONFIGURATIONSTART_ATTRPROPS._serialized_start = 10680 + _DECISIONCONFIGURATIONSTART_ATTRPROPS._serialized_end = 10949 + _DECISIONCONFIGURATIONSTART_STATEATTRSENTRY._serialized_start = 10951 + _DECISIONCONFIGURATIONSTART_STATEATTRSENTRY._serialized_end = 11053 + _DECISIONCONFIGURATIONSTART_ACTIONATTRSENTRY._serialized_start = 11055 + _DECISIONCONFIGURATIONSTART_ACTIONATTRSENTRY._serialized_end = 11158 + _DECISIONCONFIGURATIONSTART_OUTCOMEATTRSENTRY._serialized_start = 11160 + _DECISIONCONFIGURATIONSTART_OUTCOMEATTRSENTRY._serialized_end = 11264 + _DECISIONCONFIGURATIONSTART_OPTIMIZERTYPE._serialized_start = 11267 + _DECISIONCONFIGURATIONSTART_OPTIMIZERTYPE._serialized_end = 11501 + _DECISIONCONFIGURATIONSTART_DATATYPE._serialized_start = 11503 + _DECISIONCONFIGURATIONSTART_DATATYPE._serialized_end = 11588 + _DECISIONPARAM._serialized_start = 11591 + _DECISIONPARAM._serialized_end = 11733 + _DECISIONPARAM_PARAMSENTRY._serialized_start = 11666 + _DECISIONPARAM_PARAMSENTRY._serialized_end = 11733 + _DECISIONPOINT._serialized_start = 11736 + _DECISIONPOINT._serialized_end = 11901 + _DECISIONOUTCOME._serialized_start = 11904 + _DECISIONOUTCOME._serialized_end = 12032 # @@protoc_insertion_point(module_scope) diff --git a/py/sight/utility.py b/py/sight/utility.py index 2999d24..4d9b8eb 100644 --- a/py/sight/utility.py +++ b/py/sight/utility.py @@ -26,8 +26,8 @@ from google.protobuf.json_format import _Printer as BasePrinter from google.protobuf.json_format import SerializeToJsonError from sight import service_utils as service +from sight.utils.proto_conversion import convert_proto_to_dict from 
sight.widgets.decision.resource_lock import RWLockDictWrapper -from sight_service.optimizer_instance import param_proto_to_dict from sight_service.proto import service_pb2 POLL_LIMIT = 10 # POLL_TIME_INTERVAL th part of second @@ -62,9 +62,12 @@ def get_all_outcomes(sight_id, action_ids): outcome_dict = {} outcome_dict['action_id'] = outcome.action_id outcome_dict['reward'] = outcome.reward - outcome_dict['action'] = param_proto_to_dict(outcome.action_attrs) - outcome_dict['outcome'] = param_proto_to_dict(outcome.outcome_attrs) - outcome_dict['attributes'] = param_proto_to_dict(outcome.attributes) + outcome_dict['action'] = convert_proto_to_dict( + proto=outcome.action_attrs) + outcome_dict['outcome'] = convert_proto_to_dict( + proto=outcome.outcome_attrs) + outcome_dict['attributes'] = convert_proto_to_dict( + proto=outcome.attributes) else: outcome_dict = None outcome_list.append(outcome_dict) diff --git a/py/sight/utils/proto_conversion.py b/py/sight/utils/proto_conversion.py new file mode 100644 index 0000000..2769aa6 --- /dev/null +++ b/py/sight/utils/proto_conversion.py @@ -0,0 +1,133 @@ +"""Utility functions for converting between dictionaries and Sight protos.""" + +import json +from typing import Any, Callable, Dict, List, Optional + +import pandas as pd +from sight.proto import sight_pb2 +from sight_service.proto import service_pb2 + + +def update_proto_map(existing_proto_map: sight_pb2.DecisionParam, + new_proto_map: dict[str, Any]): + """Updates the existing proto map with the new proto map. + + Args: + existing_proto_map: The existing proto map to update. + new_proto_map: The new proto map to update the existing proto map with. + """ + for key, value in new_proto_map.items(): + # Use CopyFrom to assign each new value properly + existing_proto_map.params[key].CopyFrom(get_proto_value_from_value(value)) + + +def get_value_from_proto_value(proto_value: sight_pb2.Value) -> Any: + """Returns the value of the proto value. + + Args: + proto_value: The proto value to get the value from. + + Returns: The value of the proto value. + + Raises: + ValueError: If the proto value has an unsupported subtype. + """ + if proto_value.sub_type == sight_pb2.Value.ST_STRING: + return proto_value.string_value + elif proto_value.sub_type == sight_pb2.Value.ST_BYTES: + return proto_value.bytes_value + elif proto_value.sub_type == sight_pb2.Value.ST_INT64: + return proto_value.int64_value + elif proto_value.sub_type == sight_pb2.Value.ST_DOUBLE: + return proto_value.double_value + elif proto_value.sub_type == sight_pb2.Value.ST_BOOL: + return proto_value.bool_value + elif proto_value.sub_type == sight_pb2.Value.ST_NONE: + return None + elif proto_value.sub_type == sight_pb2.Value.ST_JSON: + try: + return json.loads(proto_value.json_value) + except (ValueError, TypeError): + return (proto_value.json_value + ) # Fall back to the raw string if JSON parsing fails + else: + raise ValueError(f"Unsupported subtype: {proto_value.sub_type}") + + +def get_proto_value_from_value(v) -> sight_pb2.Value: + """Returns a proto value from a value. + + Args: + v: The value to get the proto value from. + + Returns: The proto value of the value. + + Raises: + ValueError: If the value has an unsupported type. 
+ """ + val = sight_pb2.Value() + if isinstance(v, dict): + val.sub_type = sight_pb2.Value.ST_JSON + val.json_value = json.dumps(v) + elif isinstance(v, pd.Series): + val.sub_type = sight_pb2.Value.ST_JSON + val.json_value = json.dumps(v.to_dict()) + elif isinstance(v, str): + try: + # Try to parse as JSON if possible + json.loads(v) + val.sub_type = sight_pb2.Value.ST_JSON + val.json_value = v + except (ValueError, TypeError): + val.sub_type = sight_pb2.Value.ST_STRING + val.string_value = v + elif isinstance(v, int): + val.sub_type = sight_pb2.Value.ST_INT64 + val.int64_value = v + elif isinstance(v, float): + val.sub_type = sight_pb2.Value.ST_DOUBLE + val.double_value = v + elif isinstance(v, bool): + val.sub_type = sight_pb2.Value.ST_BOOL + val.bool_value = v + elif isinstance(v, bytes): + val.sub_type = sight_pb2.Value.ST_BYTES + val.bytes_value = v + elif v is None: + val.sub_type = sight_pb2.Value.ST_NONE + val.none_value = True + else: + raise ValueError(f"Unsupported type: {type(v)}") + return val + + +def convert_dict_to_proto(dict: Dict[str, Any]) -> sight_pb2.DecisionParam: + """Converts a dictionary to a proto. + + Args: + dict: The dictionary to convert to a proto. + + Returns: + The proto representation of the dictionary. + + """ + proto_map = sight_pb2.DecisionParam() + for k, v in dict.items(): + proto_map.params[k].CopyFrom(get_proto_value_from_value(v)) + return proto_map + + +def convert_proto_to_dict(proto: sight_pb2.DecisionParam) -> Dict[str, Any]: + """Converts a proto to a dictionary. + + Args: + proto: The proto to convert to a dictionary. + + Returns: + The dictionary representation of the proto. + + """ + result = {} + for k, v in proto.params.items(): + result[k] = get_value_from_proto_value(v) + return result diff --git a/py/sight/widgets/decision/decision.py b/py/sight/widgets/decision/decision.py index c12fcbc..e9293c9 100644 --- a/py/sight/widgets/decision/decision.py +++ b/py/sight/widgets/decision/decision.py @@ -30,6 +30,7 @@ from sight import service_utils as service from sight.proto import sight_pb2 from sight.utility import poll_network_batch_outcome +from sight.utils.proto_conversion import convert_dict_to_proto # from sight.widgets.decision.cartpole_driver import driver_fn from sight.widgets.decision import decision_episode_fn from sight.widgets.decision import trials @@ -633,7 +634,6 @@ def run( logging.debug("<<<< Out %s of %s", method_name, _file_name) - def get_state_attrs(sight: Any) -> list[str]: state_attrs = [] state_details = sight.widget_decision_state['decision_episode_fn'] @@ -669,31 +669,8 @@ def get_decision_outcome_proto(outcome_label: str, decision_outcome.reward = sight.widget_decision_state['sum_reward'] if 'sum_outcome' in sight.widget_decision_state: - outcome_params: List[sight_pb2.DecisionParam] = [] - for key in sight.widget_decision_state['sum_outcome']: - val = sight.widget_decision_state['sum_outcome'][key] - if (utils.is_scalar(val)): - #todo: assuming only double for now in scalar - value = sight_pb2.Value( - sub_type=sight_pb2.Value.ST_DOUBLE, - double_value=val, - ) - else: - if (isinstance(val, dict) or isinstance(val, list)): - json_value = json.dumps(val) - elif (isinstance(val, pd.Series)): - json_value = json.dumps(val.to_dict()) - else: - raise TypeError(f'Value of {key} needs to be dict, list or pd.Series type. 
Actual type is {type(val)}, val={val}.') - - value = sight_pb2.Value(sub_type=sight_pb2.Value.ST_JSON, - json_value=json_value) - - outcome_params.append(sight_pb2.DecisionParam( - key=key, - value=value, - )) - decision_outcome.outcome_params.extend(outcome_params) + decision_outcome.outcome_params.CopyFrom( + convert_dict_to_proto(dict=sight.widget_decision_state['sum_outcome'])) if 'discount' in sight.widget_decision_state: decision_outcome.discount = sight.widget_decision_state['discount'] @@ -790,48 +767,14 @@ def decision_point( if 'reward' in sight.widget_decision_state: req.decision_outcome.reward = sight.widget_decision_state['reward'] if 'outcome_value' in sight.widget_decision_state: - outcome_params: List[sight_pb2.DecisionParam] = [] - for key in sight.widget_decision_state['outcome_value']: - outcome_params.append( - sight_pb2.DecisionParam( - key=key, - value=sight_pb2.Value( - sub_type=sight_pb2.Value.ST_DOUBLE, - double_value=sight.widget_decision_state['outcome_value'] - [key], - ), - )) - req.decision_outcome.outcome_params.extend(outcome_params) + req.decision.outcome.outcome_params.CopyFrom( + convert_dict_to_proto( + dict=sight.widget_decision_state["outcome_value"])) req.decision_outcome.discount = sight.widget_decision_state['discount'] chosen_action = optimizer_obj.decision_point(sight, req) - choice_params: List[sight_pb2.DecisionParam] = [] - # for attr in sight.widget_decision_state[ - # 'decision_episode_fn'].action_attrs: - for attr in chosen_action.keys(): - #? keep this might need to change sub_type of deicision param value - if isinstance(chosen_action[attr], str): - val = sight_pb2.Value( - sub_type=sight_pb2.Value.ST_STRING, - string_value=chosen_action[attr], - ) - elif isinstance(chosen_action[attr], float): - val = sight_pb2.Value( - sub_type=sight_pb2.Value.ST_DOUBLE, - double_value=chosen_action[attr], - ) - elif isinstance(chosen_action[attr], int): - val = sight_pb2.Value( - sub_type=sight_pb2.Value.ST_INT64, - int64_value=chosen_action[attr], - ) - else: - raise ValueError("unsupported type!!") - - choice_params.append(sight_pb2.DecisionParam( - key=attr, - value=val, - )) + choice_params = sight_pb2.DecisionParam() + choice_params.CopyFrom(convert_dict_to_proto(dict=chosen_action)) # pytype: disable=attribute-error obj = sight_pb2.Object( @@ -840,7 +783,7 @@ def decision_point( # choice_params=choice_params, ), ) - obj.decision_point.choice_params.extend(choice_params) + obj.decision_point.choice_params.CopyFrom(choice_params) sight.log_object(obj, inspect.currentframe().f_back.f_back) logging.info('decision_point() chosen_action=%s', chosen_action) @@ -913,54 +856,13 @@ def decision_outcome( def propose_actions(sight, action_dict): - request = service_pb2.ProposeActionRequest() - if sight.params.silent_logger: - raise ValueError('Cannot use Decision API using Sight silent logger.') - request.client_id = str(sight.id) - - actions_data = [] - attributes_data = [] - - # Process actions - for k, v in action_dict.items(): - action_attr = sight_pb2.DecisionParam() - action_attr.key = k - if isinstance(v, str): - val = sight_pb2.Value( - sub_type=sight_pb2.Value.ST_STRING, - string_value=v, - ) - else: - val = sight_pb2.Value( - sub_type=sight_pb2.Value.ST_DOUBLE, - double_value=v, - ) - action_attr.value.CopyFrom(val) - # Append to actions_data list - actions_data.append(action_attr) - request.action_attrs.extend(actions_data) attr_dict = sight.fetch_attributes() - # print('attr_dict : ', attr_dict) - - # Process attributes - for k, v in 
attr_dict.items(): - attribute = sight_pb2.DecisionParam() - attribute.key = k - if isinstance(v, str): - val = sight_pb2.Value( - sub_type=sight_pb2.Value.ST_STRING, - string_value=v, - ) - else: - val = sight_pb2.Value( - sub_type=sight_pb2.Value.ST_DOUBLE, - double_value=v, - ) - attribute.value.CopyFrom(val) - # Append to attributes_data list - attributes_data.append(attribute) - request.attributes.extend(attributes_data) + + request = service_pb2.ProposeActionRequest() + request.client_id = str(sight.id) + request.action_attrs.CopyFrom(convert_dict_to_proto(dict=action_dict)) + request.attributes.CopyFrom(convert_dict_to_proto(dict=attr_dict)) response = service.call( lambda s, meta: s.ProposeAction(request, 300, metadata=meta)) @@ -970,8 +872,8 @@ def propose_actions(sight, action_dict): sight_obj = sight_pb2.Object() sight_obj.sub_type = sight_pb2.Object.SubType.ST_PROPOSE_ACTION sight_obj.propose_action.action_id = str(action_id) - sight_obj.propose_action.action_attrs.extend(actions_data) - sight_obj.propose_action.attributes.extend(attributes_data) + sight_obj.propose_action.action_attrs.CopyFrom(request.action_attrs) + sight_obj.propose_action.attributes.CopyFrom(request.attributes) frame = inspect.currentframe().f_back.f_back sight.set_object_code_loc(sight_obj, frame) @@ -1021,8 +923,9 @@ def finalize_episode(sight): # , optimizer_obj optimizer.obj = SingleActionOptimizerClient( sight_pb2.DecisionConfigurationStart.OptimizerType. OT_WORKLIST_SCHEDULER, sight) - req.decision_outcome.CopyFrom( - get_decision_outcome_proto('outcome', sight)) + req.decision_outcome.CopyFrom(get_decision_outcome_proto( + 'outcome', sight)) + # print('request : ', req) optimizer_obj = optimizer.get_instance() optimizer_obj.finalize_episode(sight, req) elif _OPTIMIZER_TYPE.value == 'dm_acme': diff --git a/py/sight/widgets/decision/optimizer_client.py b/py/sight/widgets/decision/optimizer_client.py index 1af7d0b..7efeb11 100644 --- a/py/sight/widgets/decision/optimizer_client.py +++ b/py/sight/widgets/decision/optimizer_client.py @@ -1,4 +1,4 @@ - # Copyright 2023 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
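The `convert_dict_to_proto`/`convert_proto_to_dict` helpers that the diffs above switch to are intended to be inverses of each other. A minimal round-trip sketch of that contract (the values are arbitrary; assumes the regenerated `sight_pb2` with the map-based `DecisionParam.params` field is importable):

```python
# Round-trip sketch for the new conversion helpers (values are arbitrary).
from sight.proto import sight_pb2
from sight.utils.proto_conversion import (convert_dict_to_proto,
                                          convert_proto_to_dict)

action = {
    'region': 'us-west1',    # encoded as ST_STRING (not valid JSON)
    'replicas': 3,           # ST_INT64
    'learning_rate': 0.01,   # ST_DOUBLE
    'debug': False,          # ST_BOOL
    'extras': {'seed': 42},  # ST_JSON
}

proto = convert_dict_to_proto(dict=action)
assert isinstance(proto, sight_pb2.DecisionParam)

# convert_proto_to_dict inverts the encoding, including the JSON fallback.
assert convert_proto_to_dict(proto=proto) == action
```

Note that `'debug': False` only survives the round trip because the `bool` branch is checked before the `int` branch in `get_proto_value_from_value`; `bool` is a subclass of `int` in Python, so the reverse order would encode it as `ST_INT64`.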
@@ -17,6 +17,7 @@ from sight import service_utils as service from sight.proto import sight_pb2 +from sight.utils.proto_conversion import convert_proto_to_dict from sight_service.proto import service_pb2 @@ -43,38 +44,7 @@ def decision_point(self, sight, request: service_pb2.DecisionPointRequest): def _get_dp_action( self, dp_response: service_pb2.DecisionPointResponse) -> Dict[str, Any]: """Returns the dict representation of the action encoded in dp_response.""" - d = {} - for a in dp_response.action: - if (a.value.sub_type == sight_pb2.Value.ST_DOUBLE): - d[a.key] = a.value.double_value - elif(a.value.sub_type == sight_pb2.Value.ST_INT64): - d[a.key] = a.value.int64_value - elif(a.value.sub_type == sight_pb2.Value.ST_STRING): - d[a.key] = a.value.string_value - else: - raise ValueError(f"Not supported type: {a.key}: {a.value}") - return d - - def _set_dp_action(self, dp: sight_pb2.DecisionPoint, - action: Dict[str, Any]) -> None: - """Add to dp the attributes of action.""" - for key, val in action.items(): - if(isinstance(val,str)): - dp.value.add(sight_pb2.DecisionParam(key=key, value=sight_pb2.Value(string_value=val))) - elif(isinstance(val,float)): - dp.value.add(sight_pb2.DecisionParam(key=key, value=sight_pb2.Value(double_value=val))) - elif(isinstance(val,int)): - dp.value.add(sight_pb2.DecisionParam(key=key, value=sight_pb2.Value(int64_value=val))) - if (isinstance(val, str)): - dp.value.add( - sight_pb2.DecisionParam(key=key, - value=sight_pb2.Value(string_value=val))) - elif (isinstance(val, float)): - dp.value.add( - sight_pb2.DecisionParam(key=key, - value=sight_pb2.Value(double_value=val))) - else: - raise ValueError(f"Not supported type: {key}: {val}") + return convert_proto_to_dict(dp_response.action) def finalize_episode(self, sight, request: service_pb2.FinalizeEpisodeRequest): diff --git a/py/sight/widgets/decision/single_action_optimizer_client.py b/py/sight/widgets/decision/single_action_optimizer_client.py index d57ea6f..7608d29 100644 --- a/py/sight/widgets/decision/single_action_optimizer_client.py +++ b/py/sight/widgets/decision/single_action_optimizer_client.py @@ -19,6 +19,8 @@ from overrides import override from sight import service_utils as service from sight.proto import sight_pb2 +from sight.utils.proto_conversion import convert_proto_to_dict +from sight.utils.proto_conversion import update_proto_map from sight.widgets.decision.optimizer_client import OptimizerClient from sight_service.proto import service_pb2 @@ -86,7 +88,6 @@ def decision_point(self, sight, request: service_pb2.DecisionPointRequest): # while True: response = service.call( lambda s, meta: s.DecisionPoint(request, 300, metadata=meta)) - logging.info('response: %s', response) if response.action_type == service_pb2.DecisionPointResponse.ActionType.AT_ACT: self._last_action = response.action return self._get_dp_action(response) @@ -105,8 +106,11 @@ def finalize_episode(self, sight, request: service_pb2.FinalizeEpisodeRequest): # logging.info('SingleActionOptimizerClient() finalize_episode') if self._last_action: - for a in self._last_action: - request.decision_point.choice_params.append(a) + logging.info('finalize episode => %s', + request.decision_point.choice_params) + update_proto_map( + existing_proto_map=request.decision_point.choice_params, + new_proto_map=convert_proto_to_dict(proto=self._last_action)) response = service.call( lambda s, meta: s.FinalizeEpisode(request, 300, metadata=meta)) return response diff --git a/sight_service/Dockerfile b/sight_service/Dockerfile index 
088b30f..25e9db3 100644 --- a/sight_service/Dockerfile +++ b/sight_service/Dockerfile @@ -37,6 +37,7 @@ RUN pip3 install -r sight_service/requirements.txt # copy sight code COPY py/helpers/logs/ py/helpers/logs/ COPY py/sight/proto/ py/sight/proto/ +COPY py/sight/utils py/sight/utils/ # COPY py/sight/widgets/decision py/sight/widgets/decision COPY sight_service/ sight_service/ COPY fvs_sight/ fvs_sight/ diff --git a/sight_service/acme_optimizer.py b/sight_service/acme_optimizer.py index b6e6b53..5dba738 100644 --- a/sight_service/acme_optimizer.py +++ b/sight_service/acme_optimizer.py @@ -41,7 +41,6 @@ # from sight_service.build_sac_learner import build_sac_config from sight_service.build_td3_learner import build_td3_config from sight_service.optimizer_instance import OptimizerInstance -from sight_service.optimizer_instance import param_dict_to_proto from sight_service.proto import service_pb2 from sight_service.proto.numproto.numproto import ndarray_to_proto from sight_service.proto.numproto.numproto import proto_to_ndarray @@ -463,40 +462,6 @@ def finalize_episode( # Manually shutdown the executor after submitting tasks executor.shutdown(wait=False) - # observation = np.array( - # list(param_proto_to_dict(request.decision_point.state_params).values()), - # dtype=np.float32, - # ) - # # logging.info('observation : %s', observation) - # with self.last_action_lock.gen_wlock(): - # if request.worker_id in self.last_action: - # action = self.last_action[request.worker_id] - - # timestep = dm_env.TimeStep( - # step_type=dm_env.StepType.LAST, - # reward=np.array( - # request.decision_outcome.outcome_value, dtype=np.float64 - # ), - # discount=np.array( - # request.decision_outcome.discount, dtype=np.float64 - # ), - # observation=np.frombuffer(observation, dtype=np.float32), - # ) - - # with self.agents_lock.gen_rlock(): - # self.agents[request.worker_id].observe( - # np.int64(action), next_timestep=timestep - # ) - # - # # self.agents[request.worker_id].observe( - # # np.float32(action), next_timestep=timestep - # # ) - # self.agents[request.worker_id].update() - # self._learner_checkpointer.save(force=True) - - # Resetting last action for agent since it is the end of the episode. 
- # del self.last_action[request.worker_id] - logging.debug("<<<< Out %s of %s", method_name, _file_name) return service_pb2.FinalizeEpisodeResponse(response_str="Success!") diff --git a/sight_service/bayesian_opt.py b/sight_service/bayesian_opt.py index 3ad5822..8f35787 100644 --- a/sight_service/bayesian_opt.py +++ b/sight_service/bayesian_opt.py @@ -20,6 +20,8 @@ from helpers.logs.logs_handler import logger as logging from overrides import overrides from sight.proto import sight_pb2 +from sight.utils.proto_conversion import convert_dict_to_proto +from sight.utils.proto_conversion import convert_proto_to_dict from sight_service.optimizer_instance import OptimizerInstance from sight_service.proto import service_pb2 @@ -73,11 +75,8 @@ def decision_point( self._lock.release() dp_response = service_pb2.DecisionPointResponse() - for key, value in selected_actions.items(): - a = dp_response.action.add() - a.key = key - a.value.sub_type = sight_pb2.Value.ST_DOUBLE - a.value.double_value = float(value) + + dp_response.action.CopyFrom(convert_dict_to_proto(dict=selected_actions)) print('DecisionPoint response=%s' % dp_response) dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT @@ -88,9 +87,11 @@ def finalize_episode( self, request: service_pb2.FinalizeEpisodeRequest ) -> service_pb2.FinalizeEpisodeResponse: logging.info('FinalizeEpisode request=%s', request) - d = {} - for a in request.decision_point.choice_params: - d[a.key] = a.value.double_value + + d = convert_proto_to_dict(proto=request.decision_point.choice_params) + # d = {} + # for a in request.decision_point.choice_params: + # d[a.key] = a.value.double_value self._lock.acquire() logging.info('FinalizeEpisode outcome=%s / %s', diff --git a/sight_service/exhaustive_search.py b/sight_service/exhaustive_search.py index b8f12ab..0aba076 100644 --- a/sight_service/exhaustive_search.py +++ b/sight_service/exhaustive_search.py @@ -18,8 +18,8 @@ from helpers.logs.logs_handler import logger as logging from overrides import overrides +from sight.utils.proto_conversion import convert_dict_to_proto from sight_service.optimizer_instance import OptimizerInstance -from sight_service.optimizer_instance import param_dict_to_proto from sight_service.proto import service_pb2 _file_name = "exhaustive_search.py" @@ -124,7 +124,7 @@ def decision_point( logging.info('next_action=%s', next_action) dp_response = service_pb2.DecisionPointResponse() - dp_response.action.extend(param_dict_to_proto(next_action)) + dp_response.action.CopyFrom(convert_dict_to_proto(dict=next_action)) logging.debug("<<<< Out %s of %s", method_name, _file_name) dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT return dp_response diff --git a/sight_service/genetic_algorithm.py b/sight_service/genetic_algorithm.py index f66ad88..550d632 100644 --- a/sight_service/genetic_algorithm.py +++ b/sight_service/genetic_algorithm.py @@ -20,8 +20,8 @@ from helpers.logs.logs_handler import logger as logging from overrides import overrides +from sight.utils.proto_conversion import convert_dict_to_proto from sight_service.optimizer_instance import OptimizerInstance -from sight_service.optimizer_instance import param_dict_to_proto from sight_service.proto import service_pb2 @@ -251,7 +251,7 @@ def decision_point( } dp_response = service_pb2.DecisionPointResponse() - dp_response.action.extend(param_dict_to_proto(next_action)) + dp_response.action.CopyFrom(convert_dict_to_proto(dict=next_action)) dp_response.action_type = 
service_pb2.DecisionPointResponse.ActionType.AT_ACT return dp_response diff --git a/sight_service/llm.py b/sight_service/llm.py index e2eed1b..302303c 100644 --- a/sight_service/llm.py +++ b/sight_service/llm.py @@ -26,9 +26,9 @@ from overrides import overrides import requests from sight.proto import sight_pb2 +from sight.utils.proto_conversion import convert_dict_to_proto from sight_service.bayesian_opt import BayesianOpt from sight_service.optimizer_instance import OptimizerInstance -from sight_service.optimizer_instance import param_dict_to_proto from sight_service.proto import service_pb2 # _GENAI_API_KEY = os.environ['GENAI_API_KEY'] @@ -263,8 +263,7 @@ def _history_to_chat( for h in self._filtered_history(include_example_action): if len(h['state']) > 0: chat.append({ - 'author': - 'USER', + 'author': 'USER', 'content': (last_outcome_message + 'Decision State:\n' + ' {' + ', '.join([f'"{k}": {v}' for k, v in h['state'].items()]) + @@ -272,8 +271,7 @@ def _history_to_chat( }) if h['action'] is not None: chat.append({ - 'author': - 'AI', + 'author': 'AI', 'content': (+ 'Decision Action:\n' + ' {{' + ', '.join( [f'"{key}": {value}' for key, value in h['action'].items()]) + '}'), @@ -620,10 +618,8 @@ def finalize_episode( # self.last_outcome = self._history[-1]['outcome'] logging.info('self._history[-1]=%s', self._history[-1]) - for key, value in self._history[-1]['action'].items(): - a = request.decision_point.choice_params.add() - a.key = key - a.value.double_value = float(value) + request.decision_point.choice_params.CopyFrom( + convert_dict_to_proto(dict=self._history[-1]['action'])) self._bayesian_opt.finalize_episode(request) if (self._llm_config.goal == diff --git a/sight_service/nevergrad_opt.py b/sight_service/nevergrad_opt.py index 1b2cbee..7f190ed 100644 --- a/sight_service/nevergrad_opt.py +++ b/sight_service/nevergrad_opt.py @@ -26,9 +26,9 @@ from overrides import overrides import requests from sight.proto import sight_pb2 +from sight.utils.proto_conversion import convert_dict_to_proto from sight_service.normalizer import Normalizer from sight_service.optimizer_instance import OptimizerInstance -from sight_service.optimizer_instance import param_dict_to_proto from sight_service.proto import service_pb2 _file_name = "nevergrad_opt.py" @@ -202,16 +202,10 @@ def decision_point( # print("denormalized_actions : ", denormalized_actions) dp_response = service_pb2.DecisionPointResponse() - for key, value in denormalized_actions.items(): - a = dp_response.action.add() - a.key = key - a.value.sub_type = sight_pb2.Value.ST_DOUBLE - a.value.double_value = float(value) - # self.last_outcome = request.decision_outcome.outcome_value - # print('DecisionPoint response=%s' % dp_response) + dp_response.action.CopyFrom( + convert_dict_to_proto(dict=denormalized_actions)) - # print('DecisionPoint response=%s' % dp_response) dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT return dp_response diff --git a/sight_service/optimizer_instance.py b/sight_service/optimizer_instance.py index 2923598..d2e52ef 100644 --- a/sight_service/optimizer_instance.py +++ b/sight_service/optimizer_instance.py @@ -24,64 +24,6 @@ _file_name = "optimizer_instance.py" -def param_dict_to_proto( - param_dict: Dict[str, float]) -> List[sight_pb2.DecisionParam]: - """converting dictionary of parameters into proto.""" - param_proto: List[sight_pb2.DecisionParam] = [] - for k, v in sorted(param_dict.items()): - if isinstance(v, str): - val = sight_pb2.Value( - sub_type=sight_pb2.Value.ST_STRING, - 
string_value=v, - ) - elif isinstance(v, float): - val = sight_pb2.Value( - sub_type=sight_pb2.Value.ST_DOUBLE, - double_value=v, - ) - elif isinstance(v, int): - val = sight_pb2.Value( - sub_type=sight_pb2.Value.ST_INT64, - int64_value=v, - ) - elif (not utils.is_scalar(v)): - print('here v is : ', v, type(v)) - val = sight_pb2.Value( - sub_type=sight_pb2.Value.ST_JSON, - json_value=v, - ) - else: - raise ValueError(f'action attribute type of key "{k}", value "{v}" must be either string or float') - - param_proto.append(sight_pb2.DecisionParam(key=k, value=val)) - return param_proto - - -def param_proto_to_dict( - param_proto: Sequence[sight_pb2.DecisionParam],) -> Dict[str, float]: - """converting proto back into dictionary of parameters.""" - param_dict = {} - for param in param_proto: - # if ((param.value.sub_type != sight_pb2.Value.ST_DOUBLE) and (param.value.sub_type != sight_pb2.Value.ST_STRING)): - # raise ValueError("Unsupported action type %s" % param.value.sub_type) - # param_dict[param.key] = param.value.double_value - if (param.value.sub_type == sight_pb2.Value.ST_DOUBLE): - param_dict[param.key] = param.value.double_value - elif (param.value.sub_type == sight_pb2.Value.ST_STRING): - param_dict[param.key] = param.value.string_value - elif (param.value.sub_type == sight_pb2.Value.ST_BOOL): - param_dict[param.key] = param.value.bool_value - elif (param.value.sub_type == sight_pb2.Value.ST_BYTES): - param_dict[param.key] = param.value.bytes_value - elif (param.value.sub_type == sight_pb2.Value.ST_INT64): - param_dict[param.key] = param.value.int64_value - elif (param.value.sub_type == sight_pb2.Value.ST_JSON): - param_dict[param.key] = param.value.json_value - else: - raise ValueError("Unsupported action type %s" % param.value.sub_type) - return param_dict - - class OptimizerInstance: """An OptimizerInstance class that is generic for all optimizers. 
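The deletion above is the pivot for the rest of this patch: `param_dict_to_proto` and `param_proto_to_dict`, which built and read a *repeated* `DecisionParam` list, are removed in favor of `convert_dict_to_proto` and `convert_proto_to_dict` from `sight.utils.proto_conversion`, which work with a single message. That is why every call site in this patch switches from `extend()` to `CopyFrom()`. A minimal sketch of the migrated pattern, assuming the new helpers round-trip plain dicts the way the deleted functions did (their implementation lives outside this patch) and using an illustrative `selected_actions` dict:

```python
from sight.utils.proto_conversion import convert_dict_to_proto
from sight.utils.proto_conversion import convert_proto_to_dict
from sight_service.proto import service_pb2

# Illustrative action values; real keys come from the study's action attributes.
selected_actions = {'learning_rate': 0.01, 'batch_size': 64.0}

dp_response = service_pb2.DecisionPointResponse()
# Old pattern: dp_response.action.extend(param_dict_to_proto(selected_actions))
# New pattern: action is now a singular DecisionParam, so the converted dict
# is copied in as one message.
dp_response.action.CopyFrom(convert_dict_to_proto(dict=selected_actions))

# The reverse direction mirrors the deleted param_proto_to_dict
# (round-trip behavior is assumed, not shown in this patch).
restored = convert_proto_to_dict(proto=dp_response.action)
assert restored == selected_actions
```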
diff --git a/sight_service/proto/api_descriptor.pb b/sight_service/proto/api_descriptor.pb
index 048838b46826bff9f58ca9d70d73720159c0b599..3e7cc3adf87c886dc4095f9a5c15fa9009d2f4b0 100644
GIT binary patch
delta 15163
[base85-encoded binary delta data elided: regenerated protobuf API descriptor, not human-readable]

diff --git a/sight_service/proto/service_pb2.py b/sight_service/proto/service_pb2.py
@@ (hunk updating the generated DESCRIPTOR) @@
-[old serialized_pb bytes literal elided: machine-generated, not human-readable]
+[new serialized_pb bytes literal elided: in the regenerated descriptor, the
+ action_attrs and attributes fields of ProposeActionRequest, and the state_attrs,
+ action_attrs, outcome_attrs and attributes fields of GetOutcomeResponse.Outcome,
+ change from repeated to singular sight.x.proto.DecisionParam, and TestResponse
+ gains a singular DecisionParam field named action]
 )
 
 _LOGFORMAT = DESCRIPTOR.enum_types_by_name['LogFormat']
@@ -85,19 +85,16 @@
     'Acme_Request',
     (_message.Message,),
     {
-        'Observation':
-            _reflection.GeneratedProtocolMessageType(
-                'Observation',
-                (_message.Message,),
-                {
-                    'DESCRIPTOR': _ACME_REQUEST_OBSERVATION,
-                    '__module__': 'sight_service.proto.service_pb2'
-                    # @@protoc_insertion_point(class_scope:sight.x.service.Acme_Request.Observation)
-                }),
-        'DESCRIPTOR':
-            _ACME_REQUEST,
-        '__module__':
-            'sight_service.proto.service_pb2'
+        'Observation': _reflection.GeneratedProtocolMessageType(
+            'Observation',
+            (_message.Message,),
+            {
+                'DESCRIPTOR': _ACME_REQUEST_OBSERVATION,
+                '__module__': 'sight_service.proto.service_pb2'
+                # @@protoc_insertion_point(class_scope:sight.x.service.Acme_Request.Observation)
+            }),
+        'DESCRIPTOR': _ACME_REQUEST,
+        '__module__': 'sight_service.proto.service_pb2'
         # @@protoc_insertion_point(class_scope:sight.x.service.Acme_Request)
     })
 _sym_db.RegisterMessage(Acme_Request)
@@ -107,30 +104,24 @@
     'Acme_Response',
     (_message.Message,),
     {
-        'Layer':
-            _reflection.GeneratedProtocolMessageType(
-                'Layer',
-                (_message.Message,),
-                {
-                    'WeightsData':
-                        _reflection.GeneratedProtocolMessageType(
-                            'WeightsData',
-                            (_message.Message,),
-                            {
-                                'DESCRIPTOR': _ACME_RESPONSE_LAYER_WEIGHTSDATA,
-                                '__module__': 'sight_service.proto.service_pb2'
-                                # @@protoc_insertion_point(class_scope:sight.x.service.Acme_Response.Layer.WeightsData)
-                            }),
-                    'DESCRIPTOR':
-                        _ACME_RESPONSE_LAYER,
-                    '__module__':
-                        'sight_service.proto.service_pb2'
-                    # @@protoc_insertion_point(class_scope:sight.x.service.Acme_Response.Layer)
-                }),
-        'DESCRIPTOR':
-            _ACME_RESPONSE,
-        '__module__':
-            'sight_service.proto.service_pb2'
+        'Layer': _reflection.GeneratedProtocolMessageType(
+            'Layer',
+            (_message.Message,),
+            {
+                'WeightsData': _reflection.GeneratedProtocolMessageType(
+                    'WeightsData',
+                    (_message.Message,),
+                    {
+                        'DESCRIPTOR': _ACME_RESPONSE_LAYER_WEIGHTSDATA,
+                        '__module__': 'sight_service.proto.service_pb2'
+                        # @@protoc_insertion_point(class_scope:sight.x.service.Acme_Response.Layer.WeightsData)
+                    }),
+                'DESCRIPTOR': _ACME_RESPONSE_LAYER,
+                '__module__': 'sight_service.proto.service_pb2'
+                # @@protoc_insertion_point(class_scope:sight.x.service.Acme_Response.Layer)
+            }),
+        'DESCRIPTOR': _ACME_RESPONSE,
+        '__module__': 'sight_service.proto.service_pb2'
         # @@protoc_insertion_point(class_scope:sight.x.service.Acme_Response)
     })
 _sym_db.RegisterMessage(Acme_Response)
@@ -291,19 +282,16 @@
     'GetOutcomeResponse',
     (_message.Message,),
     {
-
'Outcome': - _reflection.GeneratedProtocolMessageType( - 'Outcome', - (_message.Message,), - { - 'DESCRIPTOR': _GETOUTCOMERESPONSE_OUTCOME, - '__module__': 'sight_service.proto.service_pb2' - # @@protoc_insertion_point(class_scope:sight.x.service.GetOutcomeResponse.Outcome) - }), - 'DESCRIPTOR': - _GETOUTCOMERESPONSE, - '__module__': - 'sight_service.proto.service_pb2' + 'Outcome': _reflection.GeneratedProtocolMessageType( + 'Outcome', + (_message.Message,), + { + 'DESCRIPTOR': _GETOUTCOMERESPONSE_OUTCOME, + '__module__': 'sight_service.proto.service_pb2' + # @@protoc_insertion_point(class_scope:sight.x.service.GetOutcomeResponse.Outcome) + }), + 'DESCRIPTOR': _GETOUTCOMERESPONSE, + '__module__': 'sight_service.proto.service_pb2' # @@protoc_insertion_point(class_scope:sight.x.service.GetOutcomeResponse) }) _sym_db.RegisterMessage(GetOutcomeResponse) @@ -446,8 +434,8 @@ _SIGHTSERVICE.methods_by_name['FinalizeEpisode']._options = None _SIGHTSERVICE.methods_by_name[ 'FinalizeEpisode']._serialized_options = b'\202\323\344\223\002;\",/v1/finalize_episode/{client_id}/{worker_id}:\013acme_config' - _LOGFORMAT._serialized_start = 3645 - _LOGFORMAT._serialized_end = 3736 + _LOGFORMAT._serialized_start = 3691 + _LOGFORMAT._serialized_end = 3782 _ACME_REQUEST._serialized_start = 163 _ACME_REQUEST._serialized_end = 584 _ACME_REQUEST_OBSERVATION._serialized_start = 275 @@ -507,21 +495,21 @@ _TESTREQUEST._serialized_start = 3217 _TESTREQUEST._serialized_end = 3249 _TESTRESPONSE._serialized_start = 3251 - _TESTRESPONSE._serialized_end = 3278 - _CREATEREQUEST._serialized_start = 3280 - _CREATEREQUEST._serialized_end = 3295 - _CREATERESPONSE._serialized_start = 3297 - _CREATERESPONSE._serialized_end = 3346 - _CLOSEREQUEST._serialized_start = 3348 - _CLOSEREQUEST._serialized_end = 3381 - _CLOSERESPONSE._serialized_start = 3383 - _CLOSERESPONSE._serialized_end = 3420 - _WORKERALIVEREQUEST._serialized_start = 3422 - _WORKERALIVEREQUEST._serialized_end = 3480 - _WORKERALIVERESPONSE._serialized_start = 3483 - _WORKERALIVERESPONSE._serialized_end = 3643 - _WORKERALIVERESPONSE_STATUSTYPE._serialized_start = 3576 - _WORKERALIVERESPONSE_STATUSTYPE._serialized_end = 3643 - _SIGHTSERVICE._serialized_start = 3739 - _SIGHTSERVICE._serialized_end = 5361 + _TESTRESPONSE._serialized_end = 3324 + _CREATEREQUEST._serialized_start = 3326 + _CREATEREQUEST._serialized_end = 3341 + _CREATERESPONSE._serialized_start = 3343 + _CREATERESPONSE._serialized_end = 3392 + _CLOSEREQUEST._serialized_start = 3394 + _CLOSEREQUEST._serialized_end = 3427 + _CLOSERESPONSE._serialized_start = 3429 + _CLOSERESPONSE._serialized_end = 3466 + _WORKERALIVEREQUEST._serialized_start = 3468 + _WORKERALIVEREQUEST._serialized_end = 3526 + _WORKERALIVERESPONSE._serialized_start = 3529 + _WORKERALIVERESPONSE._serialized_end = 3689 + _WORKERALIVERESPONSE_STATUSTYPE._serialized_start = 3622 + _WORKERALIVERESPONSE_STATUSTYPE._serialized_end = 3689 + _SIGHTSERVICE._serialized_start = 3785 + _SIGHTSERVICE._serialized_end = 5407 # @@protoc_insertion_point(module_scope) diff --git a/sight_service/proto/service_pb2_grpc.py b/sight_service/proto/service_pb2_grpc.py index bdad566..3f62a74 100644 --- a/sight_service/proto/service_pb2_grpc.py +++ b/sight_service/proto/service_pb2_grpc.py @@ -201,110 +201,97 @@ def FinalizeEpisode(self, request, context): def add_SightServiceServicer_to_server(servicer, server): rpc_method_handlers = { - 'Test': - grpc.unary_unary_rpc_method_handler( - servicer.Test, - request_deserializer=sight__service_dot_proto_dot_service__pb2. 
- TestRequest.FromString, - response_serializer=sight__service_dot_proto_dot_service__pb2. - TestResponse.SerializeToString, - ), - 'Create': - grpc.unary_unary_rpc_method_handler( - servicer.Create, - request_deserializer=sight__service_dot_proto_dot_service__pb2. - CreateRequest.FromString, - response_serializer=sight__service_dot_proto_dot_service__pb2. - CreateResponse.SerializeToString, - ), - 'Close': - grpc.unary_unary_rpc_method_handler( - servicer.Close, - request_deserializer=sight__service_dot_proto_dot_service__pb2. - CloseRequest.FromString, - response_serializer=sight__service_dot_proto_dot_service__pb2. - CloseResponse.SerializeToString, - ), - 'WorkerAlive': - grpc.unary_unary_rpc_method_handler( - servicer.WorkerAlive, - request_deserializer=sight__service_dot_proto_dot_service__pb2. - WorkerAliveRequest.FromString, - response_serializer=sight__service_dot_proto_dot_service__pb2. - WorkerAliveResponse.SerializeToString, - ), - 'Launch': - grpc.unary_unary_rpc_method_handler( - servicer.Launch, - request_deserializer=sight__service_dot_proto_dot_service__pb2. - LaunchRequest.FromString, - response_serializer=sight__service_dot_proto_dot_service__pb2. - LaunchResponse.SerializeToString, - ), - 'DecisionPoint': - grpc.unary_unary_rpc_method_handler( - servicer.DecisionPoint, - request_deserializer=sight__service_dot_proto_dot_service__pb2. - DecisionPointRequest.FromString, - response_serializer=sight__service_dot_proto_dot_service__pb2. - DecisionPointResponse.SerializeToString, - ), - 'Tell': - grpc.unary_unary_rpc_method_handler( - servicer.Tell, - request_deserializer=sight__service_dot_proto_dot_service__pb2. - TellRequest.FromString, - response_serializer=sight__service_dot_proto_dot_service__pb2. - TellResponse.SerializeToString, - ), - 'Listen': - grpc.unary_unary_rpc_method_handler( - servicer.Listen, - request_deserializer=sight__service_dot_proto_dot_service__pb2. - ListenRequest.FromString, - response_serializer=sight__service_dot_proto_dot_service__pb2. - ListenResponse.SerializeToString, - ), - 'CurrentStatus': - grpc.unary_unary_rpc_method_handler( - servicer.CurrentStatus, - request_deserializer=sight__service_dot_proto_dot_service__pb2. - CurrentStatusRequest.FromString, - response_serializer=sight__service_dot_proto_dot_service__pb2. - CurrentStatusResponse.SerializeToString, - ), - 'FetchOptimalAction': - grpc.unary_unary_rpc_method_handler( - servicer.FetchOptimalAction, - request_deserializer=sight__service_dot_proto_dot_service__pb2. - FetchOptimalActionRequest.FromString, - response_serializer=sight__service_dot_proto_dot_service__pb2. - FetchOptimalActionResponse.SerializeToString, - ), - 'ProposeAction': - grpc.unary_unary_rpc_method_handler( - servicer.ProposeAction, - request_deserializer=sight__service_dot_proto_dot_service__pb2. - ProposeActionRequest.FromString, - response_serializer=sight__service_dot_proto_dot_service__pb2. - ProposeActionResponse.SerializeToString, - ), - 'GetOutcome': - grpc.unary_unary_rpc_method_handler( - servicer.GetOutcome, - request_deserializer=sight__service_dot_proto_dot_service__pb2. - GetOutcomeRequest.FromString, - response_serializer=sight__service_dot_proto_dot_service__pb2. - GetOutcomeResponse.SerializeToString, - ), - 'FinalizeEpisode': - grpc.unary_unary_rpc_method_handler( - servicer.FinalizeEpisode, - request_deserializer=sight__service_dot_proto_dot_service__pb2. - FinalizeEpisodeRequest.FromString, - response_serializer=sight__service_dot_proto_dot_service__pb2. 
- FinalizeEpisodeResponse.SerializeToString, - ), + 'Test': grpc.unary_unary_rpc_method_handler( + servicer.Test, + request_deserializer=sight__service_dot_proto_dot_service__pb2. + TestRequest.FromString, + response_serializer=sight__service_dot_proto_dot_service__pb2. + TestResponse.SerializeToString, + ), + 'Create': grpc.unary_unary_rpc_method_handler( + servicer.Create, + request_deserializer=sight__service_dot_proto_dot_service__pb2. + CreateRequest.FromString, + response_serializer=sight__service_dot_proto_dot_service__pb2. + CreateResponse.SerializeToString, + ), + 'Close': grpc.unary_unary_rpc_method_handler( + servicer.Close, + request_deserializer=sight__service_dot_proto_dot_service__pb2. + CloseRequest.FromString, + response_serializer=sight__service_dot_proto_dot_service__pb2. + CloseResponse.SerializeToString, + ), + 'WorkerAlive': grpc.unary_unary_rpc_method_handler( + servicer.WorkerAlive, + request_deserializer=sight__service_dot_proto_dot_service__pb2. + WorkerAliveRequest.FromString, + response_serializer=sight__service_dot_proto_dot_service__pb2. + WorkerAliveResponse.SerializeToString, + ), + 'Launch': grpc.unary_unary_rpc_method_handler( + servicer.Launch, + request_deserializer=sight__service_dot_proto_dot_service__pb2. + LaunchRequest.FromString, + response_serializer=sight__service_dot_proto_dot_service__pb2. + LaunchResponse.SerializeToString, + ), + 'DecisionPoint': grpc.unary_unary_rpc_method_handler( + servicer.DecisionPoint, + request_deserializer=sight__service_dot_proto_dot_service__pb2. + DecisionPointRequest.FromString, + response_serializer=sight__service_dot_proto_dot_service__pb2. + DecisionPointResponse.SerializeToString, + ), + 'Tell': grpc.unary_unary_rpc_method_handler( + servicer.Tell, + request_deserializer=sight__service_dot_proto_dot_service__pb2. + TellRequest.FromString, + response_serializer=sight__service_dot_proto_dot_service__pb2. + TellResponse.SerializeToString, + ), + 'Listen': grpc.unary_unary_rpc_method_handler( + servicer.Listen, + request_deserializer=sight__service_dot_proto_dot_service__pb2. + ListenRequest.FromString, + response_serializer=sight__service_dot_proto_dot_service__pb2. + ListenResponse.SerializeToString, + ), + 'CurrentStatus': grpc.unary_unary_rpc_method_handler( + servicer.CurrentStatus, + request_deserializer=sight__service_dot_proto_dot_service__pb2. + CurrentStatusRequest.FromString, + response_serializer=sight__service_dot_proto_dot_service__pb2. + CurrentStatusResponse.SerializeToString, + ), + 'FetchOptimalAction': grpc.unary_unary_rpc_method_handler( + servicer.FetchOptimalAction, + request_deserializer=sight__service_dot_proto_dot_service__pb2. + FetchOptimalActionRequest.FromString, + response_serializer=sight__service_dot_proto_dot_service__pb2. + FetchOptimalActionResponse.SerializeToString, + ), + 'ProposeAction': grpc.unary_unary_rpc_method_handler( + servicer.ProposeAction, + request_deserializer=sight__service_dot_proto_dot_service__pb2. + ProposeActionRequest.FromString, + response_serializer=sight__service_dot_proto_dot_service__pb2. + ProposeActionResponse.SerializeToString, + ), + 'GetOutcome': grpc.unary_unary_rpc_method_handler( + servicer.GetOutcome, + request_deserializer=sight__service_dot_proto_dot_service__pb2. + GetOutcomeRequest.FromString, + response_serializer=sight__service_dot_proto_dot_service__pb2. 
+ GetOutcomeResponse.SerializeToString, + ), + 'FinalizeEpisode': grpc.unary_unary_rpc_method_handler( + servicer.FinalizeEpisode, + request_deserializer=sight__service_dot_proto_dot_service__pb2. + FinalizeEpisodeRequest.FromString, + response_serializer=sight__service_dot_proto_dot_service__pb2. + FinalizeEpisodeResponse.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( 'sight.x.service.SightService', rpc_method_handlers) diff --git a/sight_service/sensitivity_analysis.py b/sight_service/sensitivity_analysis.py index a8d0ac2..f272538 100644 --- a/sight_service/sensitivity_analysis.py +++ b/sight_service/sensitivity_analysis.py @@ -20,12 +20,13 @@ from helpers.logs.logs_handler import logger as logging from overrides import overrides from sight_service.optimizer_instance import OptimizerInstance -from sight_service.optimizer_instance import param_dict_to_proto -from sight_service.optimizer_instance import param_proto_to_dict from sight_service.proto import service_pb2 _file_name = 'sensitivity_analysis.py' +from sight.utils.proto_conversion import convert_dict_to_proto +from sight.utils.proto_conversion import convert_proto_to_dict + class SensitivityAnalysis(OptimizerInstance): """Exhaustively searches over all the possible values of the action attributes. @@ -73,9 +74,9 @@ def _generate_action(self) -> Dict[str, Any]: for i, key in enumerate(self.actions): if key in self.possible_values: print('selecting from possible values') - action[key] = self.possible_values[key][ - random.randint(0, len(self.possible_values[key]) - 1) - ] + action[key] = self.possible_values[key][random.randint( + 0, + len(self.possible_values[key]) - 1)] elif self.actions[key].HasField('continuous_prob_dist'): if self.actions[key].continuous_prob_dist.HasField('gaussian'): rand_val = random.gauss( @@ -89,25 +90,29 @@ def _generate_action(self) -> Dict[str, Any]: rand_val = self.actions[key].max_value action[key] = rand_val elif self.actions[key].continuous_prob_dist.HasField('uniform'): - rand_val = random.uniform(self.actions[key].continuous_prob_dist.uniform.min_val, - self.actions[key].continuous_prob_dist.uniform.max_val) - print ('self.actions[key].continuous_prob_dist=%s, rand_val=%s' % (self.actions[key].continuous_prob_dist, rand_val)) + rand_val = random.uniform( + self.actions[key].continuous_prob_dist.uniform.min_val, + self.actions[key].continuous_prob_dist.uniform.max_val) + print('self.actions[key].continuous_prob_dist=%s, rand_val=%s' % + (self.actions[key].continuous_prob_dist, rand_val)) action[key] = rand_val else: - raise ValueError('Only support Gaussian and Uniform continuous distributions.') + raise ValueError( + 'Only support Gaussian and Uniform continuous distributions.') elif self.actions[key].HasField('discrete_prob_dist'): if self.actions[key].discrete_prob_dist.HasField('uniform'): - rand_val = random.randint(self.actions[key].discrete_prob_dist.uniform.min_val, - self.actions[key].discrete_prob_dist.uniform.max_val) - print ('self.actions[key].discrete_prob_dist=%s, rand_val=%s' % (self.actions[key].discrete_prob_dist, rand_val)) + rand_val = random.randint( + self.actions[key].discrete_prob_dist.uniform.min_val, + self.actions[key].discrete_prob_dist.uniform.max_val) + print('self.actions[key].discrete_prob_dist=%s, rand_val=%s' % + (self.actions[key].discrete_prob_dist, rand_val)) action[key] = rand_val else: raise ValueError('Only support Uniform discrete distribution.') else: print('selecting from random.uniform') - action[key] = random.uniform( - 
self.actions[key].min_value, self.actions[key].max_value - ) + action[key] = random.uniform(self.actions[key].min_value, + self.actions[key].max_value) print('action=', action) return action @@ -119,10 +124,11 @@ def decision_point( logging.debug('>>>> In %s of %s', method_name, _file_name) dp_response = service_pb2.DecisionPointResponse() - logging.info('DecisionPoint: %s: %s', request.worker_id, request.worker_id in self.active_samples) - dp_response.action.extend(param_dict_to_proto( - self.active_samples[request.worker_id]['action'] - )) + logging.info('DecisionPoint: %s: %s', request.worker_id, request.worker_id + in self.active_samples) + dp_response.action.CopyFrom( + convert_dict_to_proto( + dict=self.active_samples[request.worker_id]['action'])) dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT logging.debug('<<<< Out %s of %s', method_name, _file_name) return dp_response @@ -137,11 +143,14 @@ def finalize_episode( self._lock.acquire() # logging.info('FinalizeEpisode complete_samples=%s' % self.complete_samples) - logging.info('FinalizeEpisode: %s: %s', request.worker_id, request.worker_id in self.active_samples) - self.complete_samples[self.active_samples[request.worker_id]['sample_num']] = { - 'outcome': param_proto_to_dict(request.decision_outcome.outcome_params), - 'action': self.active_samples[request.worker_id]['action'], - } + logging.info('FinalizeEpisode: %s: %s', request.worker_id, request.worker_id + in self.active_samples) + self.complete_samples[self.active_samples[ + request.worker_id]['sample_num']] = { + 'outcome': convert_proto_to_dict( + proto=request.decision_outcome.outcome_params), + 'action': self.active_samples[request.worker_id]['action'], + } del self.active_samples[request.worker_id] self._lock.release() @@ -166,7 +175,7 @@ def current_status( # for s in sorted(self.complete_samples.items(), key=lambda x: x[1]['outcome'], reverse=True): self._lock.acquire() for s in self.complete_samples.items(): - response += str(s[0])+', ' + response += str(s[0]) + ', ' response += ', '.join([str(s[1]['action'][key]) for key in keys]) response += ', ' + str(s[1]['outcome']) + '\n' response += ']' @@ -179,9 +188,8 @@ def current_status( status = service_pb2.CurrentStatusResponse.Status.SUCCESS self._lock.release() - return service_pb2.CurrentStatusResponse( - status = status, - response_str=response) + return service_pb2.CurrentStatusResponse(status=status, + response_str=response) @overrides def fetch_optimal_action( @@ -189,32 +197,30 @@ def fetch_optimal_action( ) -> service_pb2.FetchOptimalActionResponse: method_name = 'fetch_optimal_action' return service_pb2.CurrentStatusResponse(response_str='') - + @overrides def WorkerAlive( self, request: service_pb2.WorkerAliveRequest ) -> service_pb2.WorkerAliveResponse: - method_name = "WorkerAlive" - logging.debug(">>>> In %s of %s", method_name, _file_name) + method_name = "WorkerAlive" + logging.debug(">>>> In %s of %s", method_name, _file_name) - if self.num_samples_issued < self.num_trials: - worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_ACT + if self.num_samples_issued < self.num_trials: + worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_ACT - next_action = self._generate_action() + next_action = self._generate_action() - self._lock.acquire() - logging.info('WorkerAlive: %s: %s', request.worker_id, next_action) - self.active_samples[request.worker_id] = { - 'action': next_action, - 'sample_num': self.num_samples_issued, - } - self.num_samples_issued 
+= 1 - self._lock.release() - - else: - worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_DONE - logging.info("worker_alive_status is %s", worker_alive_status) - logging.debug("<<<< Out %s of %s", method_name, _file_name) - return service_pb2.WorkerAliveResponse( - status_type=worker_alive_status) + self._lock.acquire() + logging.info('WorkerAlive: %s: %s', request.worker_id, next_action) + self.active_samples[request.worker_id] = { + 'action': next_action, + 'sample_num': self.num_samples_issued, + } + self.num_samples_issued += 1 + self._lock.release() + else: + worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_DONE + logging.info("worker_alive_status is %s", worker_alive_status) + logging.debug("<<<< Out %s of %s", method_name, _file_name) + return service_pb2.WorkerAliveResponse(status_type=worker_alive_status) diff --git a/sight_service/service_root.py b/sight_service/service_root.py index 27ff463..5e5e9c5 100644 --- a/sight_service/service_root.py +++ b/sight_service/service_root.py @@ -22,23 +22,24 @@ def warn(*args, **kwargs): warnings.warn = warn -from absl import app -from absl import flags -from concurrent import futures from collections import defaultdict -from dotenv import load_dotenv +from concurrent import futures import functools -import grpc import logging import math +from absl import app +from absl import flags +from dotenv import load_dotenv +import grpc + load_dotenv() import os +import sys import time from typing import Any, Dict, List, Tuple import uuid -import sys # from overrides import overrides from readerwriterlock import rwlock @@ -77,11 +78,15 @@ def generate_unique_number() -> int: func_to_elapsed_time_sq = defaultdict(float) func_call_count = defaultdict(float) + def rpc_call(func): + @functools.wraps(func) def wrapper(*args, **kwargs): - logging.debug(f"<<<<<< {func.__name__}, file {os.path.basename(__file__)} with args={args}") - + logging.debug( + f"<<<<<< {func.__name__}, file {os.path.basename(__file__)} with args={args}" + ) + if 'request' in kwargs: if 'client_id' in kwargs['request'].keys(): if kwargs['request'].client_id == 0: @@ -91,23 +96,27 @@ def wrapper(*args, **kwargs): result = func(*args, **kwargs) elapsed_time = time.time() - start_time func_to_elapsed_time[func.__name__] += elapsed_time - func_to_elapsed_time_sq[func.__name__] += elapsed_time*elapsed_time + func_to_elapsed_time_sq[func.__name__] += elapsed_time * elapsed_time func_call_count[func.__name__] += 1 - mean = func_to_elapsed_time[func.__name__]/func_call_count[func.__name__] - mean_sq = func_to_elapsed_time_sq[func.__name__]/func_call_count[func.__name__] - - logging.debug('>>>>>> %s, file %s, elapsed: (this=%f, avg=%f, rel_sd=%f, count=%d)', - func.__name__, - os.path.basename(__file__), - elapsed_time, - mean, - math.sqrt(mean_sq - mean*mean)/mean if mean != 0 else 0, - func_call_count[func.__name__], - ) + mean = func_to_elapsed_time[func.__name__] / func_call_count[func.__name__] + mean_sq = func_to_elapsed_time_sq[func.__name__] / func_call_count[ + func.__name__] + + logging.debug( + '>>>>>> %s, file %s, elapsed: (this=%f, avg=%f, rel_sd=%f, count=%d)', + func.__name__, + os.path.basename(__file__), + elapsed_time, + mean, + math.sqrt(mean_sq - mean * mean) / mean if mean != 0 else 0, + func_call_count[func.__name__], + ) return result + return wrapper + class Optimizers: """ Optimizer class to create request specific optimizer and use the methods @@ -124,7 +133,8 @@ def launch(self, """ optimizer_type = 
request.decision_config_params.optimizer_type logging.debug(">>>>>>> In %s method of %s file. optimizer_type=%s", - sys._getframe().f_code.co_name, os.path.basename(__file__), optimizer_type) + sys._getframe().f_code.co_name, os.path.basename(__file__), + optimizer_type) with self.instances_lock.gen_wlock(): if optimizer_type == sight_pb2.DecisionConfigurationStart.OptimizerType.OT_VIZIER: self.instances[request.client_id] = Vizier() @@ -167,8 +177,9 @@ def launch(self, else: return service_pb2.LaunchResponse( display_string=f"OPTIMIZER '{optimizer_type}' NOT VALID!!") - - logging.debug("<<<<<< Out %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__)) + + logging.debug("<<<<<< Out %s method of %s file.", + sys._getframe().f_code.co_name, os.path.basename(__file__)) def get_instance(self, client_id: str) -> OptimizerInstance: # logging.debug(">>>>>>> In %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__)) @@ -182,6 +193,13 @@ def get_instance(self, client_id: str) -> OptimizerInstance: # logging.debug("<<<<<< Out %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__)) +import json +from typing import Any, Callable, Dict, List, Optional + +from sight.proto import sight_pb2 +from sight_service.proto import service_pb2 + + class SightService(service_pb2_grpc.SightServiceServicer): """Service class to handle the grpc request send via sight client. """ @@ -191,10 +209,14 @@ def __init__(self): self.optimizers = Optimizers() logging.debug('SightService::__init__') - @rpc_call def Test(self, request, context): - return service_pb2.TestResponse(val="222") + method_name = "Test" + logging.info(">>>>>>> In %s method of %s file.", method_name, _file_name) + obj = service_pb2.TestResponse() + obj.val = str(222) + logging.info("<<<<<< Out %s method of %s file.", method_name, _file_name) + return obj # def GetWeights(self, request, context): # logging.debug(">>>>>>> In %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__)) @@ -268,7 +290,8 @@ def WorkerAlive(self, request, context): def serve(): """Main method that listens on port 8080 and handle requests received from client. 
""" - logging.info(">>>>>>> In %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__)) + logging.info(">>>>>>> In %s method of %s file.", + sys._getframe().f_code.co_name, os.path.basename(__file__)) server = grpc.server(futures.ThreadPoolExecutor(max_workers=500), options=[ @@ -282,18 +305,21 @@ def serve(): # flask_app.run(debug=True, host="0.0.0.0", port=_PORT.value) server.wait_for_termination() - logging.info("<<<<<<< Out %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__)) + logging.info("<<<<<<< Out %s method of %s file.", + sys._getframe().f_code.co_name, os.path.basename(__file__)) def main(argv): logging.basicConfig(level=logging.INFO) - logging.info(">>>>>>> In %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__)) + logging.info(">>>>>>> In %s method of %s file.", + sys._getframe().f_code.co_name, os.path.basename(__file__)) try: app.run(serve()) except BaseException as e: logging.error("Error occurred : ") logging.error(e) - logging.info("<<<<<<< Out %s method of %s file.", sys._getframe().f_code.co_name, os.path.basename(__file__)) + logging.info("<<<<<<< Out %s method of %s file.", + sys._getframe().f_code.co_name, os.path.basename(__file__)) if __name__ == "__main__": diff --git a/sight_service/smc_py.py b/sight_service/smc_py.py index fc68bca..4abf0a1 100644 --- a/sight_service/smc_py.py +++ b/sight_service/smc_py.py @@ -24,8 +24,8 @@ from overrides import overrides from scipy.stats import uniform from sight.proto import sight_pb2 +from sight.utils.proto_conversion import convert_proto_to_dict from sight_service.optimizer_instance import OptimizerInstance -from sight_service.optimizer_instance import param_dict_to_proto from sight_service.proto import service_pb2 from smcpy import AdaptiveSampler as Sampler from smcpy import VectorMCMC @@ -201,9 +201,7 @@ def finalize_episode( self, request: service_pb2.FinalizeEpisodeRequest ) -> service_pb2.FinalizeEpisodeResponse: logging.info('FinalizeEpisode request=%s', request) - d = {} - for a in request.decision_point.choice_params: - d[a.key] = a.value.double_value + d = convert_proto_to_dict(proto=request.decision_point.choice_params) result = [d[key] for key in self._param_names] self._lock.acquire() diff --git a/sight_service/vizier.py b/sight_service/vizier.py index d143789..2e3878e 100644 --- a/sight_service/vizier.py +++ b/sight_service/vizier.py @@ -22,8 +22,9 @@ from google.cloud import aiplatform from helpers.logs.logs_handler import logger as logging from overrides import overrides +from sight.utils.proto_conversion import convert_dict_to_proto +from sight.utils.proto_conversion import convert_proto_to_dict from sight_service.optimizer_instance import OptimizerInstance -from sight_service.optimizer_instance import param_dict_to_proto from sight_service.proto import service_pb2 load_dotenv() @@ -121,8 +122,8 @@ def decision_point( self.current_trial[request.worker_id] = response[0].name dp_response = service_pb2.DecisionPointResponse() - dp_response.action.extend( - param_dict_to_proto({ + dp_response.action.CopyFrom( + convert_dict_to_proto(dict={ param.parameter_id: param.value for param in response[0].parameters })) dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT diff --git a/sight_service/worklist_scheduler_opt.py b/sight_service/worklist_scheduler_opt.py index d26c5ce..9d3cd64 100644 --- a/sight_service/worklist_scheduler_opt.py +++ b/sight_service/worklist_scheduler_opt.py @@ -20,9 +20,9 @@ from 
overrides import overrides
 from readerwriterlock import rwlock
 from sight.proto import sight_pb2
+from sight.utils.proto_conversion import convert_dict_to_proto
+from sight.utils.proto_conversion import convert_proto_to_dict
 # from sight_service.optimizer_instance import OptimizerInstance
-from sight_service.optimizer_instance import param_dict_to_proto
-from sight_service.optimizer_instance import param_proto_to_dict
 from sight_service.proto import service_pb2
 from sight_service.single_action_optimizer import MessageDetails
 from sight_service.single_action_optimizer import SingleActionOptimizer
@@ -46,15 +46,18 @@ def __init__(self):
     self.possible_values = {}
     self.max_reward_sample = {}
 
-
-  def add_outcome_to_outcome_response(self,msg_details : MessageDetails, sample_id, outcome: service_pb2.GetOutcomeResponse.Outcome):
+  def add_outcome_to_outcome_response(
+      self, msg_details: MessageDetails, sample_id,
+      outcome: service_pb2.GetOutcomeResponse.Outcome):
     outcome.action_id = sample_id
     outcome.status = service_pb2.GetOutcomeResponse.Outcome.Status.COMPLETED
     outcome.reward = msg_details.reward
-    outcome.action_attrs.extend(param_dict_to_proto(msg_details.action))
-    outcome.outcome_attrs.extend(param_dict_to_proto(msg_details.outcome))
-    outcome.attributes.extend(param_dict_to_proto(msg_details.attributes))
-
+    outcome.action_attrs.CopyFrom(
+        convert_dict_to_proto(dict=msg_details.action))
+    outcome.outcome_attrs.CopyFrom(
+        convert_dict_to_proto(dict=msg_details.outcome))
+    outcome.attributes.CopyFrom(
+        convert_dict_to_proto(dict=msg_details.attributes))
 
   @overrides
   def launch(self,
@@ -69,13 +72,13 @@ def launch(self,
   @overrides
   def propose_action(
       self, request: service_pb2.ProposeActionRequest
-  ) -> service_pb2.ProposeActionResponse:
+  ) -> service_pb2.ProposeActionResponse:
     # print('request in propose actions: ', request)
 
-    attributes = param_proto_to_dict(request.attributes)
-    action_attrs = param_proto_to_dict(request.action_attrs)
+    attributes = convert_proto_to_dict(proto=request.attributes)
+    action_attrs = convert_proto_to_dict(proto=request.action_attrs)
 
-    message = MessageDetails.create(action=action_attrs,attributes=attributes)
+    message = MessageDetails.create(action=action_attrs, attributes=attributes)
 
     unique_id = self.queue.push_message(message)
 
@@ -98,7 +101,9 @@ def GetOutcome(
       for sample_id in all_completed_messages:
         outcome = response.outcome.add()
         given_msg_details = all_completed_messages[sample_id]
-        self.add_outcome_to_outcome_response(msg_details=given_msg_details,sample_id=sample_id,outcome=response)
+        self.add_outcome_to_outcome_response(msg_details=given_msg_details,
+                                             sample_id=sample_id,
+                                             outcome=outcome)
     else:
       required_samples = list(request.unique_ids)
       for sample_id in required_samples:
@@ -106,7 +111,9 @@ def GetOutcome(
         outcome.action_id = sample_id
         if sample_id in all_completed_messages:
           given_msg_details = all_completed_messages[sample_id]
-          self.add_outcome_to_outcome_response(msg_details=given_msg_details,sample_id=sample_id,outcome=outcome)
+          self.add_outcome_to_outcome_response(msg_details=given_msg_details,
+                                               sample_id=sample_id,
+                                               outcome=outcome)
         elif self.queue.is_message_in_pending(sample_id):
           outcome.status = service_pb2.GetOutcomeResponse.Outcome.Status.PENDING
           outcome.response_str = '!! requested sample not yet assigned to any worker !!'
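The hunks above replace the repeated-field helpers `param_dict_to_proto` / `param_proto_to_dict` with `convert_dict_to_proto` / `convert_proto_to_dict`, which work in terms of a single `sight_pb2.DecisionParam` message; that is why the singular fields are now assigned with `CopyFrom` instead of `extend`. Below is a minimal sketch of the intended round trip, not part of the patch: the helper names and their `dict=` / `proto=` keywords are taken from the diff itself, while the assumption that the two helpers are exact inverses is the author's reading of the refactor.

```python
# Illustrative sketch, not part of the patch.
from sight.proto import sight_pb2
from sight.utils.proto_conversion import convert_dict_to_proto
from sight.utils.proto_conversion import convert_proto_to_dict
from sight_service.proto import service_pb2

action = {'param_a': 0.5, 'param_b': 1.25}

# The whole dict becomes one DecisionParam message (a map of name -> Value),
# so it is assigned to a singular message field with CopyFrom, not extend:
action_proto = convert_dict_to_proto(dict=action)
assert isinstance(action_proto, sight_pb2.DecisionParam)

outcome = service_pb2.GetOutcomeResponse.Outcome()
outcome.action_attrs.CopyFrom(action_proto)

# Assumed inverse: recovers the plain dict for the optimizer logic.
assert convert_proto_to_dict(proto=outcome.action_attrs) == action
```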
@@ -121,7 +128,7 @@ def GetOutcome( @overrides def decision_point( self, request: service_pb2.DecisionPointRequest - ) -> service_pb2.DecisionPointResponse: + ) -> service_pb2.DecisionPointResponse: method_name = "decision_point" logging.debug(">>>> In %s of %s", method_name, _file_name) logging.info('self.queue ==> %s', self.queue) @@ -135,49 +142,15 @@ def decision_point( raise ValueError("Key not found in active_samples") next_action = list(samples.values())[0].action logging.info('next_action=%s', next_action) - response.action.extend(param_dict_to_proto(next_action)) + response.action.CopyFrom(convert_dict_to_proto(dict=next_action)) response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT logging.debug("<<<< Out %s of %s", method_name, _file_name) return response - # --- end - - # dp_response = service_pb2.DecisionPointResponse() - # if(self.exp_completed): - # logging.info("sight experiment completed, killing the worker") - # dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_DONE - # else: - # if self.pending_samples: - - # todo : meetashah : add logic to fetch action stored from propose actions and send it as repsonse - # key, sample = self.pending_samples.popitem() - # fetching the key in FIFO manner - - #? this part now handled by worker alive rpc - # with self.pending_lock.gen_wlock(): - # key = next(iter(self.pending_samples)) - # sample = self.pending_samples.pop(key) - - # with self.active_lock.gen_wlock(): - # self.active_samples[request.worker_id] = {'id': key, 'sample': sample} - - # with self.active_lock.gen_rlock(): - # if (request.worker_id in self.active_samples): - # sample = self.active_samples[request.worker_id]['sample'] - # else: - # raise ValueError("key not foung in active_samples") - # next_action = sample[0] - # logging.info('next_action=%s', next_action) - # # raise SystemExit - # dp_response.action.extend(param_dict_to_proto(next_action)) - # dp_response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT - # logging.debug("<<<< Out %s of %s", method_name, _file_name) - # return dp_response - @overrides def finalize_episode( self, request: service_pb2.FinalizeEpisodeRequest - ) -> service_pb2.FinalizeEpisodeResponse: + ) -> service_pb2.FinalizeEpisodeResponse: method_name = "finalize_episode" logging.debug(">>>> In %s of %s", method_name, _file_name) @@ -185,14 +158,19 @@ def finalize_episode( all_active_messages = self.queue.get_active() - active_messages : Dict[str,MessageDetails] = all_active_messages[request.worker_id] + active_messages: Dict[str, MessageDetails] = all_active_messages[ + request.worker_id] for action_id, message in list(active_messages.items()): self.queue.complete_message( message_id=action_id, worker_id=request.worker_id, - update_fn = lambda msg: msg.update(reward = request.decision_outcome.reward, outcome = param_proto_to_dict(request.decision_outcome.outcome_params), action = param_proto_to_dict(request.decision_point.choice_params)) - ) + update_fn=lambda msg: msg.update( + reward=request.decision_outcome.reward, + outcome=convert_proto_to_dict(proto=request.decision_outcome. + outcome_params), + action=convert_proto_to_dict(proto=request.decision_point. 
+                                      choice_params)))
 
     logging.info("self.queue => %s", self.queue)
     logging.debug("<<<< Out %s of %s", method_name, _file_name)
@@ -201,7 +179,7 @@ def finalize_episode(
   @overrides
   def current_status(
       self, request: service_pb2.CurrentStatusRequest
-  ) -> service_pb2.CurrentStatusResponse:
+  ) -> service_pb2.CurrentStatusResponse:
     method_name = "current_status"
     logging.debug(">>>> In %s of %s", method_name, _file_name)
     # add logic to check status - ref from exhaustive search
@@ -209,7 +187,7 @@ def current_status(
   @overrides
   def fetch_optimal_action(
       self, request: service_pb2.FetchOptimalActionRequest
-  ) -> service_pb2.FetchOptimalActionResponse:
+  ) -> service_pb2.FetchOptimalActionResponse:
     method_name = "fetch_optimal_action"
     logging.debug(">>>> In %s of %s", method_name, _file_name)
     # add logic to check status - ref from exhaustive search
@@ -229,7 +207,7 @@ def close(self,
   @overrides
   def WorkerAlive(
       self, request: service_pb2.WorkerAliveRequest
-  ) -> service_pb2.WorkerAliveResponse:
+  ) -> service_pb2.WorkerAliveResponse:
     method_name = "WorkerAlive"
     logging.debug(">>>> In %s of %s", method_name, _file_name)

From b887cf0bc44e210f9f34e95684744559b5438b42 Mon Sep 17 00:00:00 2001
From: Hrushikesh Makode <152846252+hrushikeshm-g@users.noreply.github.com>
Date: Wed, 4 Dec 2024 09:10:10 +0000
Subject: [PATCH 23/25] batch actions (#68)

* batch actions
* multiple finalize_episode changes
* updated the PR comments
* updated the shared_batch_messages class and its test
---
 py/helpers/cache/cache_gcs.py                 |    4 +-
 py/sight/proto/sight.proto                    |    7 +
 py/sight/proto/sight_pb2.py                   |   12 +-
 py/sight/widgets/decision/decision.py         | 1433 ++++++++++++----
 .../single_action_optimizer_client.py         |    8 +-
 sight_service/proto/api_descriptor.pb         |  Bin 145890 -> 146455 bytes
 sight_service/proto/service.proto             |   10 +-
 sight_service/proto/service_pb2.py            |   52 +-
 sight_service/shared_batch_messages.py        |  111 ++
 sight_service/single_action_optimizer.py      |   26 +-
 .../functional/test_shared_batch_messages.py  |   98 ++
 sight_service/worklist_scheduler_opt.py       |   37 +-
 12 files changed, 1278 insertions(+), 520 deletions(-)
 create mode 100644 sight_service/shared_batch_messages.py
 create mode 100644 sight_service/tests/functional/test_shared_batch_messages.py

diff --git a/py/helpers/cache/cache_gcs.py b/py/helpers/cache/cache_gcs.py
index dfaf7aa..bf9b78b 100644
--- a/py/helpers/cache/cache_gcs.py
+++ b/py/helpers/cache/cache_gcs.py
@@ -17,8 +17,8 @@ def __init__(self, config={}, with_redis_client: RedisCache | None = None):
     self.redis_client = with_redis_client
     self.gcs_base_dir = config.get("gcs_base_dir", "sight_cache")
 
-  def _gcs_cache_path(self, key: str):
-    return f"{self.gcs_base_dir}/{Path(key).with_suffix('.json')}"
+  def _gcs_cache_path(self, key: str, suffix: str = ".json"):
+    return f"{self.gcs_base_dir}/{Path(key).with_suffix(suffix=suffix)}"
 
   def json_get(self, key):
     if self.redis_client:
diff --git a/py/sight/proto/sight.proto b/py/sight/proto/sight.proto
index 3723801..8084c26 100644
--- a/py/sight/proto/sight.proto
+++ b/py/sight/proto/sight.proto
@@ -746,6 +746,13 @@ message DecisionParam {
   map<string, Value> params = 1;
 }
 
+message DecisionMessage {
+  int64 action_id = 1;
+  DecisionParam action = 2;
+  DecisionPoint decision_point = 3;
+  DecisionOutcome decision_outcome = 4;
+}
+
 message DecisionPoint {
   // Identifies the choice being made.
string choice_label = 1; diff --git a/py/sight/proto/sight_pb2.py b/py/sight/proto/sight_pb2.py index 91b0028..440a3dc 100644 --- a/py/sight/proto/sight_pb2.py +++ b/py/sight/proto/sight_pb2.py @@ -17,7 +17,7 @@ ) DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( - b'\n\x17sight/proto/sight.proto\x12\rsight.x.proto\x1a\x19sight/proto/example.proto\x1a.sight/proto/widgets/pipeline/flume/flume.proto\"\'\n\tAttribute\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xb1\x0c\n\x06Object\x12\x10\n\x08location\x18\x01 \x01(\t\x12\r\n\x05index\x18\x02 \x01(\x03\x12\x0f\n\x07log_uid\x18\x18 \x01(\t\x12+\n\tattribute\x18\x03 \x03(\x0b\x32\x18.sight.x.proto.Attribute\x12/\n\x08sub_type\x18\x04 \x01(\x0e\x32\x1d.sight.x.proto.Object.SubType\x12#\n\x04text\x18\x05 \x01(\x0b\x32\x13.sight.x.proto.TextH\x00\x12\x30\n\x0b\x62lock_start\x18\x06 \x01(\x0b\x32\x19.sight.x.proto.BlockStartH\x00\x12,\n\tblock_end\x18\x07 \x01(\x0b\x32\x17.sight.x.proto.BlockEndH\x00\x12\x38\n\x0f\x61ttribute_start\x18\x08 \x01(\x0b\x32\x1d.sight.x.proto.AttributeStartH\x00\x12\x34\n\rattribute_end\x18\t \x01(\x0b\x32\x1b.sight.x.proto.AttributeEndH\x00\x12\x46\n\x10\x66lume_do_fn_emit\x18\x0e \x01(\x0b\x32*.sight.x.widgets.flume.proto.FlumeDoFnEmitH\x00\x12@\n\x0c\x66lume_depend\x18\x0f \x01(\x0b\x32(.sight.x.widgets.flume.proto.FlumeDependH\x00\x12%\n\x05value\x18\x10 \x01(\x0b\x32\x14.sight.x.proto.ValueH\x00\x12-\n\texception\x18\x11 \x01(\x0b\x32\x18.sight.x.proto.ExceptionH\x00\x12\'\n\x06tensor\x18\x14 \x01(\x0b\x32\x15.sight.x.proto.TensorH\x00\x12?\n\x13tensor_flow_example\x18\x15 \x01(\x0b\x32 .sight.x.proto.TensorFlowExampleH\x00\x12\x36\n\x0e\x64\x65\x63ision_point\x18\x16 \x01(\x0b\x32\x1c.sight.x.proto.DecisionPointH\x00\x12:\n\x10\x64\x65\x63ision_outcome\x18\x17 \x01(\x0b\x32\x1e.sight.x.proto.DecisionOutcomeH\x00\x12#\n\x04link\x18\x19 \x01(\x0b\x32\x13.sight.x.proto.LinkH\x00\x12\x36\n\x0epropose_action\x18\x1a \x01(\x0b\x32\x1c.sight.x.proto.ProposeActionH\x00\x12\x0c\n\x04\x66ile\x18\n \x01(\t\x12\x0c\n\x04line\x18\x0b \x01(\x05\x12\x0c\n\x04\x66unc\x18\x0c \x01(\t\x12\x1f\n\x17\x61ncestor_start_location\x18\r \x03(\t\x12.\n\x07metrics\x18\x12 \x01(\x0b\x32\x1d.sight.x.proto.Object.Metrics\x12*\n\x05order\x18\x13 \x01(\x0b\x32\x1b.sight.x.proto.Object.Order\x1aX\n\x07Metrics\x12%\n\x1dprocess_free_swap_space_bytes\x18\x01 \x01(\x03\x12&\n\x1eprocess_total_swap_space_bytes\x18\x02 \x01(\x03\x1a\x1d\n\x05Order\x12\x14\n\x0ctimestamp_ns\x18\x01 \x01(\x03\"\xd2\x02\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x0b\n\x07ST_TEXT\x10\x01\x12\x12\n\x0eST_BLOCK_START\x10\x02\x12\x10\n\x0cST_BLOCK_END\x10\x03\x12\x16\n\x12ST_ATTRIBUTE_START\x10\x04\x12\x14\n\x10ST_ATTRIBUTE_END\x10\x05\x12\x17\n\x13ST_FLUME_DO_FN_EMIT\x10\x06\x12\x13\n\x0fST_FLUME_DEPEND\x10\x07\x12\x0c\n\x08ST_VALUE\x10\x08\x12\x10\n\x0cST_EXCEPTION\x10\t\x12\r\n\tST_TENSOR\x10\n\x12\x19\n\x15ST_TENSORFLOW_EXAMPLE\x10\x0c\x12\x15\n\x11ST_DECISION_POINT\x10\r\x12\x17\n\x13ST_DECISION_OUTCOME\x10\x0e\x12\n\n\x06ST_GAP\x10\x0b\x12\x0b\n\x07ST_LINK\x10\x0f\x12\x15\n\x11ST_PROPOSE_ACTION\x10\x10\x42\x12\n\x10sub_type_message\"\x88\x01\n\rProposeAction\x12\x32\n\x0c\x61\x63tion_attrs\x18\x01 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x30\n\nattributes\x18\x02 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x11\n\taction_id\x18\x03 \x01(\t\"\xec\x01\n\x12\x43onfigurationStart\x12;\n\x08sub_type\x18\x01 \x01(\x0e\x32).sight.x.proto.ConfigurationStart.SubType\x12K\n\x16\x64\x65\x63ision_configuration\x18\x02 
\x01(\x0b\x32).sight.x.proto.DecisionConfigurationStartH\x00\"8\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x1d\n\x19ST_DECISION_CONFIGURATION\x10\x01\x42\x12\n\x10sub_type_message\";\n\tException\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\x12\x11\n\ttraceback\x18\x03 \x01(\t\"\xd7\x05\n\x06Tensor\x12/\n\x08sub_type\x18\x01 \x01(\x0e\x32\x1d.sight.x.proto.Tensor.SubType\x12\r\n\x05label\x18\x02 \x01(\t\x12\r\n\x05shape\x18\x03 \x03(\x03\x12\x11\n\tdim_label\x18\t \x03(\t\x12;\n\x0f\x64im_axis_values\x18\n \x03(\x0b\x32\".sight.x.proto.Tensor.StringValues\x12;\n\rstring_values\x18\x04 \x01(\x0b\x32\".sight.x.proto.Tensor.StringValuesH\x00\x12\x39\n\x0c\x62ytes_values\x18\x05 \x01(\x0b\x32!.sight.x.proto.Tensor.BytesValuesH\x00\x12\x39\n\x0cint64_values\x18\x06 \x01(\x0b\x32!.sight.x.proto.Tensor.Int64ValuesH\x00\x12;\n\rdouble_values\x18\x07 \x01(\x0b\x32\".sight.x.proto.Tensor.DoubleValuesH\x00\x12\x37\n\x0b\x62ool_values\x18\x08 \x01(\x0b\x32 .sight.x.proto.Tensor.BoolValuesH\x00\x1a\x1d\n\x0cStringValues\x12\r\n\x05value\x18\x01 \x03(\t\x1a\x1c\n\x0b\x42ytesValues\x12\r\n\x05value\x18\x01 \x03(\x0c\x1a\x1c\n\x0bInt64Values\x12\r\n\x05value\x18\x01 \x03(\x03\x1a\x1d\n\x0c\x44oubleValues\x12\r\n\x05value\x18\x01 \x03(\x01\x1a\x1b\n\nBoolValues\x12\r\n\x05value\x18\x01 \x03(\x08\"`\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\r\n\tST_STRING\x10\x01\x12\x0c\n\x08ST_BYTES\x10\x02\x12\x0c\n\x08ST_INT64\x10\x03\x12\r\n\tST_DOUBLE\x10\x04\x12\x0b\n\x07ST_BOOL\x10\x05\x42\x0c\n\nvalue_type\"\x9c\x01\n\x04Link\x12\x17\n\x0flinked_sight_id\x18\x01 \x01(\t\x12/\n\tlink_type\x18\x02 \x01(\x0e\x32\x1c.sight.x.proto.Link.LinkType\"J\n\x08LinkType\x12\x0e\n\nLT_UNKNOWN\x10\x00\x12\x16\n\x12LT_PARENT_TO_CHILD\x10\x01\x12\x16\n\x12LT_CHILD_TO_PARENT\x10\x02\"\x8e\x02\n\x11TensorFlowExample\x12/\n\rinput_example\x18\x01 \x01(\x0b\x32\x16.sight.x.proto.ExampleH\x00\x12@\n\x16input_sequence_example\x18\x02 \x01(\x0b\x32\x1e.sight.x.proto.SequenceExampleH\x00\x12\x30\n\x0eoutput_example\x18\x03 \x01(\x0b\x32\x16.sight.x.proto.ExampleH\x01\x12\x41\n\x17output_sequence_example\x18\x04 \x01(\x0b\x32\x1e.sight.x.proto.SequenceExampleH\x01\x42\x07\n\x05inputB\x08\n\x06output\")\n\x03Log\x12\"\n\x03obj\x18\x01 \x03(\x0b\x32\x15.sight.x.proto.Object\"x\n\x04Text\x12\x0c\n\x04text\x18\x01 \x01(\t\x12-\n\x08sub_type\x18\x02 \x01(\x0e\x32\x1b.sight.x.proto.Text.SubType\"3\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x0b\n\x07ST_TEXT\x10\x01\x12\x0b\n\x07ST_HTML\x10\x02\"\xf4\x02\n\x05Value\x12.\n\x08sub_type\x18\x01 \x01(\x0e\x32\x1c.sight.x.proto.Value.SubType\x12\x16\n\x0cstring_value\x18\x02 \x01(\tH\x00\x12\x15\n\x0b\x62ytes_value\x18\x03 \x01(\x0cH\x00\x12\x15\n\x0bint64_value\x18\x04 \x01(\x03H\x00\x12\x16\n\x0c\x64ouble_value\x18\x05 \x01(\x01H\x00\x12\x14\n\nbool_value\x18\x06 \x01(\x08H\x00\x12\x14\n\nnone_value\x18\x07 \x01(\x08H\x00\x12\x14\n\njson_value\x18\x08 \x01(\tH\x00\x12\x11\n\tmime_type\x18\t \x01(\t\"z\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\r\n\tST_STRING\x10\x01\x12\x0c\n\x08ST_BYTES\x10\x02\x12\x0c\n\x08ST_INT64\x10\x03\x12\r\n\tST_DOUBLE\x10\x04\x12\x0b\n\x07ST_BOOL\x10\x05\x12\x0b\n\x07ST_NONE\x10\x06\x12\x0b\n\x07ST_JSON\x10\x07\x42\x0c\n\nvalue_type\"\xeb\n\n\nBlockStart\x12\r\n\x05label\x18\x01 \x01(\t\x12\x33\n\x08sub_type\x18\x02 \x01(\x0e\x32!.sight.x.proto.BlockStart.SubType\x12J\n\x12\x66lume_do_fn_create\x18\x03 \x01(\x0b\x32,.sight.x.widgets.flume.proto.FlumeDoFnCreateH\x00\x12M\n\x14\x66lume_do_fn_start_do\x18\x04 
\x01(\x0b\x32-.sight.x.widgets.flume.proto.FlumeDoFnStartDoH\x00\x12T\n\x17\x66lume_compare_fn_create\x18\x05 \x01(\x0b\x32\x31.sight.x.widgets.flume.proto.FlumeCompareFnCreateH\x00\x12\x61\n\x1e\x66lume_compare_fn_start_compare\x18\x06 \x01(\x0b\x32\x37.sight.x.widgets.flume.proto.FlumeCompareFnStartCompareH\x00\x12(\n\x04list\x18\x07 \x01(\x0b\x32\x18.sight.x.proto.ListStartH\x00\x12\\\n tensor_flow_model_training_epoch\x18\x08 \x01(\x0b\x32\x30.sight.x.proto.TensorFlowModelTrainingEpochStartH\x00\x12:\n\x10simulation_start\x18\t \x01(\x0b\x32\x1e.sight.x.proto.SimulationStartH\x00\x12O\n\x1bsimulation_parameters_start\x18\n \x01(\x0b\x32(.sight.x.proto.SimulationParametersStartH\x00\x12L\n\x1asimulation_time_step_start\x18\x0b \x01(\x0b\x32&.sight.x.proto.SimulationTimeStepStartH\x00\x12:\n\rconfiguration\x18\x0c \x01(\x0b\x32!.sight.x.proto.ConfigurationStartH\x00\"\x91\x04\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x19\n\x15ST_FLUME_DO_FN_CREATE\x10\x01\x12\x1b\n\x17ST_FLUME_DO_FN_START_DO\x10\x02\x12\x1e\n\x1aST_FLUME_COMPARE_FN_CREATE\x10\x03\x12%\n!ST_FLUME_COMPARE_FN_START_COMPARE\x10\x04\x12\x12\n\x0eST_NAMED_VALUE\x10\x05\x12\x0b\n\x07ST_LIST\x10\x06\x12\x0c\n\x08ST_TABLE\x10\x07\x12#\n\x1fST_TENSORFLOW_MODEL_APPLICATION\x10\x08\x12&\n\"ST_TENSORFLOW_MODEL_TRAINING_EPOCH\x10\t\x12 \n\x1cST_TENSORFLOW_MODEL_TRAINING\x10\n\x12\x11\n\rST_SIMULATION\x10\x0b\x12\x1c\n\x18ST_SIMULATION_PARAMETERS\x10\x0c\x12\x17\n\x13ST_SIMULATION_STATE\x10\r\x12\x1b\n\x17ST_SIMULATION_TIME_STEP\x10\x0e\x12\x1f\n\x1bST_SIMULATION_INITIAL_STATE\x10\x11\x12 \n\x1cST_SIMULATION_BOUNDARY_STATE\x10\x12\x12\x19\n\x15ST_CLUSTER_ASSIGNMENT\x10\x0f\x12\x14\n\x10ST_CONFIGURATION\x10\x10\x42\x12\n\x10sub_type_message\"\x8a\t\n\x08\x42lockEnd\x12\r\n\x05label\x18\x01 \x01(\t\x12\x31\n\x08sub_type\x18\x06 \x01(\x0e\x32\x1f.sight.x.proto.BlockEnd.SubType\x12\x1f\n\x17location_of_block_start\x18\x02 \x01(\t\x12\x1b\n\x13num_direct_contents\x18\x03 \x01(\x03\x12\x1f\n\x17num_transitive_contents\x18\x04 \x01(\x03\x12N\n\x14\x66lume_do_fn_complete\x18\x07 \x01(\x0b\x32..sight.x.widgets.flume.proto.FlumeDoFnCompleteH\x00\x12I\n\x12\x66lume_do_fn_end_do\x18\x08 \x01(\x0b\x32+.sight.x.widgets.flume.proto.FlumeDoFnEndDoH\x00\x12X\n\x19\x66lume_compare_fn_complete\x18\t \x01(\x0b\x32\x33.sight.x.widgets.flume.proto.FlumeCompareFnCompleteH\x00\x12]\n\x1c\x66lume_compare_fn_end_compare\x18\n \x01(\x0b\x32\x35.sight.x.widgets.flume.proto.FlumeCompareFnEndCompareH\x00\x12\x30\n\x07metrics\x18\x0c \x01(\x0b\x32\x1f.sight.x.proto.BlockEnd.Metrics\x1a\x45\n\x07Metrics\x12\x17\n\x0f\x65lapsed_time_ns\x18\x01 \x01(\x03\x12!\n\x19\x65xclusive_elapsed_time_ns\x18\x02 \x01(\x03\"\xfb\x03\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x1b\n\x17ST_FLUME_DO_FN_COMPLETE\x10\x01\x12\x19\n\x15ST_FLUME_DO_FN_END_DO\x10\x02\x12 \n\x1cST_FLUME_COMPARE_FN_COMPLETE\x10\x03\x12#\n\x1fST_FLUME_COMPARE_FN_END_COMPARE\x10\x04\x12\x12\n\x0eST_NAMED_VALUE\x10\x05\x12\x0b\n\x07ST_LIST\x10\x06\x12\x0c\n\x08ST_TABLE\x10\x07\x12#\n\x1fST_TENSORFLOW_MODEL_APPLICATION\x10\x08\x12&\n\"ST_TENSORFLOW_MODEL_TRAINING_EPOCH\x10\t\x12 \n\x1cST_TENSORFLOW_MODEL_TRAINING\x10\n\x12\x11\n\rST_SIMULATION\x10\x0b\x12\x1c\n\x18ST_SIMULATION_PARAMETERS\x10\x0c\x12\x17\n\x13ST_SIMULATION_STATE\x10\r\x12\x1b\n\x17ST_SIMULATION_TIME_STEP\x10\x0e\x12\x1f\n\x1bST_SIMULATION_INITIAL_STATE\x10\x11\x12 \n\x1cST_SIMULATION_BOUNDARY_STATE\x10\x12\x12\x19\n\x15ST_CLUSTER_ASSIGNMENT\x10\x0f\x42\x12\n\x10sub_type_message\"\xaf\x01\n\tListStart\x12\x32\n\x08sub_type\x18\x01 
\x01(\x0e\x32 .sight.x.proto.ListStart.SubType\"n\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x12\n\x0eST_HOMOGENEOUS\x10\x01\x12\x14\n\x10ST_HETEROGENEOUS\x10\x02\x12\n\n\x06ST_MAP\x10\x03\x12\x10\n\x0cST_MAP_ENTRY\x10\x04\x12\x0b\n\x07ST_DICT\x10\x05\"J\n!TensorFlowModelTrainingEpochStart\x12\x11\n\tepoch_num\x18\x01 \x01(\x03\x12\x12\n\nbatch_size\x18\x02 \x01(\x03\"=\n\x0e\x41ttributeStart\x12+\n\tattribute\x18\x01 \x01(\x0b\x32\x18.sight.x.proto.Attribute\"\x1b\n\x0c\x41ttributeEnd\x12\x0b\n\x03key\x18\x01 \x01(\t\"\xb2\x03\n\x06Params\x12\r\n\x05local\x18\x01 \x01(\x08\x12\x14\n\x0clog_dir_path\x18\x02 \x01(\t\x12\r\n\x05label\x18\x03 \x01(\t\x12\x13\n\x0btext_output\x18\x04 \x01(\x08\x12\x17\n\x0f\x63olumnio_output\x18\x05 \x01(\x08\x12\x18\n\x10\x63\x61pacitor_output\x18\x06 \x01(\x08\x12\x11\n\tlog_owner\x18\x07 \x01(\t\x12\x13\n\x0bpath_prefix\x18\x08 \x01(\t\x12\x1a\n\x12\x63ontainer_location\x18\t \x01(\t\x12\n\n\x02id\x18\n \x01(\x03\x12\x15\n\rsilent_logger\x18\x0b \x01(\x08\x12\x11\n\tin_memory\x18\x0c \x01(\x08\x12\x13\n\x0b\x61vro_output\x18\r \x01(\x08\x12\x12\n\nproject_id\x18\x0e \x01(\t\x12\x13\n\x0b\x62ucket_name\x18\x0f \x01(\t\x12\x10\n\x08gcp_path\x18\x10 \x01(\t\x12\x13\n\x0b\x66ile_format\x18\x11 \x01(\t\x12\x14\n\x0c\x64\x61taset_name\x18\x12 \x01(\t\x12\x1c\n\x14\x65xternal_file_format\x18\x13 \x01(\t\x12\x19\n\x11\x65xternal_file_uri\x18\x14 \x01(\t\"\x11\n\x0fSimulationStart\"\x1b\n\x19SimulationParametersStart\"\xb8\x02\n\x17SimulationTimeStepStart\x12\x17\n\x0ftime_step_index\x18\x01 \x03(\x03\x12\x11\n\ttime_step\x18\x02 \x01(\x02\x12\x16\n\x0etime_step_size\x18\x03 \x01(\x02\x12M\n\x0ftime_step_units\x18\x04 \x01(\x0e\x32\x34.sight.x.proto.SimulationTimeStepStart.TimeStepUnits\"\x89\x01\n\rTimeStepUnits\x12\x0f\n\x0bTSU_UNKNOWN\x10\x00\x12\x0e\n\nTSU_SECOND\x10\x01\x12\x0e\n\nTSU_MINUTE\x10\x02\x12\x0c\n\x08TSU_HOUR\x10\x03\x12\x0b\n\x07TSU_DAY\x10\x04\x12\r\n\tTSU_MONTH\x10\x05\x12\x0f\n\x0bTSU_QUARTER\x10\x06\x12\x0c\n\x08TSU_YEAR\x10\x07\"\xf0\x01\n\x12\x43ontinuousProbDist\x12>\n\x08gaussian\x18\x01 \x01(\x0b\x32*.sight.x.proto.ContinuousProbDist.GaussianH\x00\x12<\n\x07uniform\x18\x02 \x01(\x0b\x32).sight.x.proto.ContinuousProbDist.UniformH\x00\x1a\'\n\x08Gaussian\x12\x0c\n\x04mean\x18\x01 \x01(\x02\x12\r\n\x05stdev\x18\x02 \x01(\x02\x1a+\n\x07Uniform\x12\x0f\n\x07min_val\x18\x01 \x01(\x02\x12\x0f\n\x07max_val\x18\x02 \x01(\x02\x42\x06\n\x04\x64ist\"\x83\x01\n\x10\x44iscreteProbDist\x12:\n\x07uniform\x18\x01 \x01(\x0b\x32\'.sight.x.proto.DiscreteProbDist.UniformH\x00\x1a+\n\x07Uniform\x12\x0f\n\x07min_val\x18\x01 \x01(\x03\x12\x0f\n\x07max_val\x18\x02 \x01(\x03\x42\x06\n\x04\x64ist\"\xa6\x1c\n\x1a\x44\x65\x63isionConfigurationStart\x12O\n\x0eoptimizer_type\x18\x01 \x01(\x0e\x32\x37.sight.x.proto.DecisionConfigurationStart.OptimizerType\x12R\n\rchoice_config\x18\x02 \x03(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.ChoiceConfigEntry\x12N\n\x0bstate_attrs\x18\x03 \x03(\x0b\x32\x39.sight.x.proto.DecisionConfigurationStart.StateAttrsEntry\x12P\n\x0c\x61\x63tion_attrs\x18\x04 \x03(\x0b\x32:.sight.x.proto.DecisionConfigurationStart.ActionAttrsEntry\x12R\n\routcome_attrs\x18\x05 \x03(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.OutcomeAttrsEntry\x12\x12\n\nnum_trials\x18\x06 \x01(\x03\x1a\x0e\n\x0cVizierConfig\x1a\xf1\x01\n\nAcmeConfig\x12R\n\nacme_agent\x18\x01 
\x01(\x0e\x32>.sight.x.proto.DecisionConfigurationStart.AcmeConfig.AcmeAgent\"\x8e\x01\n\tAcmeAgent\x12\x0e\n\nAA_UNKNOWN\x10\x00\x12\n\n\x06\x41\x41_DQN\x10\x01\x12\x0b\n\x07\x41\x41_D4PG\x10\x02\x12\r\n\tAA_IMPALA\x10\x03\x12\x0b\n\x07\x41\x41_MDQN\x10\x04\x12\x0c\n\x08\x41\x41_QRDQN\x10\x05\x12\n\n\x06\x41\x41_PPO\x10\x06\x12\n\n\x06\x41\x41_MPO\x10\x07\x12\n\n\x06\x41\x41_SAC\x10\x08\x12\n\n\x06\x41\x41_TD3\x10\t\x1a\x35\n\x16GeneticAlgorithmConfig\x12\x1b\n\x13max_population_size\x18\x01 \x01(\x03\x1a\x18\n\x16\x45xhaustiveSearchConfig\x1a\xeb\x02\n\tLLMConfig\x12S\n\talgorithm\x18\x01 \x01(\x0e\x32@.sight.x.proto.DecisionConfigurationStart.LLMConfig.LLMAlgorithm\x12I\n\x04goal\x18\x02 \x01(\x0e\x32;.sight.x.proto.DecisionConfigurationStart.LLMConfig.LLMGoal\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\"W\n\x0cLLMAlgorithm\x12\x0e\n\nLA_UNKNOWN\x10\x00\x12\x11\n\rLA_TEXT_BISON\x10\x01\x12\x11\n\rLA_CHAT_BISON\x10\x02\x12\x11\n\rLA_GEMINI_PRO\x10\x03\"P\n\x07LLMGoal\x12\x0e\n\nLM_UNKNOWN\x10\x00\x12\x0f\n\x0bLM_OPTIMIZE\x10\x01\x12\x10\n\x0cLM_RECOMMEND\x10\x02\x12\x12\n\x0eLM_INTERACTIVE\x10\x03\x1a\x13\n\x11\x42\x61yesianOptConfig\x1a\x1b\n\x19SensitivityAnalysisConfig\x1a\x8e\x03\n\x0fNeverGradConfig\x12_\n\talgorithm\x18\x01 \x01(\x0e\x32L.sight.x.proto.DecisionConfigurationStart.NeverGradConfig.NeverGradAlgorithm\"\x99\x02\n\x12NeverGradAlgorithm\x12\x0e\n\nNG_UNKNOWN\x10\x00\x12\x0b\n\x07NG_AUTO\x10\x01\x12\t\n\x05NG_BO\x10\x02\x12\n\n\x06NG_CMA\x10\x03\x12\x12\n\x0eNG_TwoPointsDE\x10\x04\x12\x13\n\x0fNG_RandomSearch\x10\x05\x12\n\n\x06NG_PSO\x10\x06\x12\x1a\n\x16NG_ScrHammersleySearch\x10\x07\x12\t\n\x05NG_DE\x10\x08\x12\n\n\x06NG_CGA\x10\t\x12\t\n\x05NG_ES\x10\n\x12\r\n\tNG_DL_OPO\x10\x0b\x12\n\n\x06NG_DDE\x10\x0c\x12\n\n\x06NG_NMM\x10\r\x12\x10\n\x0cNG_TINY_SPSA\x10\x0e\x12\x11\n\rNG_VORONOI_DE\x10\x0f\x12\x10\n\x0cNG_CMA_SMALL\x10\x10\x1a\r\n\x0bSMCPyConfig\x1a\x19\n\x17WorklistSchedulerConfig\x1a\xac\x07\n\x0c\x43hoiceConfig\x12O\n\rvizier_config\x18\x01 \x01(\x0b\x32\x36.sight.x.proto.DecisionConfigurationStart.VizierConfigH\x00\x12K\n\x0b\x61\x63me_config\x18\x02 \x01(\x0b\x32\x34.sight.x.proto.DecisionConfigurationStart.AcmeConfigH\x00\x12\x64\n\x18genetic_algorithm_config\x18\x03 \x01(\x0b\x32@.sight.x.proto.DecisionConfigurationStart.GeneticAlgorithmConfigH\x00\x12\x64\n\x18\x65xhaustive_search_config\x18\x04 \x01(\x0b\x32@.sight.x.proto.DecisionConfigurationStart.ExhaustiveSearchConfigH\x00\x12I\n\nllm_config\x18\x05 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.LLMConfigH\x00\x12Z\n\x13\x62\x61yesian_opt_config\x18\x06 \x01(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.BayesianOptConfigH\x00\x12j\n\x1bsensitivity_analysis_config\x18\x07 \x01(\x0b\x32\x43.sight.x.proto.DecisionConfigurationStart.SensitivityAnalysisConfigH\x00\x12V\n\x11never_grad_config\x18\x08 \x01(\x0b\x32\x39.sight.x.proto.DecisionConfigurationStart.NeverGradConfigH\x00\x12N\n\rsmc_py_config\x18\t \x01(\x0b\x32\x35.sight.x.proto.DecisionConfigurationStart.SMCPyConfigH\x00\x12\x66\n\x19worklist_scheduler_config\x18\n \x01(\x0b\x32\x41.sight.x.proto.DecisionConfigurationStart.WorklistSchedulerConfigH\x00\x42\x0f\n\rchoice_config\x1ak\n\x11\x43hoiceConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x45\n\x05value\x18\x02 \x01(\x0b\x32\x36.sight.x.proto.DecisionConfigurationStart.ChoiceConfig:\x02\x38\x01\x1a\x8d\x02\n\tAttrProps\x12\x11\n\tmin_value\x18\x01 \x01(\x02\x12\x11\n\tmax_value\x18\x02 \x01(\x02\x12\x11\n\tstep_size\x18\x03 \x01(\x02\x12\x1a\n\x12valid_float_values\x18\x04 
\x03(\x02\x12\x18\n\x10valid_int_values\x18\x08 \x03(\x03\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\x12?\n\x14\x63ontinuous_prob_dist\x18\x06 \x01(\x0b\x32!.sight.x.proto.ContinuousProbDist\x12;\n\x12\x64iscrete_prob_dist\x18\x07 \x01(\x0b\x32\x1f.sight.x.proto.DiscreteProbDist\x1a\x66\n\x0fStateAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\x1ag\n\x10\x41\x63tionAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\x1ah\n\x11OutcomeAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\"\xea\x01\n\rOptimizerType\x12\x0e\n\nOT_UNKNOWN\x10\x00\x12\r\n\tOT_VIZIER\x10\x01\x12\x0b\n\x07OT_ACME\x10\x02\x12\x18\n\x14OT_GENETIC_ALGORITHM\x10\x03\x12\x18\n\x14OT_EXHAUSTIVE_SEARCH\x10\x04\x12\n\n\x06OT_LLM\x10\x05\x12\x13\n\x0fOT_BAYESIAN_OPT\x10\x06\x12\x1b\n\x17OT_SENSITIVITY_ANALYSIS\x10\x07\x12\x11\n\rOT_NEVER_GRAD\x10\x08\x12\r\n\tOT_SMC_PY\x10\t\x12\x19\n\x15OT_WORKLIST_SCHEDULER\x10\n\"U\n\x08\x44\x61taType\x12\r\n\tDT_UNKOWN\x10\x00\x12\x0c\n\x08\x44T_INT32\x10\x01\x12\x0c\n\x08\x44T_INT64\x10\x02\x12\x0e\n\nDT_FLOAT32\x10\x03\x12\x0e\n\nDT_FLOAT64\x10\x04\"\x8e\x01\n\rDecisionParam\x12\x38\n\x06params\x18\x01 \x03(\x0b\x32(.sight.x.proto.DecisionParam.ParamsEntry\x1a\x43\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.sight.x.proto.Value:\x02\x38\x01\"\xa5\x01\n\rDecisionPoint\x12\x14\n\x0c\x63hoice_label\x18\x01 \x01(\t\x12\x15\n\rchosen_option\x18\x02 \x01(\t\x12\x33\n\rchoice_params\x18\x03 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x32\n\x0cstate_params\x18\x04 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\"\x80\x01\n\x0f\x44\x65\x63isionOutcome\x12\x15\n\routcome_label\x18\x01 \x01(\t\x12\x0e\n\x06reward\x18\x02 \x01(\x02\x12\x10\n\x08\x64iscount\x18\x03 \x01(\x02\x12\x34\n\x0eoutcome_params\x18\x04 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParamb\x06proto3' + b'\n\x17sight/proto/sight.proto\x12\rsight.x.proto\x1a\x19sight/proto/example.proto\x1a.sight/proto/widgets/pipeline/flume/flume.proto\"\'\n\tAttribute\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xb1\x0c\n\x06Object\x12\x10\n\x08location\x18\x01 \x01(\t\x12\r\n\x05index\x18\x02 \x01(\x03\x12\x0f\n\x07log_uid\x18\x18 \x01(\t\x12+\n\tattribute\x18\x03 \x03(\x0b\x32\x18.sight.x.proto.Attribute\x12/\n\x08sub_type\x18\x04 \x01(\x0e\x32\x1d.sight.x.proto.Object.SubType\x12#\n\x04text\x18\x05 \x01(\x0b\x32\x13.sight.x.proto.TextH\x00\x12\x30\n\x0b\x62lock_start\x18\x06 \x01(\x0b\x32\x19.sight.x.proto.BlockStartH\x00\x12,\n\tblock_end\x18\x07 \x01(\x0b\x32\x17.sight.x.proto.BlockEndH\x00\x12\x38\n\x0f\x61ttribute_start\x18\x08 \x01(\x0b\x32\x1d.sight.x.proto.AttributeStartH\x00\x12\x34\n\rattribute_end\x18\t \x01(\x0b\x32\x1b.sight.x.proto.AttributeEndH\x00\x12\x46\n\x10\x66lume_do_fn_emit\x18\x0e \x01(\x0b\x32*.sight.x.widgets.flume.proto.FlumeDoFnEmitH\x00\x12@\n\x0c\x66lume_depend\x18\x0f \x01(\x0b\x32(.sight.x.widgets.flume.proto.FlumeDependH\x00\x12%\n\x05value\x18\x10 \x01(\x0b\x32\x14.sight.x.proto.ValueH\x00\x12-\n\texception\x18\x11 \x01(\x0b\x32\x18.sight.x.proto.ExceptionH\x00\x12\'\n\x06tensor\x18\x14 \x01(\x0b\x32\x15.sight.x.proto.TensorH\x00\x12?\n\x13tensor_flow_example\x18\x15 \x01(\x0b\x32 
.sight.x.proto.TensorFlowExampleH\x00\x12\x36\n\x0e\x64\x65\x63ision_point\x18\x16 \x01(\x0b\x32\x1c.sight.x.proto.DecisionPointH\x00\x12:\n\x10\x64\x65\x63ision_outcome\x18\x17 \x01(\x0b\x32\x1e.sight.x.proto.DecisionOutcomeH\x00\x12#\n\x04link\x18\x19 \x01(\x0b\x32\x13.sight.x.proto.LinkH\x00\x12\x36\n\x0epropose_action\x18\x1a \x01(\x0b\x32\x1c.sight.x.proto.ProposeActionH\x00\x12\x0c\n\x04\x66ile\x18\n \x01(\t\x12\x0c\n\x04line\x18\x0b \x01(\x05\x12\x0c\n\x04\x66unc\x18\x0c \x01(\t\x12\x1f\n\x17\x61ncestor_start_location\x18\r \x03(\t\x12.\n\x07metrics\x18\x12 \x01(\x0b\x32\x1d.sight.x.proto.Object.Metrics\x12*\n\x05order\x18\x13 \x01(\x0b\x32\x1b.sight.x.proto.Object.Order\x1aX\n\x07Metrics\x12%\n\x1dprocess_free_swap_space_bytes\x18\x01 \x01(\x03\x12&\n\x1eprocess_total_swap_space_bytes\x18\x02 \x01(\x03\x1a\x1d\n\x05Order\x12\x14\n\x0ctimestamp_ns\x18\x01 \x01(\x03\"\xd2\x02\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x0b\n\x07ST_TEXT\x10\x01\x12\x12\n\x0eST_BLOCK_START\x10\x02\x12\x10\n\x0cST_BLOCK_END\x10\x03\x12\x16\n\x12ST_ATTRIBUTE_START\x10\x04\x12\x14\n\x10ST_ATTRIBUTE_END\x10\x05\x12\x17\n\x13ST_FLUME_DO_FN_EMIT\x10\x06\x12\x13\n\x0fST_FLUME_DEPEND\x10\x07\x12\x0c\n\x08ST_VALUE\x10\x08\x12\x10\n\x0cST_EXCEPTION\x10\t\x12\r\n\tST_TENSOR\x10\n\x12\x19\n\x15ST_TENSORFLOW_EXAMPLE\x10\x0c\x12\x15\n\x11ST_DECISION_POINT\x10\r\x12\x17\n\x13ST_DECISION_OUTCOME\x10\x0e\x12\n\n\x06ST_GAP\x10\x0b\x12\x0b\n\x07ST_LINK\x10\x0f\x12\x15\n\x11ST_PROPOSE_ACTION\x10\x10\x42\x12\n\x10sub_type_message\"\x88\x01\n\rProposeAction\x12\x32\n\x0c\x61\x63tion_attrs\x18\x01 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x30\n\nattributes\x18\x02 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x11\n\taction_id\x18\x03 \x01(\t\"\xec\x01\n\x12\x43onfigurationStart\x12;\n\x08sub_type\x18\x01 \x01(\x0e\x32).sight.x.proto.ConfigurationStart.SubType\x12K\n\x16\x64\x65\x63ision_configuration\x18\x02 \x01(\x0b\x32).sight.x.proto.DecisionConfigurationStartH\x00\"8\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x1d\n\x19ST_DECISION_CONFIGURATION\x10\x01\x42\x12\n\x10sub_type_message\";\n\tException\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\x12\x11\n\ttraceback\x18\x03 \x01(\t\"\xd7\x05\n\x06Tensor\x12/\n\x08sub_type\x18\x01 \x01(\x0e\x32\x1d.sight.x.proto.Tensor.SubType\x12\r\n\x05label\x18\x02 \x01(\t\x12\r\n\x05shape\x18\x03 \x03(\x03\x12\x11\n\tdim_label\x18\t \x03(\t\x12;\n\x0f\x64im_axis_values\x18\n \x03(\x0b\x32\".sight.x.proto.Tensor.StringValues\x12;\n\rstring_values\x18\x04 \x01(\x0b\x32\".sight.x.proto.Tensor.StringValuesH\x00\x12\x39\n\x0c\x62ytes_values\x18\x05 \x01(\x0b\x32!.sight.x.proto.Tensor.BytesValuesH\x00\x12\x39\n\x0cint64_values\x18\x06 \x01(\x0b\x32!.sight.x.proto.Tensor.Int64ValuesH\x00\x12;\n\rdouble_values\x18\x07 \x01(\x0b\x32\".sight.x.proto.Tensor.DoubleValuesH\x00\x12\x37\n\x0b\x62ool_values\x18\x08 \x01(\x0b\x32 .sight.x.proto.Tensor.BoolValuesH\x00\x1a\x1d\n\x0cStringValues\x12\r\n\x05value\x18\x01 \x03(\t\x1a\x1c\n\x0b\x42ytesValues\x12\r\n\x05value\x18\x01 \x03(\x0c\x1a\x1c\n\x0bInt64Values\x12\r\n\x05value\x18\x01 \x03(\x03\x1a\x1d\n\x0c\x44oubleValues\x12\r\n\x05value\x18\x01 \x03(\x01\x1a\x1b\n\nBoolValues\x12\r\n\x05value\x18\x01 \x03(\x08\"`\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\r\n\tST_STRING\x10\x01\x12\x0c\n\x08ST_BYTES\x10\x02\x12\x0c\n\x08ST_INT64\x10\x03\x12\r\n\tST_DOUBLE\x10\x04\x12\x0b\n\x07ST_BOOL\x10\x05\x42\x0c\n\nvalue_type\"\x9c\x01\n\x04Link\x12\x17\n\x0flinked_sight_id\x18\x01 \x01(\t\x12/\n\tlink_type\x18\x02 
\x01(\x0e\x32\x1c.sight.x.proto.Link.LinkType\"J\n\x08LinkType\x12\x0e\n\nLT_UNKNOWN\x10\x00\x12\x16\n\x12LT_PARENT_TO_CHILD\x10\x01\x12\x16\n\x12LT_CHILD_TO_PARENT\x10\x02\"\x8e\x02\n\x11TensorFlowExample\x12/\n\rinput_example\x18\x01 \x01(\x0b\x32\x16.sight.x.proto.ExampleH\x00\x12@\n\x16input_sequence_example\x18\x02 \x01(\x0b\x32\x1e.sight.x.proto.SequenceExampleH\x00\x12\x30\n\x0eoutput_example\x18\x03 \x01(\x0b\x32\x16.sight.x.proto.ExampleH\x01\x12\x41\n\x17output_sequence_example\x18\x04 \x01(\x0b\x32\x1e.sight.x.proto.SequenceExampleH\x01\x42\x07\n\x05inputB\x08\n\x06output\")\n\x03Log\x12\"\n\x03obj\x18\x01 \x03(\x0b\x32\x15.sight.x.proto.Object\"x\n\x04Text\x12\x0c\n\x04text\x18\x01 \x01(\t\x12-\n\x08sub_type\x18\x02 \x01(\x0e\x32\x1b.sight.x.proto.Text.SubType\"3\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x0b\n\x07ST_TEXT\x10\x01\x12\x0b\n\x07ST_HTML\x10\x02\"\xf4\x02\n\x05Value\x12.\n\x08sub_type\x18\x01 \x01(\x0e\x32\x1c.sight.x.proto.Value.SubType\x12\x16\n\x0cstring_value\x18\x02 \x01(\tH\x00\x12\x15\n\x0b\x62ytes_value\x18\x03 \x01(\x0cH\x00\x12\x15\n\x0bint64_value\x18\x04 \x01(\x03H\x00\x12\x16\n\x0c\x64ouble_value\x18\x05 \x01(\x01H\x00\x12\x14\n\nbool_value\x18\x06 \x01(\x08H\x00\x12\x14\n\nnone_value\x18\x07 \x01(\x08H\x00\x12\x14\n\njson_value\x18\x08 \x01(\tH\x00\x12\x11\n\tmime_type\x18\t \x01(\t\"z\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\r\n\tST_STRING\x10\x01\x12\x0c\n\x08ST_BYTES\x10\x02\x12\x0c\n\x08ST_INT64\x10\x03\x12\r\n\tST_DOUBLE\x10\x04\x12\x0b\n\x07ST_BOOL\x10\x05\x12\x0b\n\x07ST_NONE\x10\x06\x12\x0b\n\x07ST_JSON\x10\x07\x42\x0c\n\nvalue_type\"\xeb\n\n\nBlockStart\x12\r\n\x05label\x18\x01 \x01(\t\x12\x33\n\x08sub_type\x18\x02 \x01(\x0e\x32!.sight.x.proto.BlockStart.SubType\x12J\n\x12\x66lume_do_fn_create\x18\x03 \x01(\x0b\x32,.sight.x.widgets.flume.proto.FlumeDoFnCreateH\x00\x12M\n\x14\x66lume_do_fn_start_do\x18\x04 \x01(\x0b\x32-.sight.x.widgets.flume.proto.FlumeDoFnStartDoH\x00\x12T\n\x17\x66lume_compare_fn_create\x18\x05 \x01(\x0b\x32\x31.sight.x.widgets.flume.proto.FlumeCompareFnCreateH\x00\x12\x61\n\x1e\x66lume_compare_fn_start_compare\x18\x06 \x01(\x0b\x32\x37.sight.x.widgets.flume.proto.FlumeCompareFnStartCompareH\x00\x12(\n\x04list\x18\x07 \x01(\x0b\x32\x18.sight.x.proto.ListStartH\x00\x12\\\n tensor_flow_model_training_epoch\x18\x08 \x01(\x0b\x32\x30.sight.x.proto.TensorFlowModelTrainingEpochStartH\x00\x12:\n\x10simulation_start\x18\t \x01(\x0b\x32\x1e.sight.x.proto.SimulationStartH\x00\x12O\n\x1bsimulation_parameters_start\x18\n \x01(\x0b\x32(.sight.x.proto.SimulationParametersStartH\x00\x12L\n\x1asimulation_time_step_start\x18\x0b \x01(\x0b\x32&.sight.x.proto.SimulationTimeStepStartH\x00\x12:\n\rconfiguration\x18\x0c \x01(\x0b\x32!.sight.x.proto.ConfigurationStartH\x00\"\x91\x04\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x19\n\x15ST_FLUME_DO_FN_CREATE\x10\x01\x12\x1b\n\x17ST_FLUME_DO_FN_START_DO\x10\x02\x12\x1e\n\x1aST_FLUME_COMPARE_FN_CREATE\x10\x03\x12%\n!ST_FLUME_COMPARE_FN_START_COMPARE\x10\x04\x12\x12\n\x0eST_NAMED_VALUE\x10\x05\x12\x0b\n\x07ST_LIST\x10\x06\x12\x0c\n\x08ST_TABLE\x10\x07\x12#\n\x1fST_TENSORFLOW_MODEL_APPLICATION\x10\x08\x12&\n\"ST_TENSORFLOW_MODEL_TRAINING_EPOCH\x10\t\x12 \n\x1cST_TENSORFLOW_MODEL_TRAINING\x10\n\x12\x11\n\rST_SIMULATION\x10\x0b\x12\x1c\n\x18ST_SIMULATION_PARAMETERS\x10\x0c\x12\x17\n\x13ST_SIMULATION_STATE\x10\r\x12\x1b\n\x17ST_SIMULATION_TIME_STEP\x10\x0e\x12\x1f\n\x1bST_SIMULATION_INITIAL_STATE\x10\x11\x12 
\n\x1cST_SIMULATION_BOUNDARY_STATE\x10\x12\x12\x19\n\x15ST_CLUSTER_ASSIGNMENT\x10\x0f\x12\x14\n\x10ST_CONFIGURATION\x10\x10\x42\x12\n\x10sub_type_message\"\x8a\t\n\x08\x42lockEnd\x12\r\n\x05label\x18\x01 \x01(\t\x12\x31\n\x08sub_type\x18\x06 \x01(\x0e\x32\x1f.sight.x.proto.BlockEnd.SubType\x12\x1f\n\x17location_of_block_start\x18\x02 \x01(\t\x12\x1b\n\x13num_direct_contents\x18\x03 \x01(\x03\x12\x1f\n\x17num_transitive_contents\x18\x04 \x01(\x03\x12N\n\x14\x66lume_do_fn_complete\x18\x07 \x01(\x0b\x32..sight.x.widgets.flume.proto.FlumeDoFnCompleteH\x00\x12I\n\x12\x66lume_do_fn_end_do\x18\x08 \x01(\x0b\x32+.sight.x.widgets.flume.proto.FlumeDoFnEndDoH\x00\x12X\n\x19\x66lume_compare_fn_complete\x18\t \x01(\x0b\x32\x33.sight.x.widgets.flume.proto.FlumeCompareFnCompleteH\x00\x12]\n\x1c\x66lume_compare_fn_end_compare\x18\n \x01(\x0b\x32\x35.sight.x.widgets.flume.proto.FlumeCompareFnEndCompareH\x00\x12\x30\n\x07metrics\x18\x0c \x01(\x0b\x32\x1f.sight.x.proto.BlockEnd.Metrics\x1a\x45\n\x07Metrics\x12\x17\n\x0f\x65lapsed_time_ns\x18\x01 \x01(\x03\x12!\n\x19\x65xclusive_elapsed_time_ns\x18\x02 \x01(\x03\"\xfb\x03\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x1b\n\x17ST_FLUME_DO_FN_COMPLETE\x10\x01\x12\x19\n\x15ST_FLUME_DO_FN_END_DO\x10\x02\x12 \n\x1cST_FLUME_COMPARE_FN_COMPLETE\x10\x03\x12#\n\x1fST_FLUME_COMPARE_FN_END_COMPARE\x10\x04\x12\x12\n\x0eST_NAMED_VALUE\x10\x05\x12\x0b\n\x07ST_LIST\x10\x06\x12\x0c\n\x08ST_TABLE\x10\x07\x12#\n\x1fST_TENSORFLOW_MODEL_APPLICATION\x10\x08\x12&\n\"ST_TENSORFLOW_MODEL_TRAINING_EPOCH\x10\t\x12 \n\x1cST_TENSORFLOW_MODEL_TRAINING\x10\n\x12\x11\n\rST_SIMULATION\x10\x0b\x12\x1c\n\x18ST_SIMULATION_PARAMETERS\x10\x0c\x12\x17\n\x13ST_SIMULATION_STATE\x10\r\x12\x1b\n\x17ST_SIMULATION_TIME_STEP\x10\x0e\x12\x1f\n\x1bST_SIMULATION_INITIAL_STATE\x10\x11\x12 \n\x1cST_SIMULATION_BOUNDARY_STATE\x10\x12\x12\x19\n\x15ST_CLUSTER_ASSIGNMENT\x10\x0f\x42\x12\n\x10sub_type_message\"\xaf\x01\n\tListStart\x12\x32\n\x08sub_type\x18\x01 \x01(\x0e\x32 .sight.x.proto.ListStart.SubType\"n\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x12\n\x0eST_HOMOGENEOUS\x10\x01\x12\x14\n\x10ST_HETEROGENEOUS\x10\x02\x12\n\n\x06ST_MAP\x10\x03\x12\x10\n\x0cST_MAP_ENTRY\x10\x04\x12\x0b\n\x07ST_DICT\x10\x05\"J\n!TensorFlowModelTrainingEpochStart\x12\x11\n\tepoch_num\x18\x01 \x01(\x03\x12\x12\n\nbatch_size\x18\x02 \x01(\x03\"=\n\x0e\x41ttributeStart\x12+\n\tattribute\x18\x01 \x01(\x0b\x32\x18.sight.x.proto.Attribute\"\x1b\n\x0c\x41ttributeEnd\x12\x0b\n\x03key\x18\x01 \x01(\t\"\xb2\x03\n\x06Params\x12\r\n\x05local\x18\x01 \x01(\x08\x12\x14\n\x0clog_dir_path\x18\x02 \x01(\t\x12\r\n\x05label\x18\x03 \x01(\t\x12\x13\n\x0btext_output\x18\x04 \x01(\x08\x12\x17\n\x0f\x63olumnio_output\x18\x05 \x01(\x08\x12\x18\n\x10\x63\x61pacitor_output\x18\x06 \x01(\x08\x12\x11\n\tlog_owner\x18\x07 \x01(\t\x12\x13\n\x0bpath_prefix\x18\x08 \x01(\t\x12\x1a\n\x12\x63ontainer_location\x18\t \x01(\t\x12\n\n\x02id\x18\n \x01(\x03\x12\x15\n\rsilent_logger\x18\x0b \x01(\x08\x12\x11\n\tin_memory\x18\x0c \x01(\x08\x12\x13\n\x0b\x61vro_output\x18\r \x01(\x08\x12\x12\n\nproject_id\x18\x0e \x01(\t\x12\x13\n\x0b\x62ucket_name\x18\x0f \x01(\t\x12\x10\n\x08gcp_path\x18\x10 \x01(\t\x12\x13\n\x0b\x66ile_format\x18\x11 \x01(\t\x12\x14\n\x0c\x64\x61taset_name\x18\x12 \x01(\t\x12\x1c\n\x14\x65xternal_file_format\x18\x13 \x01(\t\x12\x19\n\x11\x65xternal_file_uri\x18\x14 \x01(\t\"\x11\n\x0fSimulationStart\"\x1b\n\x19SimulationParametersStart\"\xb8\x02\n\x17SimulationTimeStepStart\x12\x17\n\x0ftime_step_index\x18\x01 
\x03(\x03\x12\x11\n\ttime_step\x18\x02 \x01(\x02\x12\x16\n\x0etime_step_size\x18\x03 \x01(\x02\x12M\n\x0ftime_step_units\x18\x04 \x01(\x0e\x32\x34.sight.x.proto.SimulationTimeStepStart.TimeStepUnits\"\x89\x01\n\rTimeStepUnits\x12\x0f\n\x0bTSU_UNKNOWN\x10\x00\x12\x0e\n\nTSU_SECOND\x10\x01\x12\x0e\n\nTSU_MINUTE\x10\x02\x12\x0c\n\x08TSU_HOUR\x10\x03\x12\x0b\n\x07TSU_DAY\x10\x04\x12\r\n\tTSU_MONTH\x10\x05\x12\x0f\n\x0bTSU_QUARTER\x10\x06\x12\x0c\n\x08TSU_YEAR\x10\x07\"\xf0\x01\n\x12\x43ontinuousProbDist\x12>\n\x08gaussian\x18\x01 \x01(\x0b\x32*.sight.x.proto.ContinuousProbDist.GaussianH\x00\x12<\n\x07uniform\x18\x02 \x01(\x0b\x32).sight.x.proto.ContinuousProbDist.UniformH\x00\x1a\'\n\x08Gaussian\x12\x0c\n\x04mean\x18\x01 \x01(\x02\x12\r\n\x05stdev\x18\x02 \x01(\x02\x1a+\n\x07Uniform\x12\x0f\n\x07min_val\x18\x01 \x01(\x02\x12\x0f\n\x07max_val\x18\x02 \x01(\x02\x42\x06\n\x04\x64ist\"\x83\x01\n\x10\x44iscreteProbDist\x12:\n\x07uniform\x18\x01 \x01(\x0b\x32\'.sight.x.proto.DiscreteProbDist.UniformH\x00\x1a+\n\x07Uniform\x12\x0f\n\x07min_val\x18\x01 \x01(\x03\x12\x0f\n\x07max_val\x18\x02 \x01(\x03\x42\x06\n\x04\x64ist\"\xa6\x1c\n\x1a\x44\x65\x63isionConfigurationStart\x12O\n\x0eoptimizer_type\x18\x01 \x01(\x0e\x32\x37.sight.x.proto.DecisionConfigurationStart.OptimizerType\x12R\n\rchoice_config\x18\x02 \x03(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.ChoiceConfigEntry\x12N\n\x0bstate_attrs\x18\x03 \x03(\x0b\x32\x39.sight.x.proto.DecisionConfigurationStart.StateAttrsEntry\x12P\n\x0c\x61\x63tion_attrs\x18\x04 \x03(\x0b\x32:.sight.x.proto.DecisionConfigurationStart.ActionAttrsEntry\x12R\n\routcome_attrs\x18\x05 \x03(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.OutcomeAttrsEntry\x12\x12\n\nnum_trials\x18\x06 \x01(\x03\x1a\x0e\n\x0cVizierConfig\x1a\xf1\x01\n\nAcmeConfig\x12R\n\nacme_agent\x18\x01 \x01(\x0e\x32>.sight.x.proto.DecisionConfigurationStart.AcmeConfig.AcmeAgent\"\x8e\x01\n\tAcmeAgent\x12\x0e\n\nAA_UNKNOWN\x10\x00\x12\n\n\x06\x41\x41_DQN\x10\x01\x12\x0b\n\x07\x41\x41_D4PG\x10\x02\x12\r\n\tAA_IMPALA\x10\x03\x12\x0b\n\x07\x41\x41_MDQN\x10\x04\x12\x0c\n\x08\x41\x41_QRDQN\x10\x05\x12\n\n\x06\x41\x41_PPO\x10\x06\x12\n\n\x06\x41\x41_MPO\x10\x07\x12\n\n\x06\x41\x41_SAC\x10\x08\x12\n\n\x06\x41\x41_TD3\x10\t\x1a\x35\n\x16GeneticAlgorithmConfig\x12\x1b\n\x13max_population_size\x18\x01 \x01(\x03\x1a\x18\n\x16\x45xhaustiveSearchConfig\x1a\xeb\x02\n\tLLMConfig\x12S\n\talgorithm\x18\x01 \x01(\x0e\x32@.sight.x.proto.DecisionConfigurationStart.LLMConfig.LLMAlgorithm\x12I\n\x04goal\x18\x02 \x01(\x0e\x32;.sight.x.proto.DecisionConfigurationStart.LLMConfig.LLMGoal\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\"W\n\x0cLLMAlgorithm\x12\x0e\n\nLA_UNKNOWN\x10\x00\x12\x11\n\rLA_TEXT_BISON\x10\x01\x12\x11\n\rLA_CHAT_BISON\x10\x02\x12\x11\n\rLA_GEMINI_PRO\x10\x03\"P\n\x07LLMGoal\x12\x0e\n\nLM_UNKNOWN\x10\x00\x12\x0f\n\x0bLM_OPTIMIZE\x10\x01\x12\x10\n\x0cLM_RECOMMEND\x10\x02\x12\x12\n\x0eLM_INTERACTIVE\x10\x03\x1a\x13\n\x11\x42\x61yesianOptConfig\x1a\x1b\n\x19SensitivityAnalysisConfig\x1a\x8e\x03\n\x0fNeverGradConfig\x12_\n\talgorithm\x18\x01 
\x01(\x0e\x32L.sight.x.proto.DecisionConfigurationStart.NeverGradConfig.NeverGradAlgorithm\"\x99\x02\n\x12NeverGradAlgorithm\x12\x0e\n\nNG_UNKNOWN\x10\x00\x12\x0b\n\x07NG_AUTO\x10\x01\x12\t\n\x05NG_BO\x10\x02\x12\n\n\x06NG_CMA\x10\x03\x12\x12\n\x0eNG_TwoPointsDE\x10\x04\x12\x13\n\x0fNG_RandomSearch\x10\x05\x12\n\n\x06NG_PSO\x10\x06\x12\x1a\n\x16NG_ScrHammersleySearch\x10\x07\x12\t\n\x05NG_DE\x10\x08\x12\n\n\x06NG_CGA\x10\t\x12\t\n\x05NG_ES\x10\n\x12\r\n\tNG_DL_OPO\x10\x0b\x12\n\n\x06NG_DDE\x10\x0c\x12\n\n\x06NG_NMM\x10\r\x12\x10\n\x0cNG_TINY_SPSA\x10\x0e\x12\x11\n\rNG_VORONOI_DE\x10\x0f\x12\x10\n\x0cNG_CMA_SMALL\x10\x10\x1a\r\n\x0bSMCPyConfig\x1a\x19\n\x17WorklistSchedulerConfig\x1a\xac\x07\n\x0c\x43hoiceConfig\x12O\n\rvizier_config\x18\x01 \x01(\x0b\x32\x36.sight.x.proto.DecisionConfigurationStart.VizierConfigH\x00\x12K\n\x0b\x61\x63me_config\x18\x02 \x01(\x0b\x32\x34.sight.x.proto.DecisionConfigurationStart.AcmeConfigH\x00\x12\x64\n\x18genetic_algorithm_config\x18\x03 \x01(\x0b\x32@.sight.x.proto.DecisionConfigurationStart.GeneticAlgorithmConfigH\x00\x12\x64\n\x18\x65xhaustive_search_config\x18\x04 \x01(\x0b\x32@.sight.x.proto.DecisionConfigurationStart.ExhaustiveSearchConfigH\x00\x12I\n\nllm_config\x18\x05 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.LLMConfigH\x00\x12Z\n\x13\x62\x61yesian_opt_config\x18\x06 \x01(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.BayesianOptConfigH\x00\x12j\n\x1bsensitivity_analysis_config\x18\x07 \x01(\x0b\x32\x43.sight.x.proto.DecisionConfigurationStart.SensitivityAnalysisConfigH\x00\x12V\n\x11never_grad_config\x18\x08 \x01(\x0b\x32\x39.sight.x.proto.DecisionConfigurationStart.NeverGradConfigH\x00\x12N\n\rsmc_py_config\x18\t \x01(\x0b\x32\x35.sight.x.proto.DecisionConfigurationStart.SMCPyConfigH\x00\x12\x66\n\x19worklist_scheduler_config\x18\n \x01(\x0b\x32\x41.sight.x.proto.DecisionConfigurationStart.WorklistSchedulerConfigH\x00\x42\x0f\n\rchoice_config\x1ak\n\x11\x43hoiceConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x45\n\x05value\x18\x02 \x01(\x0b\x32\x36.sight.x.proto.DecisionConfigurationStart.ChoiceConfig:\x02\x38\x01\x1a\x8d\x02\n\tAttrProps\x12\x11\n\tmin_value\x18\x01 \x01(\x02\x12\x11\n\tmax_value\x18\x02 \x01(\x02\x12\x11\n\tstep_size\x18\x03 \x01(\x02\x12\x1a\n\x12valid_float_values\x18\x04 \x03(\x02\x12\x18\n\x10valid_int_values\x18\x08 \x03(\x03\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\x12?\n\x14\x63ontinuous_prob_dist\x18\x06 \x01(\x0b\x32!.sight.x.proto.ContinuousProbDist\x12;\n\x12\x64iscrete_prob_dist\x18\x07 \x01(\x0b\x32\x1f.sight.x.proto.DiscreteProbDist\x1a\x66\n\x0fStateAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\x1ag\n\x10\x41\x63tionAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\x1ah\n\x11OutcomeAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 
\x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\"\xea\x01\n\rOptimizerType\x12\x0e\n\nOT_UNKNOWN\x10\x00\x12\r\n\tOT_VIZIER\x10\x01\x12\x0b\n\x07OT_ACME\x10\x02\x12\x18\n\x14OT_GENETIC_ALGORITHM\x10\x03\x12\x18\n\x14OT_EXHAUSTIVE_SEARCH\x10\x04\x12\n\n\x06OT_LLM\x10\x05\x12\x13\n\x0fOT_BAYESIAN_OPT\x10\x06\x12\x1b\n\x17OT_SENSITIVITY_ANALYSIS\x10\x07\x12\x11\n\rOT_NEVER_GRAD\x10\x08\x12\r\n\tOT_SMC_PY\x10\t\x12\x19\n\x15OT_WORKLIST_SCHEDULER\x10\n\"U\n\x08\x44\x61taType\x12\r\n\tDT_UNKOWN\x10\x00\x12\x0c\n\x08\x44T_INT32\x10\x01\x12\x0c\n\x08\x44T_INT64\x10\x02\x12\x0e\n\nDT_FLOAT32\x10\x03\x12\x0e\n\nDT_FLOAT64\x10\x04\"\x8e\x01\n\rDecisionParam\x12\x38\n\x06params\x18\x01 \x03(\x0b\x32(.sight.x.proto.DecisionParam.ParamsEntry\x1a\x43\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.sight.x.proto.Value:\x02\x38\x01\"\xc2\x01\n\x0f\x44\x65\x63isionMessage\x12\x11\n\taction_id\x18\x01 \x01(\x03\x12,\n\x06\x61\x63tion\x18\x02 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x34\n\x0e\x64\x65\x63ision_point\x18\x03 \x01(\x0b\x32\x1c.sight.x.proto.DecisionPoint\x12\x38\n\x10\x64\x65\x63ision_outcome\x18\x04 \x01(\x0b\x32\x1e.sight.x.proto.DecisionOutcome\"\xa5\x01\n\rDecisionPoint\x12\x14\n\x0c\x63hoice_label\x18\x01 \x01(\t\x12\x15\n\rchosen_option\x18\x02 \x01(\t\x12\x33\n\rchoice_params\x18\x03 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x32\n\x0cstate_params\x18\x04 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\"\x80\x01\n\x0f\x44\x65\x63isionOutcome\x12\x15\n\routcome_label\x18\x01 \x01(\t\x12\x0e\n\x06reward\x18\x02 \x01(\x02\x12\x10\n\x08\x64iscount\x18\x03 \x01(\x02\x12\x34\n\x0eoutcome_params\x18\x04 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParamb\x06proto3' ) _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) @@ -174,8 +174,10 @@ _DECISIONPARAM._serialized_end = 11733 _DECISIONPARAM_PARAMSENTRY._serialized_start = 11666 _DECISIONPARAM_PARAMSENTRY._serialized_end = 11733 - _DECISIONPOINT._serialized_start = 11736 - _DECISIONPOINT._serialized_end = 11901 - _DECISIONOUTCOME._serialized_start = 11904 - _DECISIONOUTCOME._serialized_end = 12032 + _DECISIONMESSAGE._serialized_start = 11736 + _DECISIONMESSAGE._serialized_end = 11930 + _DECISIONPOINT._serialized_start = 11933 + _DECISIONPOINT._serialized_end = 12098 + _DECISIONOUTCOME._serialized_start = 12101 + _DECISIONOUTCOME._serialized_end = 12229 # @@protoc_insertion_point(module_scope) diff --git a/py/sight/widgets/decision/decision.py b/py/sight/widgets/decision/decision.py index e9293c9..37bbcdf 100644 --- a/py/sight/widgets/decision/decision.py +++ b/py/sight/widgets/decision/decision.py @@ -31,6 +31,7 @@ from sight.proto import sight_pb2 from sight.utility import poll_network_batch_outcome from sight.utils.proto_conversion import convert_dict_to_proto +from sight.utils.proto_conversion import convert_proto_to_dict # from sight.widgets.decision.cartpole_driver import driver_fn from sight.widgets.decision import decision_episode_fn from sight.widgets.decision import trials @@ -44,6 +45,8 @@ SingleActionOptimizerClient ) from sight_service.proto import service_pb2 +from sight_service.shared_batch_messages import CachedBatchMessages +from sight_service.shared_batch_messages import DecisionMessage # logging.basicConfig(level=logging.DEBUG) @@ -70,16 +73,39 @@ 'optimizer_type', None, [ - 'vizier', 'dm_acme', 'genetic_algorithm', 'exhaustive_search', - 'llm_text_bison_optimize', 'llm_chat_bison_optimize', - 'llm_gemini_pro_optimize', 
'llm_text_bison_recommend',
-        'llm_chat_bison_recommend', 'llm_gemini_pro_recommend',
-        'llm_text_bison_interactive', 'llm_chat_bison_interactive',
-        'llm_gemini_pro_interactive', 'bayesian_opt', 'sensitivity_analysis',
-        'ng_auto', 'ng_bo', 'ng_cma', 'ng_two_points_de', 'ng_random_search',
-        'ng_pso', 'ng_scr_hammersley_search', 'ng_de', 'ng_cga', 'ng_es',
-        'ng_dl_opo', 'ng_dde', 'ng_nmm', 'ng_tiny_spsa', 'ng_voronoi_de',
-        'ng_cma_small', 'smcpy', 'worklist_scheduler'
+        'vizier',
+        'dm_acme',
+        'genetic_algorithm',
+        'exhaustive_search',
+        'llm_text_bison_optimize',
+        'llm_chat_bison_optimize',
+        'llm_gemini_pro_optimize',
+        'llm_text_bison_recommend',
+        'llm_chat_bison_recommend',
+        'llm_gemini_pro_recommend',
+        'llm_text_bison_interactive',
+        'llm_chat_bison_interactive',
+        'llm_gemini_pro_interactive',
+        'bayesian_opt',
+        'sensitivity_analysis',
+        'ng_auto',
+        'ng_bo',
+        'ng_cma',
+        'ng_two_points_de',
+        'ng_random_search',
+        'ng_pso',
+        'ng_scr_hammersley_search',
+        'ng_de',
+        'ng_cga',
+        'ng_es',
+        'ng_dl_opo',
+        'ng_dde',
+        'ng_nmm',
+        'ng_tiny_spsa',
+        'ng_voronoi_de',
+        'ng_cma_small',
+        'smcpy',
+        'worklist_scheduler',
     ],
     'The optimizer to use',
 )
@@ -180,7 +206,7 @@ def configure(
   if 'rl_decision_driver' not in widget_decision_state:
     widget_decision_state['rl_decision_driver'] = None

-  logging.debug("<<<< Out %s of %s", method_name, _file_name)
+  logging.debug('<<<< Out %s of %s', method_name, _file_name)


 def init_sight_polling_thread(sight_id):
@@ -216,8 +242,8 @@ def attr_to_dict(attr, array):
   """Converts a spec type array to a dict of attribute constraints.

   Args:
-    array: The spec array to be converted.
     attr: The name of the attribute.
+    array: The spec array to be converted.

   Returns:
     A dict of attribute constraints.
@@ -245,8 +271,6 @@ def attr_to_dict(attr, array):

   elif isinstance(array, dm_env.specs.BoundedArray):
     if array.shape == () or array.shape == (1,):
-      # minimum = float(array.minimum if array.minimum.size == 1 else array.minimum[0])
-      # maximum = float(array.maximum if array.maximum.size == 1 else array.maximum[0])
       minimum = float(array.minimum[0])
       maximum = float(array.maximum[0])
       key = f'{attr}_{1}'
@@ -256,12 +280,10 @@ def attr_to_dict(attr, array):
           #  datatype=dtype
       )
     else:
-      minimum = np.repeat(
-          array.minimum,
-          array.shape[0]) if array.minimum.size == 1 else array.minimum
-      maximum = np.repeat(
-          array.maximum,
-          array.shape[0]) if array.maximum.size == 1 else array.maximum
+      minimum = (np.repeat(array.minimum, array.shape[0])
+                 if array.minimum.size == 1 else array.minimum)
+      maximum = (np.repeat(array.maximum, array.shape[0])
+                 if array.maximum.size == 1 else array.maximum)

       for i in range(array.shape[0]):
         key = f'{attr}_{i + 1}'
@@ -276,19 +298,353 @@ def attr_to_dict(attr, array):
       key = f'{attr}_{i + 1}'
       result[key] = sight_pb2.DecisionConfigurationStart.AttrProps()

-  logging.debug("<<<< Out %s of %s", method_name, _file_name)
+  logging.debug('<<<< Out %s of %s', method_name, _file_name)
   return result


+def get_decision_messages_from_proto(
+    decision_messages_proto: List[sight_pb2.DecisionMessage],
+) -> Dict[str, Any]:
+  messages = {}
+  for msg in decision_messages_proto:
+    messages[msg.action_id] = convert_proto_to_dict(proto=msg.action)
+  return messages
+
+
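`get_decision_messages_from_proto` above flattens the `repeated DecisionMessage` field that `WorkerAliveResponse` gains later in this patch into a plain dict keyed by `action_id`, which is what the worker loop iterates over. A minimal sketch of the intended decoding, assuming the regenerated `service_pb2` bindings; the IDs and values are illustrative only:

```python
# Sketch: decode a WorkerAliveResponse into {action_id: {param: value}}.
from sight_service.proto import service_pb2

response = service_pb2.WorkerAliveResponse()
msg = response.decision_messages.add()
msg.action_id = 1  # msg.action holds the chosen parameters as a DecisionParam

# Each entry is decoded with convert_proto_to_dict(), e.g. {1: {'action_1': 0.5}}.
messages = get_decision_messages_from_proto(response.decision_messages)
```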
 def run(
     sight: Any,
     env: Any = None,
-    driver_fn: Callable[[Any], Any] = driver_fn,
-    state_attrs: Dict[str, sight_pb2.DecisionConfigurationStart.AttrProps] = {},
+    driver_fn: Callable[[Any], Any] = None,
+    state_attrs: Dict[str,
+                      sight_pb2.DecisionConfigurationStart.AttrProps] = None,
     action_attrs: Dict[str,
-                       sight_pb2.DecisionConfigurationStart.AttrProps] = {},
+                       sight_pb2.DecisionConfigurationStart.AttrProps] = None,
     outcome_attrs: Dict[str,
-                        sight_pb2.DecisionConfigurationStart.AttrProps] = {},
+                        sight_pb2.DecisionConfigurationStart.AttrProps] = None,
     description: str = '',
 ):
   """Driver for running applications that use the Decision API.
@@ -309,78 +665,326 @@ def run(
     outcomes to its possible values.
     description: Human-readable description of the application.
""" + state_attrs = state_attrs or {} + action_attrs = action_attrs or {} + outcome_attrs = outcome_attrs or {} - method_name = 'run' - logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name) + logging.debug('>>>>>>>>> Entering run method') - if env is not None: - if state_attrs == {}: - state_attrs = attr_to_dict(env.observation_spec(), 'state') - if action_attrs == {}: - action_attrs = attr_to_dict(env.action_spec(), 'action') + initialize_env(env, state_attrs, action_attrs) sight.widget_decision_state['decision_episode_fn'] = ( decision_episode_fn.DecisionEpisodeFn(driver_fn, state_attrs, action_attrs)) - # print(sight.widget_decision_state['decision_episode_fn']) - # raise SystemError - if _OPTIMIZER_TYPE.value == 'dm_acme': - optimizer.obj = AcmeOptimizerClient(sight) - elif _OPTIMIZER_TYPE.value == 'vizier': - optimizer.obj = SingleActionOptimizerClient( - sight_pb2.DecisionConfigurationStart.OptimizerType.OT_VIZIER, sight) - elif _OPTIMIZER_TYPE.value == 'genetic_algorithm': - optimizer.obj = GeneticAlgorithmOptimizerClient( - max_population_size=_NUM_TRAIN_WORKERS.value, sight=sight) - elif _OPTIMIZER_TYPE.value == 'exhaustive_search': - optimizer.obj = SingleActionOptimizerClient( - sight_pb2.DecisionConfigurationStart.OptimizerType.OT_EXHAUSTIVE_SEARCH, - sight) - elif _OPTIMIZER_TYPE.value.startswith('llm_'): - optimizer.obj = LLMOptimizerClient( - _OPTIMIZER_TYPE.value.partition('llm_')[2], description, sight) - elif _OPTIMIZER_TYPE.value == 'bayesian_opt': - optimizer.obj = SingleActionOptimizerClient( - sight_pb2.DecisionConfigurationStart.OptimizerType.OT_BAYESIAN_OPT, - sight) - elif _OPTIMIZER_TYPE.value == 'sensitivity_analysis': - optimizer.obj = SingleActionOptimizerClient( - sight_pb2.DecisionConfigurationStart.OptimizerType. - OT_SENSITIVITY_ANALYSIS, sight) - elif _OPTIMIZER_TYPE.value.startswith('ng_'): - optimizer.obj = SingleActionOptimizerClient( - sight_pb2.DecisionConfigurationStart.OptimizerType.OT_NEVER_GRAD, sight, - _OPTIMIZER_TYPE.value.partition('ng_')[2]) - elif _OPTIMIZER_TYPE.value == 'smcpy': - optimizer.obj = SingleActionOptimizerClient( - sight_pb2.DecisionConfigurationStart.OptimizerType.OT_SMC_PY, sight) - elif _OPTIMIZER_TYPE.value == 'worklist_scheduler': - optimizer.obj = SingleActionOptimizerClient( - sight_pb2.DecisionConfigurationStart.OptimizerType. 
-        OT_WORKLIST_SCHEDULER, sight)
+  optimizer.obj = setup_optimizer(sight, description)
+
+  decision_configuration = configure_decision(sight, state_attrs, action_attrs,
+                                              outcome_attrs)
+
+  sight.widget_decision_state['num_decision_points'] = 0
+  sight.widget_decision_state['proposed_actions'] = []
+
+  decision_mode_actions = {
+      'run': execute_run_mode,
+      'configured_run': (lambda sight=sight, driver_fn=driver_fn:
+                         execute_configured_run_mode(sight, driver_fn)),
+      'train':
+          (lambda sight=sight, decision_configuration=decision_configuration,
+           driver_fn=driver_fn, env=env: execute_train_mode(
+               sight, decision_configuration, driver_fn, env)),
+  }
+
+  action = decision_mode_actions.get(_DECISON_MODE.value)
+  if action:
+    action()
   else:
-    raise ValueError(f'Unknown optimizer type {_OPTIMIZER_TYPE.value}')
+    raise ValueError(f'Unknown decision mode {_DECISON_MODE.value}')

-  if env is not None:
-    if state_attrs == {}:
-      state_attrs = attr_to_dict(env.observation_spec(), 'state')
-    if action_attrs == {}:
-      action_attrs = attr_to_dict(env.action_spec(), 'action')
-  if outcome_attrs == {}:
-    outcome_attrs = {
-        'outcome': sight_pb2.DecisionConfigurationStart.AttrProps()
+  logging.debug('<<<<<< Exiting run method')
+
+
+def execute_run_mode():
+  """Executes the run mode.
+
+  Raises:
+    ValueError: If sight_log_id is not provided.
+  """
+
+  logging.info('_DECISON_MODE.value == run')
+  if not FLAGS.sight_log_id:
+    raise ValueError('sight_log_id must be provided for decision_mode = run')
+
+  req = service_pb2.FetchOptimalActionRequest(
+      client_id=FLAGS.sight_log_id,
+      # worker_id=f'client_{client_id}_worker_{worker_location}',
+  )
+  response = service.call(
+      lambda s, meta: s.FetchOptimalAction(req, 300, metadata=meta))
+  print('response:', response.response_str)
+
+
+def execute_configured_run_mode(sight, driver_fn):
+  """Executes the configured run mode.
+
+  Args:
+    sight: The Sight object to be used for logging.
+    driver_fn: Driver function for calling application logic that uses the Sight
+      Decision API to describe decisions and their outcomes. It is assumed that
+      driver_fn does not maintain state across invocations and can be called as
+      many times as needed, possibly concurrently (i.e. does not keep state
+      within global variables either internally or via its interactions with
+      external resources).
+
+  Raises:
+    ValueError: If neither decision_run_config_file nor decision_params is
+      provided.
+  """
+  if FLAGS.decision_run_config_file:
+    sight.add_config_file(_DECISION_RUN_CONFIG_FILE.value)
+  elif _DECISION_PARAMS.value:
+    chosen_action = {
+        key: float(val) for key, val in (
+            key_val.split('=') for key_val in _DECISION_PARAMS.value.split(':'))
+    }
+    sight.widget_decision_state['constant_action'] = chosen_action
+    sight.widget_decision_state['last_reward'] = None
+  else:
+    raise ValueError(
+        'In configured_run mode, decision_run_config_file is required.')
+
+  logging.info(
+      'decision_train_alg=%s docker_image=%s',
+      FLAGS.deployment_mode,
+      _DOCKER_IMAGE.value,
+  )
+
+  if FLAGS.deployment_mode == 'local' and _DOCKER_IMAGE.value:
+    trials.start_job_in_docker(
+        1,
+        _BINARY_PATH.value,
+        _OPTIMIZER_TYPE.value,
+        _DOCKER_IMAGE.value,
+        _DECISON_MODE.value,
+        'docker_worker',
+        'worker_mode',
+        _DECISION_PARAMS.value,
+        sight,
+    )
+  else:
+    driver_fn(sight)
+
+
+def execute_train_mode(sight, decision_configuration, driver_fn, env):
+  """Executes the train mode.
+
+  Args:
+    sight: The Sight object to be used for logging.
+    decision_configuration: The decision configuration proto.
+    driver_fn: Driver function for calling application logic that uses the Sight
+      Decision API to describe decisions and their outcomes. It is assumed that
+      driver_fn does not maintain state across invocations and can be called as
+      many times as needed, possibly concurrently (i.e. does not keep state
+      within global variables either internally or via its interactions with
+      external resources).
+    env: environment object if passed by user.
+
+  Raises:
+    ValueError: If the deployment mode is not supported.
+  """
+  if FLAGS.deployment_mode in ['distributed', 'vm']:
+    details = sight.widget_decision_state['decision_episode_fn']
+    possible_actions = (list(details.action_max.values())[0] -
+                        list(details.action_min.values())[0] + 2)
+    validate_train_mode(possible_actions)
+    execute_distributed_training(sight, decision_configuration)
+  elif FLAGS.deployment_mode in [
+      'local',
+      'dsub_local',
+      'docker_local',
+      'worker_mode',
+  ]:
+    execute_local_training(sight, decision_configuration, driver_fn, env)
+  else:
+    raise ValueError(f'Unsupported deployment mode {FLAGS.deployment_mode}')
+
+
+def validate_train_mode(possible_actions):
+  """Validates the trial count and docker image before distributed training."""
+  if (_OPTIMIZER_TYPE.value == 'exhaustive_search' and
+      possible_actions < _NUM_TRIALS.value):
+    raise ValueError(
+        f'Max possible value for num_trials is: {possible_actions}')
+  if not _DOCKER_IMAGE.value:
+    raise ValueError('docker_image must be provided for distributed mode')
+
+
+def execute_local_training(sight, decision_configuration, driver_fn, env):
+  """Executes the local training mode.
+
+  Args:
+    sight: The Sight object to be used for logging.
+    decision_configuration: The decision configuration proto.
+    driver_fn: Driver function for calling application logic that uses the Sight
+      Decision API to describe decisions and their outcomes. It is assumed that
+      driver_fn does not maintain state across invocations and can be called as
+      many times as needed, possibly concurrently (i.e. does not keep state
+      within global variables either internally or via its interactions with
+      external resources).
+    env: environment object if passed by user.
+
+  Raises:
+    ValueError: If the server returns an unrecognized worker status.
+  """
+  if FLAGS.deployment_mode == 'worker_mode' or 'PARENT_LOG_ID' in os.environ:
+    # not used anymore - for worklist scheduler
+    # num_samples_to_run = int(os.environ['num_samples'])
+    pass
+  else:
+    trials.launch(
+        optimizer.obj,
+        decision_configuration,
+        _NUM_TRAIN_WORKERS.value,
+        sight,
+    )
+    # not used anymore - for worklist scheduler
+    # num_samples_to_run = _NUM_TRIALS.value
+
+  if FLAGS.deployment_mode == 'docker_local':
+    trials.start_job_in_docker(
+        _NUM_TRIALS.value,
+        _BINARY_PATH.value,
+        _OPTIMIZER_TYPE.value,
+        _DOCKER_IMAGE.value,
+        _DECISON_MODE.value,
+        'worker_mode',
+        'docker_local_worker',
+        _DECISION_PARAMS.value,
+        sight,
+    )
+  elif FLAGS.deployment_mode == 'dsub_local':
+    trials.start_job_in_dsub_local(
+        _NUM_TRAIN_WORKERS.value,
+        _NUM_TRIALS.value,
+        _BINARY_PATH.value,
+        _OPTIMIZER_TYPE.value,
+        _DOCKER_IMAGE.value,
+        _DECISON_MODE.value,
+        'worker_mode',
+        'dsub_local_worker',
+        sight,
+    )
+  else:
+    client_id, worker_location = _configure_client_and_worker(sight=sight)
+
+    while True:
+      # Poll the server to learn whether this worker should act, retry, or stop.
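+      # The WorkerAlive reply drives this loop through three statuses:
+      #   ST_DONE  - all work assigned to this worker is finished; exit.
+      #   ST_RETRY - nothing to schedule yet; sleep 5 seconds and poll again.
+      #   ST_ACT   - the response carries decision_messages, which are run
+      #              via process_worker_action() below.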
+ req = service_pb2.WorkerAliveRequest( + client_id=client_id, + worker_id=f'client_{client_id}_worker_{worker_location}', + ) + response = service.call( + lambda s, meta: s.WorkerAlive(req, 300, metadata=meta)) + logging.info('Response from WorkerAlive RPC: %s', response.status_type) + if (response.status_type == + service_pb2.WorkerAliveResponse.StatusType.ST_DONE): + break + elif (response.status_type == + service_pb2.WorkerAliveResponse.StatusType.ST_RETRY): + logging.info('Retrying in 5 seconds......') + time.sleep(5) + elif (response.status_type == + service_pb2.WorkerAliveResponse.StatusType.ST_ACT): + process_worker_action(response, sight, driver_fn, env) + else: + raise ValueError('Invalid response from server') + + logging.info('Exiting the training loop.') + + +def process_worker_action(response, sight, driver_fn, env): + """Processes worker actions during local training. + + Args: + response: The response from the WorkerAlive RPC. + sight: Sight object used for logging and configuration. + driver_fn: The driver function that drives the training. + env: The environment in which the training takes place (optional). + """ + decision_messages = get_decision_messages_from_proto( + decision_messages_proto=response.decision_messages) + # shared_batch_messages = CachedBatchMessages() + sight.widget_decision_state['cached_messages'] = optimizer.obj.cache + + for action_id, action_params in decision_messages.items(): + sight.enter_block('Decision Sample', sight_pb2.Object()) + + if 'constant_action' in sight.widget_decision_state: + del sight.widget_decision_state['constant_action'] + + cached_messages = sight.widget_decision_state['cached_messages'] + sight.widget_decision_state['discount'] = 0 + sight.widget_decision_state['last_reward'] = None + sight.widget_decision_state['action_id'] = action_id + + cached_messages.set( + action_id, + DecisionMessage( + action_id=action_id, + action_params=action_params, + ), + ) + + if env: + driver_fn(env, sight) + else: + driver_fn(sight) + + sight.exit_block('Decision Sample', sight_pb2.Object()) + + finalize_episode(sight) + + +def execute_distributed_training(sight, decision_configuration): + """Executes the distributed training mode. + + Args: + sight: The Sight object to be used for logging. + decision_configuration: The decision configuration proto. + """ + trials.launch(optimizer.obj, decision_configuration, _NUM_TRAIN_WORKERS.value, + sight) + trials.start_jobs( + _NUM_TRAIN_WORKERS.value, + _BINARY_PATH.value, + _OPTIMIZER_TYPE.value, + _DOCKER_IMAGE.value, + _DECISON_MODE.value, + 'worker_mode', + 'dsub_cloud_worker', + sight, + ) + + +def configure_decision(sight, state_attrs, action_attrs, outcome_attrs): + """Configures the decision configuration for the Sight logger. + + Args: + sight: The Sight object to be used for logging. + state_attrs: Maps the name of each state variable to its possible values. + action_attrs: Maps the name of each variable that describes possible + decisions to its possible values. + outcome_attrs: Maps the name of each variable that describes possible + outcomes to its possible values. + + Returns: + The decision configuration proto. 
+ """ + if not outcome_attrs: + outcome_attrs['outcome'] = sight_pb2.DecisionConfigurationStart.AttrProps() decision_configuration = sight_pb2.DecisionConfigurationStart() decision_configuration.optimizer_type = optimizer.obj.optimizer_type() - if (_NUM_TRIALS.value): + if _NUM_TRIALS.value: decision_configuration.num_trials = _NUM_TRIALS.value - # if FLAGS.deployment_mode == 'worker_mode': - # decision_configuration.num_trials = int(os.environ['num_samples']) - # else: - # decision_configuration.num_trials = _NUM_TRIALS.value + decision_configuration.choice_config[sight.params.label].CopyFrom( optimizer.obj.create_config()) attr_dict_to_proto(state_attrs, decision_configuration.state_attrs) @@ -398,240 +1002,77 @@ def run( )), ) sight.exit_block('Decision Configuration', sight_pb2.Object()) - sight.widget_decision_state['num_decision_points'] = 0 - sight.widget_decision_state['decision_episode_fn'] = ( - decision_episode_fn.DecisionEpisodeFn(driver_fn, state_attrs, - action_attrs)) - sight.widget_decision_state['proposed_actions'] = [] + return decision_configuration - if _DECISON_MODE.value == 'run': - logging.info('_DECISON_MODE.value == run') - # sight.widget_decision_state['sum_outcome'] = 0 - # sight.widget_decision_state['last_reward'] = None - # if env: - # driver_fn(env, sight) - # else: - # driver_fn(sight) - # finalize_episode(sight) - - if (not FLAGS.sight_log_id): - raise ValueError( - "sight_log_id have to be passed from the trained run for decision_mokde = run" - ) - req = service_pb2.FetchOptimalActionRequest( - client_id=FLAGS.sight_log_id, - # worker_id=f'client_{client_id}_worker_{worker_location}', - ) - response = service.call( - lambda s, meta: s.FetchOptimalAction(req, 300, metadata=meta)) - print('response : ', response.response_str) - - elif _DECISON_MODE.value == 'configured_run': - # ? not proper flow right now - # If the run configuration is provided in a file. - # if _DECISION_RUN_CONFIG_FILE.value: - if flags.FLAGS.decision_run_config_file: - sight.add_config_file(_DECISION_RUN_CONFIG_FILE.value) - # If the run configuration is provided on the command line. - elif _DECISION_PARAMS.value: - chosen_action = {} - for key_val in _DECISION_PARAMS.value.split(':'): - key, val = tuple(key_val.split('=')) - chosen_action[key] = float(val) - sight.widget_decision_state['constant_action'] = chosen_action - # sight.widget_decision_state['sum_outcome'] = 0 - sight.widget_decision_state['last_reward'] = None - else: - raise ValueError( - 'In configured_run mode decision_run_config_file is required.') +def setup_optimizer(sight, description): + """Sets up the optimizer based on the given type. - # If a docker image is provided, run within it. - logging.info( - 'decision_train_alg=%s docker_image=%s', - FLAGS.deployment_mode, - _DOCKER_IMAGE.value, - ) - if FLAGS.deployment_mode == 'local' and _DOCKER_IMAGE.value: - trials.start_job_in_docker( - 1, - _BINARY_PATH.value, - _OPTIMIZER_TYPE.value, - _DOCKER_IMAGE.value, - _DECISON_MODE.value, - 'docker_worker', - 'worker_mode', - _DECISION_PARAMS.value, - sight, - ) - # Otherwise, run within the current process. - else: - driver_fn(sight) + Args: + sight: The Sight object to be used for logging. + description: Human-readable description of the application. 
- elif _DECISON_MODE.value == 'train': - details = sight.widget_decision_state['decision_episode_fn'] - possible_actions = list(details.action_max.values())[0] - list( - details.action_min.values())[0] + 2 - - print('_DECISON_MODE.value : ', _DECISON_MODE.value) - if FLAGS.deployment_mode in ['distributed', 'vm']: - if (_OPTIMIZER_TYPE.value == 'exhaustive_search' and - possible_actions < _NUM_TRIALS.value): - raise ValueError( - f"max possible value for num_trials is : {possible_actions}") - # logging.info('FLAGS.deployment_mode == distributed') - if (not _DOCKER_IMAGE.value): - raise ValueError("docker_image must be provided for distributed mode") - # print("decision_config : ", decision_configuration) - trials.launch( - optimizer.obj, - decision_configuration, - _NUM_TRAIN_WORKERS.value, + Returns: + The optimizer to be used for training. + + Raises: + ValueError: + If the optimizer type is unknown. + """ + optimizer_map = { + 'dm_acme': lambda: AcmeOptimizerClient(sight), + 'vizier': lambda: SingleActionOptimizerClient( + sight_pb2.DecisionConfigurationStart.OptimizerType.OT_VIZIER, sight), + 'genetic_algorithm': lambda: GeneticAlgorithmOptimizerClient( + max_population_size=_NUM_TRAIN_WORKERS.value, sight=sight), + 'exhaustive_search': lambda: SingleActionOptimizerClient( + sight_pb2.DecisionConfigurationStart.OptimizerType. + OT_EXHAUSTIVE_SEARCH, sight, - ) - trials.start_jobs( - _NUM_TRAIN_WORKERS.value, - _BINARY_PATH.value, - _OPTIMIZER_TYPE.value, - _DOCKER_IMAGE.value, - _DECISON_MODE.value, - 'worker_mode', - 'dsub_cloud_worker', + ), + 'bayesian_opt': lambda: SingleActionOptimizerClient( + sight_pb2.DecisionConfigurationStart.OptimizerType.OT_BAYESIAN_OPT, sight, - ) - elif FLAGS.deployment_mode in [ - 'local', - 'dsub_local', - 'docker_local', - 'worker_mode', - ]: - if FLAGS.deployment_mode == 'worker_mode' or 'PARENT_LOG_ID' in os.environ: - # not used anymore - for worklist scheduler - # num_samples_to_run = int(os.environ['num_samples']) - pass - else: - trials.launch( - optimizer.obj, - decision_configuration, - _NUM_TRAIN_WORKERS.value, - sight, - ) - # not used anymore - for worklist scheduler - num_samples_to_run = _NUM_TRIALS.value - - # If a docker image is provided, run within it. - if (FLAGS.deployment_mode == 'docker_local' - ): # and _NUM_TRAIN_WORKERS.value==1: - trials.start_job_in_docker( - _NUM_TRIALS.value, - _BINARY_PATH.value, - _OPTIMIZER_TYPE.value, - _DOCKER_IMAGE.value, - _DECISON_MODE.value, - 'worker_mode', - 'docker_local_worker', - _DECISION_PARAMS.value, - sight, - ) - # run d-sub locally - elif (FLAGS.deployment_mode == 'dsub_local' - ): # and _NUM_TRAIN_WORKERS.value>1: - trials.start_job_in_dsub_local( - _NUM_TRAIN_WORKERS.value, - _NUM_TRIALS.value, - _BINARY_PATH.value, - _OPTIMIZER_TYPE.value, - _DOCKER_IMAGE.value, - _DECISON_MODE.value, - 'worker_mode', - 'dsub_local_worker', - sight, - ) - # Otherwise, run within the current process. 
- else: # local & worker_mode - # if _OPTIMIZER_TYPE.value == 'dm_acme': - # optimizer.obj = acme_optimizer_client.Acme(sight) - # elif _OPTIMIZER_TYPE.value == 'vizier': - # optimizer.obj = vizier_optimizer_client.Vizier(sight) - # elif _OPTIMIZER_TYPE.value == 'exhaustive_search': - # optimizer.obj = exhaustive_search_client.ExhaustiveSearch(sight) - - # actions_list = [ - # {'action_1': 1, 'action_2': 1, 'action_3': 1}, - # {'action_1': 2, 'action_2': 2, 'action_3': 2}, - # {'action_1': 3, 'action_2': 3, 'action_3': 3} - # ] - # unique_action_ids = propose_actions(sight, actions_list) - - if FLAGS.deployment_mode == 'local': - client_id = str(sight.id) - worker_location = '0' - elif (FLAGS.deployment_mode == 'worker_mode' - # or FLAGS.deployment_mode == 'docker_mode' - ): - client_id = os.environ['PARENT_LOG_ID'] - worker_location = os.environ['worker_location'] - - # for _ in range(num_samples_to_run): - # if(FLAGS.optimizer_type == "worklist_scheduler"): - # if (FLAGS.deployment_mode == 'worker_mode'): - while (True): - # #? new rpc just to check move forward or not? - req = service_pb2.WorkerAliveRequest( - client_id=client_id, - worker_id=f'client_{client_id}_worker_{worker_location}') - response = service.call( - lambda s, meta: s.WorkerAlive(req, 300, metadata=meta)) - - logging.info("response from workAlive rpc is : %s", - response.status_type) - if (response.status_type == - service_pb2.WorkerAliveResponse.StatusType.ST_DONE): - break - elif (response.status_type == - service_pb2.WorkerAliveResponse.StatusType.ST_RETRY): - logging.info('sleeping for 5 seconds......') - time.sleep(5) - elif (response.status_type == - service_pb2.WorkerAliveResponse.StatusType.ST_ACT): - sight.enter_block('Decision Sample', sight_pb2.Object()) - if 'constant_action' in sight.widget_decision_state: - del sight.widget_decision_state['constant_action'] - sight.widget_decision_state['discount'] = 0 - sight.widget_decision_state['last_reward'] = None - - if env: - driver_fn(env, sight) - else: - driver_fn(sight) - - finalize_episode(sight) - sight.exit_block('Decision Sample', sight_pb2.Object()) - else: - raise ValueError("invalid response from server") - logging.info('exiting from the loop.....') - # else: - # for _ in range(num_samples_to_run): - # sight.enter_block('Decision Sample', sight_pb2.Object()) - # if 'constant_action' in sight.widget_decision_state: - # del sight.widget_decision_state['constant_action'] - # sight.widget_decision_state['discount'] = 0 - # sight.widget_decision_state['last_reward'] = None - - # if env: - # driver_fn(env, sight) - # else: - # driver_fn(sight) - # finalize_episode(sight) - # sight.exit_block('Decision Sample', sight_pb2.Object()) - - # req = service_pb2.TestRequest(client_id=str(sight.id)) - # response = service.call( - # lambda s, meta: s.PrintInsertionTime(req, 300, metadata=meta) - # ) - - logging.debug("<<<< Out %s of %s", method_name, _file_name) + ), + 'sensitivity_analysis': lambda: SingleActionOptimizerClient( + sight_pb2.DecisionConfigurationStart.OptimizerType. + OT_SENSITIVITY_ANALYSIS, + sight, + ), + 'smcpy': lambda: SingleActionOptimizerClient( + sight_pb2.DecisionConfigurationStart.OptimizerType.OT_SMC_PY, sight), + 'worklist_scheduler': lambda: SingleActionOptimizerClient( + sight_pb2.DecisionConfigurationStart.OptimizerType. 
+ OT_WORKLIST_SCHEDULER, + sight, + ), + } + + # Add support for dynamic optimizers + if _OPTIMIZER_TYPE.value.startswith('llm_'): + return LLMOptimizerClient( + _OPTIMIZER_TYPE.value.partition('llm_')[2], description, sight) + + if _OPTIMIZER_TYPE.value.startswith('ng_'): + return SingleActionOptimizerClient( + sight_pb2.DecisionConfigurationStart.OptimizerType.OT_NEVER_GRAD, + sight, + _OPTIMIZER_TYPE.value.partition('ng_')[2], + ) + + if _OPTIMIZER_TYPE.value not in optimizer_map: + raise ValueError(f'Unknown optimizer type {_OPTIMIZER_TYPE.value}') + + return optimizer_map[_OPTIMIZER_TYPE.value]() + + +def initialize_env(env, state_attrs, action_attrs): + if env is not None: + if not state_attrs: + state_attrs.update(attr_to_dict(env.observation_spec(), 'state')) + if not action_attrs: + action_attrs.update(attr_to_dict(env.action_spec(), 'action')) def get_state_attrs(sight: Any) -> list[str]: @@ -662,20 +1103,131 @@ def state_updated( sight.widget_decision_state['state'][name] = obj_to_log +# Works in case of decision outcome def get_decision_outcome_proto(outcome_label: str, sight: Any) -> sight_pb2.DecisionOutcome: - decision_outcome = sight_pb2.DecisionOutcome(outcome_label=outcome_label) + """Returns the decision outcome proto for the given outcome label.""" + decision_outcome_proto = sight_pb2.DecisionOutcome( + outcome_label=outcome_label) if 'sum_reward' in sight.widget_decision_state: - decision_outcome.reward = sight.widget_decision_state['sum_reward'] + decision_outcome_proto.reward = sight.widget_decision_state['sum_reward'] if 'sum_outcome' in sight.widget_decision_state: - decision_outcome.outcome_params.CopyFrom( + decision_outcome_proto.outcome_params.CopyFrom( convert_dict_to_proto(dict=sight.widget_decision_state['sum_outcome'])) if 'discount' in sight.widget_decision_state: - decision_outcome.discount = sight.widget_decision_state['discount'] + decision_outcome_proto.discount = sight.widget_decision_state['discount'] + + return decision_outcome_proto + + +def get_decision_outcome_from_decision_message( + outcome_label: str, decision_message: DecisionMessage): + """Returns the decision outcome from the decision message.""" + + # logging.debug('decision message =>%s', decision_message) + + decision_outcome_proto = sight_pb2.DecisionOutcome( + outcome_label=outcome_label) + decision_outcome_proto.reward = decision_message.reward + decision_outcome_proto.outcome_params.CopyFrom( + convert_dict_to_proto(dict=decision_message.outcome_params)) + decision_outcome_proto.discount = decision_message.discount + return decision_outcome_proto + + +def _configure_client_and_worker(sight): + """Configures the client and worker identifiers.""" + if FLAGS.deployment_mode in ['local'] or _TRAINED_MODEL_LOG_ID.value: + client_id = str(sight.id) + worker_location = '0' + elif FLAGS.deployment_mode == 'worker_mode': + client_id = os.environ['PARENT_LOG_ID'] + worker_location = os.environ['worker_location'] + else: + client_id = 'unknown' + worker_location = 'unknown' + return client_id, worker_location + - return decision_outcome +def _process_acme_action(selected_action, widget_state): + """Processes the action for 'dm_acme' optimizer.""" + # ? when action space is scalar (DQN agent - cartpole) + if selected_action.shape == (): + return { + widget_state['decision_episode_fn'].action_attrs[0]: selected_action[()] + } + # ? 
when action space is 1d array (D4pg agent - pendulum) + return { + widget_state['decision_episode_fn'].action_attrs[i]: selected_action[i] + for i in range(len(widget_state['decision_episode_fn'].action_attrs)) + } + + +def _process_worklist_scheduler(sight, req): + """Processes the action for 'worklist_scheduler' optimizer.""" + widget_state = sight.widget_decision_state + if not optimizer.obj: + optimizer.obj = SingleActionOptimizerClient( + sight_pb2.DecisionConfigurationStart.OptimizerType. + OT_WORKLIST_SCHEDULER, + sight, + ) + if widget_state['action_id']: + return (widget_state['cached_messages'].get( + widget_state['action_id']).action_params) + return optimizer.get_instance().decision_point(sight, req) + + +def _process_llm_action(sight, req, optimizer_obj): + """Processes the action for 'llm_' optimizers.""" + widget_state = sight.widget_decision_state + if 'reward' in widget_state: + req.decision_outcome.reward = widget_state['reward'] + if 'outcome_value' in widget_state: + req.decision.outcome.outcome_params.CopyFrom( + convert_dict_to_proto(dict=widget_state['outcome_value'])) + req.decision_outcome.discount = widget_state['discount'] + return optimizer_obj.decision_point(sight, req) + + +def _make_decision(sight, req): + """Handles decision-making based on the optimizer type.""" + optimizer_obj = optimizer.get_instance() + optimizer_type = _OPTIMIZER_TYPE.value + widget_state = sight.widget_decision_state + if optimizer_type == 'dm_acme': + selected_action = optimizer_obj.decision_point(sight, req) + chosen_action = _process_acme_action(selected_action, widget_state) + elif optimizer_type in [ + 'vizier', + 'genetic_algorithm', + 'exhaustive_search', + 'bayesian_opt', + 'sensitivity_analysis', + 'smcpy', + ] or optimizer_type.startswith('ng_'): + chosen_action = optimizer_obj.decision_point(sight, req) + elif optimizer_type == 'worklist_scheduler': + chosen_action = _process_worklist_scheduler(sight, req) + elif optimizer_type.startswith('llm_'): + chosen_action = _process_llm_action(sight, req, optimizer_obj) + else: + raise ValueError(f'Unsupported optimizer type: {optimizer_type}') + return chosen_action + + +def _log_decision(choice_label, chosen_action, sight): + """Logs the decision to the Sight logger.""" + choice_params = sight_pb2.DecisionParam() + choice_params.CopyFrom(convert_dict_to_proto(dict=chosen_action)) + obj = sight_pb2.Object( + sub_type=sight_pb2.Object.ST_DECISION_POINT, + decision_point=sight_pb2.DecisionPoint(choice_label=choice_label), + ) + obj.decision_point.choice_params.CopyFrom(choice_params) + sight.log_object(obj, inspect.currentframe().f_back.f_back) def decision_point( @@ -698,99 +1250,59 @@ def decision_point( """ method_name = 'decision_point' logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name) - # logging.info('>>>>>>>>> In %s of %s, sight.widget_decision_state=%s', method_name, _file_name, sight.widget_decision_state) - + # logging.info( + # '>>>>>>>>> In %s of %s, sight.widget_decision_state=%s', + # method_name, + # _file_name, + # sight.widget_decision_state, + # ) + + # Increment decision point count sight.widget_decision_state['num_decision_points'] += 1 - chosen_action = None + # Return cached action if available if 'constant_action' in sight.widget_decision_state: return sight.widget_decision_state['constant_action'] + # Prepare the request req = service_pb2.DecisionPointRequest() - - if FLAGS.deployment_mode == 'local' or _TRAINED_MODEL_LOG_ID.value: - global _sight_id - _sight_id = str(sight.id) - client_id = 
str(sight.id) - worker_location = '0' - elif (FLAGS.deployment_mode == 'worker_mode' - # or FLAGS.deployment_mode == 'docker_mode' - ): - client_id = os.environ['PARENT_LOG_ID'] - worker_location = os.environ['worker_location'] - + client_id, worker_location = _configure_client_and_worker(sight) req.client_id = client_id req.worker_id = f'client_{client_id}_worker_{worker_location}' - if _OPTIMIZER_TYPE.value == 'dm_acme': - optimizer_obj = optimizer.get_instance() - selected_action = optimizer_obj.decision_point(sight, req) - # print("selected_action : ", selected_action, type(selected_action), selected_action.shape, ) - # raise SystemError - - chosen_action = {} - #? when action space is scalar (DQN agent - cartpole) - if (selected_action.shape == ()): - chosen_action[sight.widget_decision_state['decision_episode_fn']. - action_attrs[0]] = selected_action[()] - #? when action space is 1d array (D4pg agent - pendulum) - else: - for i in range( - len(sight.widget_decision_state['decision_episode_fn'].action_attrs)): - chosen_action[sight.widget_decision_state['decision_episode_fn']. - action_attrs[i]] = selected_action[i] - # print("chosen_action : ", chosen_action) - - # selected_action will be same for all calls of decision point in these - # optimizers. As such, it is cached as the constant action. - elif _OPTIMIZER_TYPE.value in [ - 'vizier', 'genetic_algorithm', 'exhaustive_search', 'bayesian_opt', - 'sensitivity_analysis', 'smcpy' - ] or _OPTIMIZER_TYPE.value.startswith('ng_'): - optimizer_obj = optimizer.get_instance() - chosen_action = optimizer_obj.decision_point(sight, req) - sight.widget_decision_state['constant_action'] = chosen_action - elif _OPTIMIZER_TYPE.value == 'worklist_scheduler': - if (not optimizer.obj): - optimizer.obj = SingleActionOptimizerClient( - sight_pb2.DecisionConfigurationStart.OptimizerType. 
- OT_WORKLIST_SCHEDULER, sight) - optimizer_obj = optimizer.get_instance() - chosen_action = optimizer_obj.decision_point(sight, req) - # if(chosen_action == None): - # print("received None in chosen action") - # return None - sight.widget_decision_state['constant_action'] = chosen_action + # perform the decision-making process + chosen_action = _make_decision(sight, req) - elif _OPTIMIZER_TYPE.value.startswith('llm_'): - optimizer_obj = optimizer.get_instance() - if 'reward' in sight.widget_decision_state: - req.decision_outcome.reward = sight.widget_decision_state['reward'] - if 'outcome_value' in sight.widget_decision_state: - req.decision.outcome.outcome_params.CopyFrom( - convert_dict_to_proto( - dict=sight.widget_decision_state["outcome_value"])) - req.decision_outcome.discount = sight.widget_decision_state['discount'] - chosen_action = optimizer_obj.decision_point(sight, req) + # setting the constant_action in sight widget + sight.widget_decision_state['constant_action'] = chosen_action - choice_params = sight_pb2.DecisionParam() - choice_params.CopyFrom(convert_dict_to_proto(dict=chosen_action)) - - # pytype: disable=attribute-error - obj = sight_pb2.Object( - sub_type=sight_pb2.Object.ST_DECISION_POINT, - decision_point=sight_pb2.DecisionPoint(choice_label=choice_label, - # choice_params=choice_params, - ), - ) - obj.decision_point.choice_params.CopyFrom(choice_params) - sight.log_object(obj, inspect.currentframe().f_back.f_back) + # log the decision + _log_decision(choice_label, chosen_action, sight) logging.info('decision_point() chosen_action=%s', chosen_action) logging.debug('<<<< Out %s of %s', method_name, _file_name) return chosen_action +def _update_cached_batch(sight: Any): + """Updates the cached batch with the latest decision state. + + Args: + sight: Instance of a Sight logger. + """ + action_id = sight.widget_decision_state.get('action_id', None) + cached_messages = sight.widget_decision_state.get('cached_messages', None) + if cached_messages and action_id: + logging.info(f'Caching batch for action_id: {action_id}') + cached_messages.update( + key=action_id, + action_params=cached_messages.get(action_id).action_params, + discount=sight.widget_decision_state['discount'], + reward=sight.widget_decision_state.get('sum_reward', 0), + outcome_params=sight.widget_decision_state.get('sum_outcome', {}), + ) + + def decision_outcome( outcome_label: str, sight: Any, @@ -804,9 +1316,10 @@ def decision_outcome( Args: outcome_label: Label that identifies the outcome. sight: Instance of a Sight logger. - reward: The numeric value of the quality of this outcome, with higher values being - more desirable. - outcome: Dictionary that describes the various outcome attributes of the application. + reward: The numeric value of the quality of this outcome, with higher values + being more desirable. + outcome: Dictionary that describes the various outcome attributes of the + application. 
discount: discount value to be used """ method_name = 'decision_outcome' @@ -835,15 +1348,10 @@ def decision_outcome( # converting json into string else: # converting pandas datafram to json and storing it as json string - # sight.widget_decision_state['sum_outcome'][key] = json.dumps(outcome[key].to_json()) + # sight.widget_decision_state['sum_outcome'][key] = + # json.dumps(outcome[key].to_json()) sight.widget_decision_state['sum_outcome'][key] = outcome[key] - # if not isinstance(outcome[key], float) and not isinstance(outcome[key], int): - # continue - # if key not in sight.widget_decision_state['sum_outcome']: - # sight.widget_decision_state['sum_outcome'][key] = 0 - # sight.widget_decision_state['sum_outcome'][key] += outcome[key] - sight.log_object( sight_pb2.Object( sub_type=sight_pb2.Object.ST_DECISION_OUTCOME, @@ -852,10 +1360,19 @@ def decision_outcome( inspect.currentframe().f_back.f_back, ) - logging.debug("<<<< Out %s of %s", method_name, _file_name) + _update_cached_batch(sight) + + if 'sum_reward' in sight.widget_decision_state: + _rewards.append(sight.widget_decision_state['sum_reward']) + + sight.widget_decision_state.pop('sum_reward', None) + sight.widget_decision_state.pop('sum_outcome', None) + + logging.debug('<<<< Out %s of %s', method_name, _file_name) def propose_actions(sight, action_dict): + """Proposes actions to the server.""" attr_dict = sight.fetch_attributes() @@ -882,65 +1399,84 @@ def propose_actions(sight, action_dict): return action_id +def _handle_optimizer_finalize(sight: Any, req: Any) -> None: + """Handles optimizer-specific finalization logic. + + Args: + sight: Instance of a Sight logger. + req: FinalizeEpisodeRequest object. + """ + optimizer_obj = optimizer.get_instance() + + # Get the list of action messages (supports multiple action IDs) + cached_messages_obj = sight.widget_decision_state.get('cached_messages', {}) + all_messages: dict[str, DecisionMessage] = cached_messages_obj.all_messages() + logging.info('action_messages => %s', all_messages) + + for action_id, msg in all_messages.items(): + decision_message = sight_pb2.DecisionMessage() + decision_message.decision_outcome.CopyFrom( + get_decision_outcome_from_decision_message(outcome_label='outcome', + decision_message=msg)) + decision_message.action_id = action_id + req.decision_messages.append(decision_message) + + # clearing the cached + cached_messages_obj.clear() + + if _OPTIMIZER_TYPE.value in { + 'genetic_algorithm', + 'exhaustive_search', + 'vizier', + 'bayesian_opt', + 'sensitivity_analysis', + 'smcpy', + } or _OPTIMIZER_TYPE.value.startswith(('llm_', 'ng_')): + optimizer_obj.finalize_episode(sight, req) + + elif _OPTIMIZER_TYPE.value == 'worklist_scheduler': + if not optimizer.obj: + optimizer.obj = SingleActionOptimizerClient( + sight_pb2.DecisionConfigurationStart.OptimizerType. + OT_WORKLIST_SCHEDULER, + sight, + ) + optimizer_obj.finalize_episode(sight, req) + + elif _OPTIMIZER_TYPE.value == 'dm_acme': + optimizer_obj.finalize_episode(sight) + + if 'outcome_value' in sight.widget_decision_state: + del sight.widget_decision_state['outcome_value'] + + def finalize_episode(sight): # , optimizer_obj """Finalize the run. Args: sight: Instance of a Sight logger. 
- optimizer_obj: Object of Optimizer instance """ method_name = 'finalize_episode' logging.debug('>>>>>>>>> In %s of %s', method_name, _file_name) - if (FLAGS.deployment_mode == 'local' - # or FLAGS.deployment_mode == 'docker_mode' - or FLAGS.deployment_mode == 'worker_mode'): - if FLAGS.deployment_mode == 'local': - client_id = str(sight.id) - worker_location = '0' - elif (FLAGS.deployment_mode == 'worker_mode' - # or FLAGS.deployment_mode == 'docker_mode' - ): - client_id = os.environ['PARENT_LOG_ID'] - worker_location = os.environ['worker_location'] + if FLAGS.deployment_mode in {'local', 'worker_mode'}: + client_id, worker_location = _configure_client_and_worker(sight) + # create the req req = service_pb2.FinalizeEpisodeRequest( client_id=client_id, worker_id=f'client_{client_id}_worker_{worker_location}', ) - if _OPTIMIZER_TYPE.value in [ - 'genetic_algorithm', 'exhaustive_search', 'vizier', 'bayesian_opt', - 'sensitivity_analysis', 'smcpy' - ] or _OPTIMIZER_TYPE.value.startswith( - 'llm_') or _OPTIMIZER_TYPE.value.startswith('ng_'): - req.decision_outcome.CopyFrom(get_decision_outcome_proto( - 'outcome', sight)) - optimizer_obj = optimizer.get_instance() - optimizer_obj.finalize_episode(sight, req) - elif _OPTIMIZER_TYPE.value == 'worklist_scheduler': - if (not optimizer.obj): - optimizer.obj = SingleActionOptimizerClient( - sight_pb2.DecisionConfigurationStart.OptimizerType. - OT_WORKLIST_SCHEDULER, sight) - req.decision_outcome.CopyFrom(get_decision_outcome_proto( - 'outcome', sight)) - # print('request : ', req) - optimizer_obj = optimizer.get_instance() - optimizer_obj.finalize_episode(sight, req) - elif _OPTIMIZER_TYPE.value == 'dm_acme': - optimizer_obj = optimizer.get_instance() - optimizer_obj.finalize_episode(sight) - - if 'outcome_value' in sight.widget_decision_state: - del sight.widget_decision_state['outcome_value'] + _handle_optimizer_finalize(sight, req) else: logging.info('Not in local/worker mode, so skipping it') + client_id, worker_location = _configure_client_and_worker(sight) + if sight.widget_decision_state['proposed_actions']: for proposal in sight.widget_decision_state['proposed_actions']: - # logging.info('proposal=%s', proposal) proposal_req = service_pb2.ProposeActionRequest( client_id=client_id, worker_id=f'client_{client_id}_worker_{worker_location}', @@ -950,27 +1486,30 @@ def finalize_episode(sight): # , optimizer_obj ), action=proposal['action'], ) - + # logging.info('proposal=%s', proposal) response = service.call( lambda s, meta: s.ProposeAction(proposal_req, 300, metadata=meta)) sight.widget_decision_state['proposed_actions'] = [] - if 'sum_reward' in sight.widget_decision_state: - _rewards.append(sight.widget_decision_state['sum_reward']) - sight.widget_decision_state.pop('sum_reward', None) - sight.widget_decision_state.pop('sum_outcome', None) - - logging.debug("<<<< Out %s of %s", method_name, _file_name) + logging.debug('<<<< Out %s of %s', method_name, _file_name) def get_outcome(sight): + """Returns the outcome from the server. + + Args: + sight: Instance of a Sight logger. 
+ + Returns: + outcome_list: List of outcomes + """ request = service_pb2.GetOutcomeRequest() request.client_id = str(sight.id) # request.unique_ids.append(3) response = service.call( lambda s, meta: s.GetOutcome(request, 300, metadata=meta)) - if (response.response_str): + if response.response_str: return response.response_str outcome_list = [] diff --git a/py/sight/widgets/decision/single_action_optimizer_client.py b/py/sight/widgets/decision/single_action_optimizer_client.py index 7608d29..b977093 100644 --- a/py/sight/widgets/decision/single_action_optimizer_client.py +++ b/py/sight/widgets/decision/single_action_optimizer_client.py @@ -104,13 +104,7 @@ def decision_point(self, sight, request: service_pb2.DecisionPointRequest): @override def finalize_episode(self, sight, request: service_pb2.FinalizeEpisodeRequest): - # logging.info('SingleActionOptimizerClient() finalize_episode') - if self._last_action: - logging.info('finalize episode => %s', - request.decision_point.choice_params) - update_proto_map( - existing_proto_map=request.decision_point.choice_params, - new_proto_map=convert_proto_to_dict(proto=self._last_action)) + logging.info('SingleActionOptimizerClient() finalize_episode') response = service.call( lambda s, meta: s.FinalizeEpisode(request, 300, metadata=meta)) return response diff --git a/sight_service/proto/api_descriptor.pb b/sight_service/proto/api_descriptor.pb index 3e7cc3adf87c886dc4095f9a5c15fa9009d2f4b0..94ac09d4b126a5b8338f7cd72490a766e4671fae 100644 GIT binary patch delta 2739 zcma)8-%k`*7@hmwxx+C0T4on^*%e&Ss!gl4q7-V-{!rVfO%rSEgAc?itu#?g!K6u( zuDW8h(FWYmiLnU&1#LnaUYgiGH>tj9FhZr;U=deDQVb|P_s76flfLbl@0@eL`^~v? zcWl}XX5Am#w%b>(lNWrQXNkeC?H@PzPnlP?t@U7AM_c>R4_Z4q4ty3&K4Ue(#0%CC zls;$OgSWrxVMX8HVP1hRO06y@NIhiLgQLx_(%>(d*29@8!Mq}m+f~fB9|||_6??W_ zMV4<*3aa!szZK?@ja3EI4f)Pz+?^P;xSNP!*;T5XCPQoyi94(Vgz*M(I@erc6gywW2(={wWH zdTs`h1a(@t72fL@L{bi7Cim9(+IsxN6lMgu$^;O^8BtgfLU_)IM3vX9bSTc|2F9D9 zW6;em>6k2uLOlXhp_`QxcvtB#fw^4qgRPk)$?eaD+XsYhPDHBBdIa5^NY|?ECncH} zRBMWI`+$0KC_*|fBFn<6A}>!vN_NV376gK!V~_%KLB?Q*%t1+CJ=T4s3!mj z@(IW61Q50-9PDJjlA?IZ$wmI&mr1D+oN`D92vF7BQq>P2gr}TxUu`8t7(vz{Jy#&C z2^dP{6ta#RF;cnjtP_nHDbQ)ht3?umpn z^PnTx@`9ab>EO2)EIm}az#PV&^{%1A|KTbAkLOR_H18Dt&?=90+`2l{TR@|$+3n!? 
zYjh^kiTV3-j5mpPCqGRGC#PAeG9C8Sa;&}WXlJ@PUBB5&=X#D+1|!p~AhT3&Ki+w; z{YWbq8#jAu&~U>nVb>T!jPfQmy%$1^@+R%vrCx(CU&<|_<)$cK1E7A}C_;LR%93GK zk*jMm-b&TB?c2jdTa%S(G}*8*ec)gxK5i{-htSR&i|t^8k`{JnV~JrZT4OwIEHO+k zzZpU-F-*-{RNkY$bBC-yNasBY<{fgEd&L>79+i0?A=148A$x#2?|>kWkU8;yupJ?s zczIN!GMJ-8akVB8q<~HcRkgqmA*7>JQK>qL`OaOkGJl$uVxZn7H>zx7vZJxk76`V0 zx+6ev$EZ--0>XAIw5{-+du07(Tr0r5N3K3771H%Sna?8-T-O0>TVRmyQ(>{{77(`g zsVw3(cqA;}87J#W@c9PXw0WFFsq9l&((!?Dln4%h%0WVe@qu($r?@Ci1j8F>ea!?> ziChI72F?UU%Dwf912h?|sizG!lO{Xh$WTubz7)s`Ao|J&U)IyAz4@iJI5>H^R!nga zlqnb_o%GxL_I#)U1Eov|9v7OmZD^*bxWv28@ky-oou_0i3_Yo*-&y1-;WB8#Qe0qn@v(ib_=X+^~Kr~b_)pG9^I|Hkl}Q3Nh^;TU?`R2 zySOBky281dJTU6t@q%*H9a)wG4!9ll z%De)NLBNC3r3c7Rz>&=cqX6Mj(AP+fb(gs9E%Ic}FUcspVtBxpgMS)nZQW&V7cKH+ z)-UtYGVdM3!)E$|9Zj??)5q=NMV`!iA4lHh8&fxSKPUY!0SF&PKz)++jor^n5_*$r zwEaA_%t+Dj0H>{{$Z&E1b%Jq8!)bu4PwWbTb>yfAxcSEFJA8mQZ~4SI_Cs?e!Y2kY~2bP9@)QhSLq+= zs#+zyf*X31GRUcR<#`2iLT{@2*u=^XJW4!E3RF@_jzBmi6S)`#V^cw`(>?^Jt3wm3 z;d9}eUhugbl8Hv^bK#qog`!%oeH3~(tLpE^D+MAS&ywn`kl?#1)6Fr$cT<*IRh{;M zZb@pjK>;f;whCBp$@HQ)s>O@Tu##5{{TYdNnB5nloRL9ZJ;iB@y!}~8C2a&@CqUBy zAjGrrPJl=|%R6~dTM?X-)zswnN>R69PEw7{!!4MTK`KTV=VU{uw^AguU|y2xD+p@> zV=H%IUIuAvC0OTWCTp#r+mi0HLGA*u*|ecZ+=kmS-4chjy64)r*W3-B{A9pYt2?BX)tv7(rMCXhsYO?FF)j42ZNB2#3sA5gaAT>oqfa$bf!~I$9IP2xezW;4fGFA+7U3jaQUNDDS;fe>G!M4he{h_sieKCLUt@i<&2dcX!s@N1wkEc zSsF)Sy-drNt1d;-3H>YNOp{w&Uf#BAXZh9Ld!j-Y<)ZgiQFiSWk||z6I$YgVt`5a9 zAtg{(!-!$M%GEriwFpi`Kdz#-mI5rEt7UgkcWqUl5o(M zY7GjVieB!d`?pOktRwM_%7tROK_m5I{{eBTN>OXv*cit0GH+8J+5%xEOmsp@j z1bRIWh*En*GNm?SWb^zv4pgHbp03~#pYedOpApC*k;rmkEQIfX$T#WfXbSy9!ue2` zW6%WUAz`PZiL2-pc9cQL3N*t3gtk}MnFAtguZTyv5c-FO^N~%<*VkczE%OP{*H@pg zhs)exK$F(oV143$H&~z0hf7}zM}+gS&C1up5fK!1R(&hHHIO;i`2pk4AF6c;q0;w2u?)3Yp8q43E`=GIv(^B zJPQw69e6MrUqdUG3<|IIo{oooP^9YBlU9e#oQ&Gn(uNmK3a{>-jtBmvut&Ybbq)!8 jwm~@RK=WGwqNX8{Y%-hFgD@mYi(c~I+OvD_?vDQeEVd~G diff --git a/sight_service/proto/service.proto b/sight_service/proto/service.proto index da1a705..b8ed585 100644 --- a/sight_service/proto/service.proto +++ b/sight_service/proto/service.proto @@ -339,10 +339,12 @@ message GetOutcomeResponse { message FinalizeEpisodeRequest { string client_id = 1; string worker_id = 2; - sight.x.proto.DecisionPoint decision_point = 3; - sight.x.proto.DecisionOutcome decision_outcome = 4; + // sight.x.proto.DecisionPoint decision_point = 3; + // sight.x.proto.DecisionOutcome decision_outcome = 4; sight.x.proto.DecisionConfigurationStart.OptimizerType optimizer_type = 5; - Acme_Request acme_config = 6; + Acme_Request acme_config = 6; + // int64 action_id = 7; + repeated sight.x.proto.DecisionMessage decision_messages = 8; } message FinalizeEpisodeResponse { @@ -354,7 +356,6 @@ message TestRequest { } message TestResponse { string val = 1; - sight.x.proto.DecisionParam action = 2; } // The format in which the log is stored. 
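The `service.proto` change below is the wire-format half of the batching rework: `FinalizeEpisodeRequest` retires its single `decision_point`/`decision_outcome` pair in favor of `repeated DecisionMessage decision_messages`, and `WorkerAliveResponse` gains the same repeated field so the server can hand a worker several pending actions at once. A minimal sketch of assembling a batched request, mirroring `_handle_optimizer_finalize` in `decision.py` above (the IDs, label, and reward are illustrative only):

```python
# Sketch: batch two finished actions into one FinalizeEpisodeRequest.
from sight_service.proto import service_pb2

req = service_pb2.FinalizeEpisodeRequest(
    client_id='12345',
    worker_id='client_12345_worker_0',
)
for action_id in (1, 2):
  decision_message = req.decision_messages.add()
  decision_message.action_id = action_id
  decision_message.decision_outcome.outcome_label = 'outcome'
  decision_message.decision_outcome.reward = 0.0  # taken from the cache in practice
```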
enum LogFormat { @@ -413,4 +414,5 @@ message WorkerAliveResponse { ST_RETRY = 3; } StatusType status_type = 4; + repeated sight.x.proto.DecisionMessage decision_messages = 5; } diff --git a/sight_service/proto/service_pb2.py b/sight_service/proto/service_pb2.py index 7723dce..ed250bb 100644 --- a/sight_service/proto/service_pb2.py +++ b/sight_service/proto/service_pb2.py @@ -20,7 +20,7 @@ ) DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( - b'\n!sight_service/proto/service.proto\x12\x0fsight.x.service\x1a\x17sight/proto/sight.proto\x1a\x33sight_service/proto/numproto/protobuf/ndarray.proto\x1a\x1cgoogle/api/annotations.proto\"\xa5\x03\n\x0c\x41\x63me_Request\x12G\n\x14\x65pisode_observations\x18\x01 \x03(\x0b\x32).sight.x.service.Acme_Request.Observation\x12\x14\n\x0clearner_keys\x18\x02 \x03(\t\x1a\xfe\x01\n\x0bObservation\x12*\n\x06\x61\x63tion\x18\x01 \x01(\x0b\x32\x1a.numproto.protobuf.NDArray\x12\x38\n\x08steptype\x18\x02 \x01(\x0e\x32&.sight.x.service.Acme_Request.StepType\x12*\n\x06reward\x18\x03 \x01(\x0b\x32\x1a.numproto.protobuf.NDArray\x12,\n\x08\x64iscount\x18\x04 \x01(\x0b\x32\x1a.numproto.protobuf.NDArray\x12/\n\x0bobservation\x18\x05 \x01(\x0b\x32\x1a.numproto.protobuf.NDArray\"5\n\x08StepType\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x46IRST\x10\x01\x12\x07\n\x03MID\x10\x02\x12\x08\n\x04LAST\x10\x03\"\x80\x02\n\rAcme_Response\x12\x34\n\x06layers\x18\x01 \x03(\x0b\x32$.sight.x.service.Acme_Response.Layer\x1a\xb8\x01\n\x05Layer\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x41\n\x07weights\x18\x02 \x01(\x0b\x32\x30.sight.x.service.Acme_Response.Layer.WeightsData\x1a^\n\x0bWeightsData\x12\t\n\x01\x62\x18\x01 \x03(\x02\x12%\n\x01w\x18\x02 \x01(\x0b\x32\x1a.numproto.protobuf.NDArray\x12\x0e\n\x06offset\x18\x03 \x03(\x02\x12\r\n\x05scale\x18\x04 \x03(\x02\"\xe0\x01\n\x14\x44\x65\x63isionPointRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\x12\x34\n\x0e\x64\x65\x63ision_point\x18\x03 \x01(\x0b\x32\x1c.sight.x.proto.DecisionPoint\x12\x38\n\x10\x64\x65\x63ision_outcome\x18\x04 \x01(\x0b\x32\x1e.sight.x.proto.DecisionOutcome\x12\x32\n\x0b\x61\x63me_config\x18\x05 \x01(\x0b\x32\x1d.sight.x.service.Acme_Request\"\x9f\x02\n\x15\x44\x65\x63isionPointResponse\x12,\n\x06\x61\x63tion\x18\x01 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x35\n\racme_response\x18\x02 \x01(\x0b\x32\x1e.sight.x.service.Acme_Response\x12\x14\n\x0cresponse_idx\x18\x03 \x01(\x03\x12\x46\n\x0b\x61\x63tion_type\x18\x04 \x01(\x0e\x32\x31.sight.x.service.DecisionPointResponse.ActionType\"C\n\nActionType\x12\x0e\n\nAT_UNKNOWN\x10\x00\x12\n\n\x06\x41T_ACT\x10\x01\x12\x0b\n\x07\x41T_DONE\x10\x02\x12\x0c\n\x08\x41T_RETRY\x10\x03\"A\n\x19\x46\x65tchOptimalActionRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"2\n\x1a\x46\x65tchOptimalActionResponse\x12\x14\n\x0cresponse_str\x18\x01 \x01(\t\"5\n\x0bTellRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x13\n\x0bmessage_str\x18\x02 \x01(\t\"$\n\x0cTellResponse\x12\x14\n\x0cresponse_str\x18\x01 \x01(\t\"\"\n\rListenRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\">\n\x0eListenResponse\x12\x16\n\x0eresponse_ready\x18\x01 \x01(\x08\x12\x14\n\x0cresponse_str\x18\x02 \x01(\t\"<\n\x14\x43urrentStatusRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"\xae\x01\n\x15\x43urrentStatusResponse\x12\x14\n\x0cresponse_str\x18\x01 \x01(\t\x12=\n\x06status\x18\x02 
\x01(\x0e\x32-.sight.x.service.CurrentStatusResponse.Status\"@\n\x06Status\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x0f\n\x0bIN_PROGRESS\x10\x01\x12\x0b\n\x07SUCCESS\x10\x02\x12\x0b\n\x07\x46\x41ILURE\x10\x03\"|\n\rLaunchRequest\x12I\n\x16\x64\x65\x63ision_config_params\x18\x01 \x01(\x0b\x32).sight.x.proto.DecisionConfigurationStart\x12\r\n\x05label\x18\x03 \x01(\t\x12\x11\n\tclient_id\x18\x04 \x01(\t\"(\n\x0eLaunchResponse\x12\x16\n\x0e\x64isplay_string\x18\x01 \x01(\t\"\x8f\x01\n\x14ProposeActionRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x32\n\x0c\x61\x63tion_attrs\x18\x02 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x30\n\nattributes\x18\x03 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\"*\n\x15ProposeActionResponse\x12\x11\n\taction_id\x18\x01 \x01(\x03\":\n\x11GetOutcomeRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x12\n\nunique_ids\x18\x02 \x03(\x03\"\xf7\x03\n\x12GetOutcomeResponse\x12<\n\x07outcome\x18\x01 \x03(\x0b\x32+.sight.x.service.GetOutcomeResponse.Outcome\x1a\xa2\x03\n\x07Outcome\x12\x31\n\x0bstate_attrs\x18\x01 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x32\n\x0c\x61\x63tion_attrs\x18\x02 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x0e\n\x06reward\x18\x03 \x01(\x02\x12\x33\n\routcome_attrs\x18\x04 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x30\n\nattributes\x18\x05 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x42\n\x06status\x18\x06 \x01(\x0e\x32\x32.sight.x.service.GetOutcomeResponse.Outcome.Status\x12\x14\n\x0cresponse_str\x18\x07 \x01(\t\x12\x11\n\taction_id\x18\x08 \x01(\x03\"L\n\x06Status\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x0b\n\x07PENDING\x10\x01\x12\n\n\x06\x41\x43TIVE\x10\x02\x12\r\n\tCOMPLETED\x10\x03\x12\r\n\tNOT_EXIST\x10\x04\"\xb3\x02\n\x16\x46inalizeEpisodeRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\x12\x34\n\x0e\x64\x65\x63ision_point\x18\x03 \x01(\x0b\x32\x1c.sight.x.proto.DecisionPoint\x12\x38\n\x10\x64\x65\x63ision_outcome\x18\x04 \x01(\x0b\x32\x1e.sight.x.proto.DecisionOutcome\x12O\n\x0eoptimizer_type\x18\x05 \x01(\x0e\x32\x37.sight.x.proto.DecisionConfigurationStart.OptimizerType\x12\x32\n\x0b\x61\x63me_config\x18\x06 \x01(\x0b\x32\x1d.sight.x.service.Acme_Request\"D\n\x17\x46inalizeEpisodeResponse\x12\x14\n\x0cresponse_str\x18\x01 \x01(\t\x12\x13\n\x0bstop_worker\x18\x02 \x01(\x08\" \n\x0bTestRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\"I\n\x0cTestResponse\x12\x0b\n\x03val\x18\x01 \x01(\t\x12,\n\x06\x61\x63tion\x18\x02 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\"\x0f\n\rCreateRequest\"1\n\x0e\x43reateResponse\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x13\n\x0bpath_prefix\x18\x02 \x01(\t\"!\n\x0c\x43loseRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\"%\n\rCloseResponse\x12\x14\n\x0cresponse_str\x18\x01 \x01(\t\":\n\x12WorkerAliveRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"\xa0\x01\n\x13WorkerAliveResponse\x12\x44\n\x0bstatus_type\x18\x04 
\x01(\x0e\x32/.sight.x.service.WorkerAliveResponse.StatusType\"C\n\nStatusType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\n\n\x06ST_ACT\x10\x01\x12\x0b\n\x07ST_DONE\x10\x02\x12\x0c\n\x08ST_RETRY\x10\x03*[\n\tLogFormat\x12\x0e\n\nLF_UNKNOWN\x10\x00\x12\x0f\n\x0bLF_COLUMNIO\x10\x01\x12\x10\n\x0cLF_CAPACITOR\x10\x02\x12\x0e\n\nLF_SPANNER\x10\x03\x12\x0b\n\x07LF_AVRO\x10\x04\x32\xd6\x0c\n\x0cSightService\x12\x61\n\x04Test\x12\x1c.sight.x.service.TestRequest\x1a\x1d.sight.x.service.TestResponse\"\x1c\x82\xd3\xe4\x93\x02\x16\x12\x14/v1/test/{client_id}\x12]\n\x06\x43reate\x12\x1e.sight.x.service.CreateRequest\x1a\x1f.sight.x.service.CreateResponse\"\x12\x82\xd3\xe4\x93\x02\x0c\x12\n/v1/create\x12Y\n\x05\x43lose\x12\x1d.sight.x.service.CloseRequest\x1a\x1e.sight.x.service.CloseResponse\"\x11\x82\xd3\xe4\x93\x02\x0b\x12\t/v1/Close\x12q\n\x0bWorkerAlive\x12#.sight.x.service.WorkerAliveRequest\x1a$.sight.x.service.WorkerAliveResponse\"\x17\x82\xd3\xe4\x93\x02\x11\x12\x0f/v1/WorkerAlive\x12\x89\x01\n\x06Launch\x12\x1e.sight.x.service.LaunchRequest\x1a\x1f.sight.x.service.LaunchResponse\">\x82\xd3\xe4\x93\x02\x38\"\x1e/v1/launch/{client_id}/{label}:\x16\x64\x65\x63ision_config_params\x12\x9f\x01\n\rDecisionPoint\x12%.sight.x.service.DecisionPointRequest\x1a&.sight.x.service.DecisionPointResponse\"?\x82\xd3\xe4\x93\x02\x39\"*/v1/decision_point/{client_id}/{worker_id}:\x0b\x61\x63me_config\x12m\n\x04Tell\x12\x1c.sight.x.service.TellRequest\x1a\x1d.sight.x.service.TellResponse\"(\x82\xd3\xe4\x93\x02\"\" /v1/tell/{client_id}/{worker_id}\x12u\n\x06Listen\x12\x1e.sight.x.service.ListenRequest\x1a\x1f.sight.x.service.ListenResponse\"*\x82\xd3\xe4\x93\x02$\x12\"/v1/listen/{client_id}/{worker_id}\x12\x92\x01\n\rCurrentStatus\x12%.sight.x.service.CurrentStatusRequest\x1a&.sight.x.service.CurrentStatusResponse\"2\x82\xd3\xe4\x93\x02,\x12*/v1/current_status/{client_id}/{worker_id}\x12\xa7\x01\n\x12\x46\x65tchOptimalAction\x12*.sight.x.service.FetchOptimalActionRequest\x1a+.sight.x.service.FetchOptimalActionResponse\"8\x82\xd3\xe4\x93\x02\x32\x12\x30/v1/fetch_optimal_action/{client_id}/{worker_id}\x12`\n\rProposeAction\x12%.sight.x.service.ProposeActionRequest\x1a&.sight.x.service.ProposeActionResponse\"\x00\x12W\n\nGetOutcome\x12\".sight.x.service.GetOutcomeRequest\x1a#.sight.x.service.GetOutcomeResponse\"\x00\x12\xa7\x01\n\x0f\x46inalizeEpisode\x12\'.sight.x.service.FinalizeEpisodeRequest\x1a(.sight.x.service.FinalizeEpisodeResponse\"A\x82\xd3\xe4\x93\x02;\",/v1/finalize_episode/{client_id}/{worker_id}:\x0b\x61\x63me_configb\x06proto3' + b'\n!sight_service/proto/service.proto\x12\x0fsight.x.service\x1a\x17sight/proto/sight.proto\x1a\x33sight_service/proto/numproto/protobuf/ndarray.proto\x1a\x1cgoogle/api/annotations.proto\"\xa5\x03\n\x0c\x41\x63me_Request\x12G\n\x14\x65pisode_observations\x18\x01 \x03(\x0b\x32).sight.x.service.Acme_Request.Observation\x12\x14\n\x0clearner_keys\x18\x02 \x03(\t\x1a\xfe\x01\n\x0bObservation\x12*\n\x06\x61\x63tion\x18\x01 \x01(\x0b\x32\x1a.numproto.protobuf.NDArray\x12\x38\n\x08steptype\x18\x02 \x01(\x0e\x32&.sight.x.service.Acme_Request.StepType\x12*\n\x06reward\x18\x03 \x01(\x0b\x32\x1a.numproto.protobuf.NDArray\x12,\n\x08\x64iscount\x18\x04 \x01(\x0b\x32\x1a.numproto.protobuf.NDArray\x12/\n\x0bobservation\x18\x05 \x01(\x0b\x32\x1a.numproto.protobuf.NDArray\"5\n\x08StepType\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x46IRST\x10\x01\x12\x07\n\x03MID\x10\x02\x12\x08\n\x04LAST\x10\x03\"\x80\x02\n\rAcme_Response\x12\x34\n\x06layers\x18\x01 
\x03(\x0b\x32$.sight.x.service.Acme_Response.Layer\x1a\xb8\x01\n\x05Layer\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x41\n\x07weights\x18\x02 \x01(\x0b\x32\x30.sight.x.service.Acme_Response.Layer.WeightsData\x1a^\n\x0bWeightsData\x12\t\n\x01\x62\x18\x01 \x03(\x02\x12%\n\x01w\x18\x02 \x01(\x0b\x32\x1a.numproto.protobuf.NDArray\x12\x0e\n\x06offset\x18\x03 \x03(\x02\x12\r\n\x05scale\x18\x04 \x03(\x02\"\xe0\x01\n\x14\x44\x65\x63isionPointRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\x12\x34\n\x0e\x64\x65\x63ision_point\x18\x03 \x01(\x0b\x32\x1c.sight.x.proto.DecisionPoint\x12\x38\n\x10\x64\x65\x63ision_outcome\x18\x04 \x01(\x0b\x32\x1e.sight.x.proto.DecisionOutcome\x12\x32\n\x0b\x61\x63me_config\x18\x05 \x01(\x0b\x32\x1d.sight.x.service.Acme_Request\"\x9f\x02\n\x15\x44\x65\x63isionPointResponse\x12,\n\x06\x61\x63tion\x18\x01 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x35\n\racme_response\x18\x02 \x01(\x0b\x32\x1e.sight.x.service.Acme_Response\x12\x14\n\x0cresponse_idx\x18\x03 \x01(\x03\x12\x46\n\x0b\x61\x63tion_type\x18\x04 \x01(\x0e\x32\x31.sight.x.service.DecisionPointResponse.ActionType\"C\n\nActionType\x12\x0e\n\nAT_UNKNOWN\x10\x00\x12\n\n\x06\x41T_ACT\x10\x01\x12\x0b\n\x07\x41T_DONE\x10\x02\x12\x0c\n\x08\x41T_RETRY\x10\x03\"A\n\x19\x46\x65tchOptimalActionRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"2\n\x1a\x46\x65tchOptimalActionResponse\x12\x14\n\x0cresponse_str\x18\x01 \x01(\t\"5\n\x0bTellRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x13\n\x0bmessage_str\x18\x02 \x01(\t\"$\n\x0cTellResponse\x12\x14\n\x0cresponse_str\x18\x01 \x01(\t\"\"\n\rListenRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\">\n\x0eListenResponse\x12\x16\n\x0eresponse_ready\x18\x01 \x01(\x08\x12\x14\n\x0cresponse_str\x18\x02 \x01(\t\"<\n\x14\x43urrentStatusRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"\xae\x01\n\x15\x43urrentStatusResponse\x12\x14\n\x0cresponse_str\x18\x01 \x01(\t\x12=\n\x06status\x18\x02 \x01(\x0e\x32-.sight.x.service.CurrentStatusResponse.Status\"@\n\x06Status\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x0f\n\x0bIN_PROGRESS\x10\x01\x12\x0b\n\x07SUCCESS\x10\x02\x12\x0b\n\x07\x46\x41ILURE\x10\x03\"|\n\rLaunchRequest\x12I\n\x16\x64\x65\x63ision_config_params\x18\x01 \x01(\x0b\x32).sight.x.proto.DecisionConfigurationStart\x12\r\n\x05label\x18\x03 \x01(\t\x12\x11\n\tclient_id\x18\x04 \x01(\t\"(\n\x0eLaunchResponse\x12\x16\n\x0e\x64isplay_string\x18\x01 \x01(\t\"\x8f\x01\n\x14ProposeActionRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x32\n\x0c\x61\x63tion_attrs\x18\x02 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x30\n\nattributes\x18\x03 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\"*\n\x15ProposeActionResponse\x12\x11\n\taction_id\x18\x01 \x01(\x03\":\n\x11GetOutcomeRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x12\n\nunique_ids\x18\x02 \x03(\x03\"\xf7\x03\n\x12GetOutcomeResponse\x12<\n\x07outcome\x18\x01 \x03(\x0b\x32+.sight.x.service.GetOutcomeResponse.Outcome\x1a\xa2\x03\n\x07Outcome\x12\x31\n\x0bstate_attrs\x18\x01 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x32\n\x0c\x61\x63tion_attrs\x18\x02 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x0e\n\x06reward\x18\x03 \x01(\x02\x12\x33\n\routcome_attrs\x18\x04 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x30\n\nattributes\x18\x05 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x42\n\x06status\x18\x06 \x01(\x0e\x32\x32.sight.x.service.GetOutcomeResponse.Outcome.Status\x12\x14\n\x0cresponse_str\x18\x07 
\x01(\t\x12\x11\n\taction_id\x18\x08 \x01(\x03\"L\n\x06Status\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x0b\n\x07PENDING\x10\x01\x12\n\n\x06\x41\x43TIVE\x10\x02\x12\r\n\tCOMPLETED\x10\x03\x12\r\n\tNOT_EXIST\x10\x04\"\xfe\x01\n\x16\x46inalizeEpisodeRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\x12O\n\x0eoptimizer_type\x18\x05 \x01(\x0e\x32\x37.sight.x.proto.DecisionConfigurationStart.OptimizerType\x12\x32\n\x0b\x61\x63me_config\x18\x06 \x01(\x0b\x32\x1d.sight.x.service.Acme_Request\x12\x39\n\x11\x64\x65\x63ision_messages\x18\x08 \x03(\x0b\x32\x1e.sight.x.proto.DecisionMessage\"D\n\x17\x46inalizeEpisodeResponse\x12\x14\n\x0cresponse_str\x18\x01 \x01(\t\x12\x13\n\x0bstop_worker\x18\x02 \x01(\x08\" \n\x0bTestRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\"\x1b\n\x0cTestResponse\x12\x0b\n\x03val\x18\x01 \x01(\t\"\x0f\n\rCreateRequest\"1\n\x0e\x43reateResponse\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x13\n\x0bpath_prefix\x18\x02 \x01(\t\"!\n\x0c\x43loseRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\"%\n\rCloseResponse\x12\x14\n\x0cresponse_str\x18\x01 \x01(\t\":\n\x12WorkerAliveRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x11\n\tworker_id\x18\x02 \x01(\t\"\xdb\x01\n\x13WorkerAliveResponse\x12\x44\n\x0bstatus_type\x18\x04 \x01(\x0e\x32/.sight.x.service.WorkerAliveResponse.StatusType\x12\x39\n\x11\x64\x65\x63ision_messages\x18\x05 \x03(\x0b\x32\x1e.sight.x.proto.DecisionMessage\"C\n\nStatusType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\n\n\x06ST_ACT\x10\x01\x12\x0b\n\x07ST_DONE\x10\x02\x12\x0c\n\x08ST_RETRY\x10\x03*[\n\tLogFormat\x12\x0e\n\nLF_UNKNOWN\x10\x00\x12\x0f\n\x0bLF_COLUMNIO\x10\x01\x12\x10\n\x0cLF_CAPACITOR\x10\x02\x12\x0e\n\nLF_SPANNER\x10\x03\x12\x0b\n\x07LF_AVRO\x10\x04\x32\xd6\x0c\n\x0cSightService\x12\x61\n\x04Test\x12\x1c.sight.x.service.TestRequest\x1a\x1d.sight.x.service.TestResponse\"\x1c\x82\xd3\xe4\x93\x02\x16\x12\x14/v1/test/{client_id}\x12]\n\x06\x43reate\x12\x1e.sight.x.service.CreateRequest\x1a\x1f.sight.x.service.CreateResponse\"\x12\x82\xd3\xe4\x93\x02\x0c\x12\n/v1/create\x12Y\n\x05\x43lose\x12\x1d.sight.x.service.CloseRequest\x1a\x1e.sight.x.service.CloseResponse\"\x11\x82\xd3\xe4\x93\x02\x0b\x12\t/v1/Close\x12q\n\x0bWorkerAlive\x12#.sight.x.service.WorkerAliveRequest\x1a$.sight.x.service.WorkerAliveResponse\"\x17\x82\xd3\xe4\x93\x02\x11\x12\x0f/v1/WorkerAlive\x12\x89\x01\n\x06Launch\x12\x1e.sight.x.service.LaunchRequest\x1a\x1f.sight.x.service.LaunchResponse\">\x82\xd3\xe4\x93\x02\x38\"\x1e/v1/launch/{client_id}/{label}:\x16\x64\x65\x63ision_config_params\x12\x9f\x01\n\rDecisionPoint\x12%.sight.x.service.DecisionPointRequest\x1a&.sight.x.service.DecisionPointResponse\"?\x82\xd3\xe4\x93\x02\x39\"*/v1/decision_point/{client_id}/{worker_id}:\x0b\x61\x63me_config\x12m\n\x04Tell\x12\x1c.sight.x.service.TellRequest\x1a\x1d.sight.x.service.TellResponse\"(\x82\xd3\xe4\x93\x02\"\" 
/v1/tell/{client_id}/{worker_id}\x12u\n\x06Listen\x12\x1e.sight.x.service.ListenRequest\x1a\x1f.sight.x.service.ListenResponse\"*\x82\xd3\xe4\x93\x02$\x12\"/v1/listen/{client_id}/{worker_id}\x12\x92\x01\n\rCurrentStatus\x12%.sight.x.service.CurrentStatusRequest\x1a&.sight.x.service.CurrentStatusResponse\"2\x82\xd3\xe4\x93\x02,\x12*/v1/current_status/{client_id}/{worker_id}\x12\xa7\x01\n\x12\x46\x65tchOptimalAction\x12*.sight.x.service.FetchOptimalActionRequest\x1a+.sight.x.service.FetchOptimalActionResponse\"8\x82\xd3\xe4\x93\x02\x32\x12\x30/v1/fetch_optimal_action/{client_id}/{worker_id}\x12`\n\rProposeAction\x12%.sight.x.service.ProposeActionRequest\x1a&.sight.x.service.ProposeActionResponse\"\x00\x12W\n\nGetOutcome\x12\".sight.x.service.GetOutcomeRequest\x1a#.sight.x.service.GetOutcomeResponse\"\x00\x12\xa7\x01\n\x0f\x46inalizeEpisode\x12\'.sight.x.service.FinalizeEpisodeRequest\x1a(.sight.x.service.FinalizeEpisodeResponse\"A\x82\xd3\xe4\x93\x02;\",/v1/finalize_episode/{client_id}/{worker_id}:\x0b\x61\x63me_configb\x06proto3' ) _LOGFORMAT = DESCRIPTOR.enum_types_by_name['LogFormat'] @@ -434,8 +434,8 @@ _SIGHTSERVICE.methods_by_name['FinalizeEpisode']._options = None _SIGHTSERVICE.methods_by_name[ 'FinalizeEpisode']._serialized_options = b'\202\323\344\223\002;\",/v1/finalize_episode/{client_id}/{worker_id}:\013acme_config' - _LOGFORMAT._serialized_start = 3691 - _LOGFORMAT._serialized_end = 3782 + _LOGFORMAT._serialized_start = 3651 + _LOGFORMAT._serialized_end = 3742 _ACME_REQUEST._serialized_start = 163 _ACME_REQUEST._serialized_end = 584 _ACME_REQUEST_OBSERVATION._serialized_start = 275 @@ -489,27 +489,27 @@ _GETOUTCOMERESPONSE_OUTCOME_STATUS._serialized_start = 2759 _GETOUTCOMERESPONSE_OUTCOME_STATUS._serialized_end = 2835 _FINALIZEEPISODEREQUEST._serialized_start = 2838 - _FINALIZEEPISODEREQUEST._serialized_end = 3145 - _FINALIZEEPISODERESPONSE._serialized_start = 3147 - _FINALIZEEPISODERESPONSE._serialized_end = 3215 - _TESTREQUEST._serialized_start = 3217 - _TESTREQUEST._serialized_end = 3249 - _TESTRESPONSE._serialized_start = 3251 - _TESTRESPONSE._serialized_end = 3324 - _CREATEREQUEST._serialized_start = 3326 - _CREATEREQUEST._serialized_end = 3341 - _CREATERESPONSE._serialized_start = 3343 - _CREATERESPONSE._serialized_end = 3392 - _CLOSEREQUEST._serialized_start = 3394 - _CLOSEREQUEST._serialized_end = 3427 - _CLOSERESPONSE._serialized_start = 3429 - _CLOSERESPONSE._serialized_end = 3466 - _WORKERALIVEREQUEST._serialized_start = 3468 - _WORKERALIVEREQUEST._serialized_end = 3526 - _WORKERALIVERESPONSE._serialized_start = 3529 - _WORKERALIVERESPONSE._serialized_end = 3689 - _WORKERALIVERESPONSE_STATUSTYPE._serialized_start = 3622 - _WORKERALIVERESPONSE_STATUSTYPE._serialized_end = 3689 - _SIGHTSERVICE._serialized_start = 3785 - _SIGHTSERVICE._serialized_end = 5407 + _FINALIZEEPISODEREQUEST._serialized_end = 3092 + _FINALIZEEPISODERESPONSE._serialized_start = 3094 + _FINALIZEEPISODERESPONSE._serialized_end = 3162 + _TESTREQUEST._serialized_start = 3164 + _TESTREQUEST._serialized_end = 3196 + _TESTRESPONSE._serialized_start = 3198 + _TESTRESPONSE._serialized_end = 3225 + _CREATEREQUEST._serialized_start = 3227 + _CREATEREQUEST._serialized_end = 3242 + _CREATERESPONSE._serialized_start = 3244 + _CREATERESPONSE._serialized_end = 3293 + _CLOSEREQUEST._serialized_start = 3295 + _CLOSEREQUEST._serialized_end = 3328 + _CLOSERESPONSE._serialized_start = 3330 + _CLOSERESPONSE._serialized_end = 3367 + _WORKERALIVEREQUEST._serialized_start = 3369 + _WORKERALIVEREQUEST._serialized_end = 
3427 + _WORKERALIVERESPONSE._serialized_start = 3430 + _WORKERALIVERESPONSE._serialized_end = 3649 + _WORKERALIVERESPONSE_STATUSTYPE._serialized_start = 3582 + _WORKERALIVERESPONSE_STATUSTYPE._serialized_end = 3649 + _SIGHTSERVICE._serialized_start = 3745 + _SIGHTSERVICE._serialized_end = 5367 # @@protoc_insertion_point(module_scope) diff --git a/sight_service/shared_batch_messages.py b/sight_service/shared_batch_messages.py new file mode 100644 index 0000000..371b74c --- /dev/null +++ b/sight_service/shared_batch_messages.py @@ -0,0 +1,111 @@ +"""A module for refactoring Kokua code.""" + +import dataclasses +import threading +from typing import Any, Dict, Optional + +dataclass = dataclasses.dataclass + + +@dataclass +class DecisionMessage: + """A message that is sent to the agent to make a decision. + + Attributes: + action_params: A dictionary of parameters for the action. + action_id: The ID of the action. + reward: The reward for the action. + discount: The discount for the action. + outcome_params: A dictionary of parameters for the outcome. + """ + + action_params: Dict[str, Any] + action_id: int + reward: Optional[int] = None + discount: Optional[int] = None + outcome_params: Optional[Dict[str, Any]] = None + + +class CachedBatchMessages: + """A class for caching batch messages. + + Attributes: + batch_messages: A dictionary of cached DecisionMessages. + """ + + def __init__(self): + """Initialize the CachedBatchMessages instance.""" + self._lock = threading.Lock() + self.batch_messages: Dict[int, DecisionMessage] = {} + + def all_messages(self) -> Dict[int, DecisionMessage]: + """Return all messages in the cache. + + Returns: + A dictionary of all cached messages. + """ + with self._lock: + return self.batch_messages.copy() + + def get(self, key: int) -> Optional[DecisionMessage]: + """Retrieve a value from the cache. + + Args: + key: The key of the message to retrieve. + + Returns: + The DecisionMessage if found, None otherwise. + """ + with self._lock: + return self.batch_messages.get(key) + + def set(self, key: int, value: DecisionMessage) -> None: + """Set a value in the cache. + + Args: + key: The key to set. + value: The DecisionMessage to store. + """ + with self._lock: + self.batch_messages[key] = value + + def update(self, key: int, **kwargs) -> None: + """Update fields of an existing DecisionMessage in the cache. + + Args: + key: The key of the message to update. + **kwargs: Keyword arguments representing fields to update. + + Raises: + KeyError: If the key is not found in the cache. + AttributeError: If an invalid field is provided. + """ + with self._lock: + decision_message = self.batch_messages.get(key) + if not decision_message: + raise KeyError(f'Key {key} not found in cache.') + + for field_name, field_value in kwargs.items(): + if hasattr(decision_message, field_name): + setattr(decision_message, field_name, field_value) + else: + raise AttributeError( + f'Field {field_name} does not exist in DecisionMessage.') + + # Save the updated object back to the cache + self.batch_messages[key] = decision_message + + def delete(self, key: int) -> None: + """Delete a value from the cache. + + Args: + key: The key of the message to delete. 
+ """ + with self._lock: + if key in self.batch_messages: + del self.batch_messages[key] + + def clear(self) -> None: + """Clear the entire cache.""" + with self._lock: + self.batch_messages.clear() diff --git a/sight_service/single_action_optimizer.py b/sight_service/single_action_optimizer.py index e356771..2ef3852 100644 --- a/sight_service/single_action_optimizer.py +++ b/sight_service/single_action_optimizer.py @@ -24,6 +24,7 @@ from sight_service.message_queue import MessageQueue from sight_service.optimizer_instance import OptimizerInstance from sight_service.proto import service_pb2 +from sight_service.shared_batch_messages import CachedBatchMessages _file_name = "single_action_optimizer.py" @@ -50,22 +51,23 @@ def create(cls, action, attributes, reward=None, outcome=None): def update(self, reward=None, outcome=None, action=None, attributes=None): if reward is not None: - self.reward = reward + self.reward = reward if outcome is not None: - self.outcome = outcome + self.outcome = outcome if action is not None: - self.action = action + self.action = action if attributes is not None: - self.attributes = attributes + self.attributes = attributes return self def __str__(self): - return (f"[X]") - # (f"MessageDetails(\n" - # f"action: {self.action},\n" - # f"attributes: {self.attributes},\n" - # f"reward: {self.reward},\n" - # f"outcome: {self.outcome}\n)") + return (f"[X]") + # (f"MessageDetails(\n" + # f"action: {self.action},\n" + # f"attributes: {self.attributes},\n" + # f"reward: {self.reward},\n" + # f"outcome: {self.outcome}\n)") + class SingleActionOptimizer(OptimizerInstance): """An SingleActionOptimizer class that is generic for all optimizers. @@ -76,4 +78,6 @@ class SingleActionOptimizer(OptimizerInstance): def __init__(self): super().__init__() - self.queue: IMessageQueue = MessageQueue[MessageDetails](id_generator=IncrementalUUID()) + self.queue: IMessageQueue = MessageQueue[MessageDetails]( + id_generator=IncrementalUUID()) + self.cache: CachedBatchMessages = CachedBatchMessages() diff --git a/sight_service/tests/functional/test_shared_batch_messages.py b/sight_service/tests/functional/test_shared_batch_messages.py new file mode 100644 index 0000000..033ada6 --- /dev/null +++ b/sight_service/tests/functional/test_shared_batch_messages.py @@ -0,0 +1,98 @@ +"""Tests for CachedBatchMessages.""" + +import unittest + +from sight_service import shared_batch_messages +from sight_service.tests import colorful_tests + + +class TestCachedBatchMessages(unittest.TestCase): + """Tests for the CachedBatchMessages class. + + Attributes: + cache: The CachedBatchMessages object to test. 
+ """ + + def setUp(self): + """Sets up the CachedBatchMessages object for testing.""" + super().setUp() + self.cache = shared_batch_messages.CachedBatchMessages() + + def test_set_and_get_message(self): + msg = shared_batch_messages.DecisionMessage(action_params={"key": "value"}, + action_id=1) + self.cache.set(1, msg) + + retrieved_msg = self.cache.get(1) + self.assertIsNotNone(retrieved_msg) + self.assertEqual(retrieved_msg.action_id, 1) + + def test_get_nonexistent_message(self): + self.assertIsNone(self.cache.get(999)) + + def test_update_message(self): + msg = shared_batch_messages.DecisionMessage(action_params={"key": "value"}, + action_id=1) + self.cache.set(1, msg) + + self.cache.update(1, reward=20, discount=5) + updated_msg = self.cache.get(1) + + self.assertEqual(updated_msg.reward, 20) + self.assertEqual(updated_msg.discount, 5) + + def test_update_nonexistent_message(self): + with self.assertRaises(KeyError): + self.cache.update(999, reward=20) + + def test_update_invalid_field(self): + msg = shared_batch_messages.DecisionMessage(action_params={"key": "value"}, + action_id=1) + self.cache.set(1, msg) + + with self.assertRaises(AttributeError): + self.cache.update(1, invalid_field="value") + + def test_delete_message(self): + msg = shared_batch_messages.DecisionMessage(action_params={"key": "value"}, + action_id=1) + self.cache.set(1, msg) + + self.cache.delete(1) + self.assertIsNone(self.cache.get(1)) + + def test_delete_nonexistent_message(self): + # Deleting a non-existent message should not raise an error + self.cache.delete(999) + + def test_all_messages(self): + """Tests that all_messages returns all messages in the cache.""" + msg1 = shared_batch_messages.DecisionMessage( + action_params={"key1": "value1"}, action_id=1) + msg2 = shared_batch_messages.DecisionMessage( + action_params={"key2": "value2"}, action_id=2) + + self.cache.set(1, msg1) + self.cache.set(2, msg2) + + all_messages = self.cache.all_messages() + self.assertEqual(len(all_messages), 2) + self.assertEqual(all_messages[1].action_id, 1) + self.assertEqual(all_messages[2].action_id, 2) + + def test_clear_cache(self): + """Tests that clear_cache clears the cache.""" + msg1 = shared_batch_messages.DecisionMessage( + action_params={"key1": "value1"}, action_id=1) + msg2 = shared_batch_messages.DecisionMessage( + action_params={"key2": "value2"}, action_id=2) + + self.cache.set(1, msg1) + self.cache.set(2, msg2) + + self.cache.clear() + self.assertEqual(len(self.cache.all_messages()), 0) + + +if __name__ == "__main__": + unittest.main(testRunner=colorful_tests.ColorfulTestRunner()) diff --git a/sight_service/worklist_scheduler_opt.py b/sight_service/worklist_scheduler_opt.py index 9d3cd64..334ddd3 100644 --- a/sight_service/worklist_scheduler_opt.py +++ b/sight_service/worklist_scheduler_opt.py @@ -48,7 +48,7 @@ def __init__(self): def add_outcome_to_outcome_response( self, msg_details: MessageDetails, sample_id, - outcome: service_pb2.GetOutcomeResponse.outcome): + outcome: service_pb2.GetOutcomeResponse.Outcome): outcome.action_id = sample_id outcome.status = service_pb2.GetOutcomeResponse.Outcome.Status.COMPLETED outcome.reward = msg_details.reward @@ -156,21 +156,16 @@ def finalize_episode( logging.info("self.queue => %s", self.queue) - all_active_messages = self.queue.get_active() - - active_messages: Dict[str, MessageDetails] = all_active_messages[ - request.worker_id] - - for action_id, message in list(active_messages.items()): + for i in range(len(request.decision_messages)): 
self.queue.complete_message( - message_id=action_id, worker_id=request.worker_id, + message_id=request.decision_messages[i].action_id, update_fn=lambda msg: msg.update( - reward=request.decision_outcome.reward, - outcome=convert_proto_to_dict(proto=request.decision_outcome. - outcome_params), - action=convert_proto_to_dict(proto=request.decision_point. - choice_params))) + reward=request.decision_messages[i].decision_outcome.reward, + outcome=convert_proto_to_dict(proto=request.decision_messages[i]. + decision_outcome.outcome_params), + action=convert_proto_to_dict(proto=request.decision_messages[i]. + decision_point.choice_params))) logging.info("self.queue => %s", self.queue) logging.debug("<<<< Out %s of %s", method_name, _file_name) @@ -210,19 +205,25 @@ def WorkerAlive( ) -> service_pb2.WorkerAliveResponse: method_name = "WorkerAlive" logging.debug(">>>> In %s of %s", method_name, _file_name) - logging.info("self.queue => %s", self.queue) + response = service_pb2.WorkerAliveResponse() + if (self.exp_completed): worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_DONE elif (not self.queue.get_status()["pending"]): worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_RETRY else: worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_ACT - - self.queue.create_active_batch(worker_id=request.worker_id) - + batched_msgs = self.queue.create_active_batch(worker_id=request.worker_id, + new_batch_size=2) + for action_id, msg in batched_msgs.items(): + decision_message = response.decision_messages.add() + decision_message.action_id = action_id + decision_message.action.CopyFrom(convert_dict_to_proto(dict=msg.action)) + + response.status_type = worker_alive_status logging.info("self.queue => %s", self.queue) logging.info("worker_alive_status is %s", worker_alive_status) logging.debug("<<<< Out %s of %s", method_name, _file_name) - return service_pb2.WorkerAliveResponse(status_type=worker_alive_status) + return response From 738f13b497a7c82d50f18d52a42a4cd2562197c4 Mon Sep 17 00:00:00 2001 From: hrushikeshm-g Date: Wed, 11 Dec 2024 05:55:13 +0000 Subject: [PATCH 24/25] msg queue __str__ changed --- fvs_sight/fvs_api.py | 2 +- fvs_sight/fvs_sight_worker.py | 6 + py/sight/widgets/decision/decision.py | 4 +- .../single_action_optimizer_client.py | 2 + sight_service/message_queue.py | 110 +++++++++--------- sight_service/single_action_optimizer.py | 2 - sight_service/worklist_scheduler_opt.py | 15 ++- 7 files changed, 76 insertions(+), 65 deletions(-) diff --git a/fvs_sight/fvs_api.py b/fvs_sight/fvs_api.py index 4cd8eca..142b87e 100644 --- a/fvs_sight/fvs_api.py +++ b/fvs_sight/fvs_api.py @@ -56,7 +56,7 @@ def get_action_attrs(): """Returns the action attributes for the FVS action. 
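    With expand_params_for_cycles commented out below, only the region and
    project_id attributes are returned.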
""" action_config = {'region': None, 'project_id': None} - action_config.update(expand_params_for_cycles(fvs_params=FVS_PARAMS)) + # action_config.update(expand_params_for_cycles(fvs_params=FVS_PARAMS)) return create_attr_props(action_config) diff --git a/fvs_sight/fvs_sight_worker.py b/fvs_sight/fvs_sight_worker.py index 57e7204..4ac98f4 100644 --- a/fvs_sight/fvs_sight_worker.py +++ b/fvs_sight/fvs_sight_worker.py @@ -1,5 +1,7 @@ import json import os +import random +import time from typing import Sequence from absl import app @@ -21,6 +23,10 @@ def simulate_fvs(sight, params_dict): 227.6, 273.4, 273.3, 248.6, 165.3, 130.6, 106.4, 92.1, 81.7, 62.8 ] sim_stream = pd.Series(mitigation_list) + rand_number = 5 + # random.randint(60,120) + time.sleep(rand_number) + print(f'sleeping for some time he he 😪 , {rand_number}') # print(sim_stream) return sim_stream diff --git a/py/sight/widgets/decision/decision.py b/py/sight/widgets/decision/decision.py index 37bbcdf..9c8e572 100644 --- a/py/sight/widgets/decision/decision.py +++ b/py/sight/widgets/decision/decision.py @@ -39,7 +39,7 @@ # from sight.widgets.decision.acme.acme_optimizer_client import ( # AcmeOptimizerClient # ) -from sight.widgets.decision.env_driver import driver_fn +# from sight.widgets.decision.env_driver import driver_fn from sight.widgets.decision.llm_optimizer_client import LLMOptimizerClient from sight.widgets.decision.single_action_optimizer_client import ( SingleActionOptimizerClient @@ -844,7 +844,7 @@ def execute_local_training(sight, decision_configuration, driver_fn, env): sight, ) # not used anymore - for worklist scheduler - # num_samples_to_run = _NUM_TRIALS.value + num_samples_to_run = _NUM_TRIALS.value if FLAGS.deployment_mode == 'docker_local': trials.start_job_in_docker( diff --git a/py/sight/widgets/decision/single_action_optimizer_client.py b/py/sight/widgets/decision/single_action_optimizer_client.py index b977093..e43cbea 100644 --- a/py/sight/widgets/decision/single_action_optimizer_client.py +++ b/py/sight/widgets/decision/single_action_optimizer_client.py @@ -23,6 +23,7 @@ from sight.utils.proto_conversion import update_proto_map from sight.widgets.decision.optimizer_client import OptimizerClient from sight_service.proto import service_pb2 +from sight_service.shared_batch_messages import CachedBatchMessages class SingleActionOptimizerClient(OptimizerClient): @@ -34,6 +35,7 @@ def __init__( sight, algorithm=None): super().__init__(optimizer_type) + self.cache: CachedBatchMessages = CachedBatchMessages() self._sight = sight self._last_action = None self.exp_completed = False diff --git a/sight_service/message_queue.py b/sight_service/message_queue.py index 6ed9397..a551bc2 100644 --- a/sight_service/message_queue.py +++ b/sight_service/message_queue.py @@ -92,9 +92,9 @@ def push_message(self, message: T) -> ID: """ ... - def create_active_batch( - self, worker_id: str, new_batch_size: Optional[int] = None - ) -> Dict[ID, T]: + def create_active_batch(self, + worker_id: str, + new_batch_size: Optional[int] = None) -> Dict[ID, T]: """Move a batch of messages for a given worker into active list. Args: @@ -107,9 +107,10 @@ def create_active_batch( """ ... - def complete_message( - self, message_id: ID, worker_id: str, update_fn: Callable[[T], T] = None - ) -> None: + def complete_message(self, + message_id: ID, + worker_id: str, + update_fn: Callable[[T], T] = None) -> None: """Completes a message of the given message ID of the given worker it moves it to the completed queue. 
Args: @@ -129,32 +130,33 @@ def get_all_messages(self) -> Dict[str, Dict[ID, T]]: ... def get_pending(self) -> Dict[ID, T]: - """Returns all pending messages in the queue.""" - ... + """Returns all pending messages in the queue.""" + ... def get_active(self) -> Dict[str, Dict[ID, T]]: - """Returns all active messages in the queue.""" - ... + """Returns all active messages in the queue.""" + ... def get_completed(self) -> Dict[ID, T]: - """Returns all completed messages in the queue.""" - ... + """Returns all completed messages in the queue.""" + ... def find_message_location(self, message_id: ID) -> MessageState: """Returns the location of the message in the message queue.""" ... def is_message_in_pending(self, message_id: ID) -> bool: - """Checks if the message is in the pending state.""" - ... + """Checks if the message is in the pending state.""" + ... def is_message_in_active(self, message_id: ID) -> bool: - """Checks if the message is in the active state.""" - ... + """Checks if the message is in the active state.""" + ... def is_message_in_completed(self, message_id: ID) -> bool: - """Checks if the message is in the completed state.""" - ... + """Checks if the message is in the completed state.""" + ... + class MessageQueue(IMessageQueue[T]): """A message queue is a data structure that stores messages. @@ -211,20 +213,25 @@ def __init__( self.completed_lock = lock_factory() def __str__(self) -> str: - all_messages = self.get_all_messages() - + # all_messages = self.get_all_messages() + messages_status = self.get_status() result = ['MessageQueue:'] result.append(' Pending Messages:') - for msg_id, message in all_messages['pending'].items(): - result.append(f' ID: {msg_id}, Message: {message}') + result.append(f' Messages 📩 : {messages_status["pending"]}') + # for msg_id, message in all_messages['pending'].items(): + # result.append(f' ID: {msg_id}, Message: {message}') result.append(' Active Messages:') - for msg_id, message in all_messages['active'].items(): - result.append(f' ID: {msg_id}, Message: {message}') + result.append(f' Messages 📨 : {messages_status["active"]}') + + for worker_id, messages in self.get_active().items(): + result.append(f' ID: {worker_id}, Messages 📨: {len(messages)}') result.append(' Completed Messages:') - for msg_id, message in all_messages['completed'].items(): - result.append(f' ID: {msg_id}, Message: {message}') + result.append(f' Messages ✉️ : {messages_status["completed"]}') + + # for msg_id, message in all_messages['completed'].items(): + # result.append(f' ID: {msg_id}, Message: {message}') return '\n'.join(result) @@ -244,9 +251,9 @@ def push_message(self, message: T) -> ID: return unique_id @overrides - def create_active_batch( - self, worker_id: str, new_batch_size: Optional[int] = None - ) -> Dict[ID, T]: + def create_active_batch(self, + worker_id: str, + new_batch_size: Optional[int] = None) -> Dict[ID, T]: """Move a batch of messages for a given worker into active list. Args: @@ -257,9 +264,8 @@ def create_active_batch( Returns: A dictionary of messages that were processed, keyed by message ID. 
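    Example (illustrative sketch; 'w1' is a placeholder worker id):
      batch = queue.create_active_batch(worker_id='w1', new_batch_size=2)
      for msg_id in batch:
        queue.complete_message(message_id=msg_id, worker_id='w1')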
""" - batch_size = ( - new_batch_size if new_batch_size is not None else self.batch_size - ) + batch_size = (new_batch_size + if new_batch_size is not None else self.batch_size) batch: Dict[ID, T] = {} with self.pending_lock.gen_wlock(): @@ -276,9 +282,10 @@ def create_active_batch( return batch @overrides - def complete_message( - self, message_id: ID, worker_id: str, update_fn: Callable[[T], T] = None - ) -> None: + def complete_message(self, + message_id: ID, + worker_id: str, + update_fn: Callable[[T], T] = None) -> None: """Completes a message of the given message ID of the given worker it moves it to the completed queue. Args: @@ -292,9 +299,9 @@ def complete_message( del self.active[worker_id][message_id] if update_fn is not None: - logging.info('Before update_fn msg: %s', message) - message = update_fn(message) # Apply the lambda to update the message - logging.info('After update_fn msg: %s', message) + logging.info('Before update_fn msg: %s', message) + message = update_fn(message) # Apply the lambda to update the message + logging.info('After update_fn msg: %s', message) with self.completed_lock.gen_wlock(): self.completed[message_id] = message @@ -337,38 +344,37 @@ def get_all_messages(self) -> Dict[str, Any]: @overrides def get_pending(self) -> Dict[ID, T]: - """Returns all pending messages in the queue.""" - with self.pending_lock.gen_rlock(): - return copy.copy(self.pending) + """Returns all pending messages in the queue.""" + with self.pending_lock.gen_rlock(): + return copy.copy(self.pending) @overrides def get_active(self) -> Dict[str, Dict[ID, T]]: - """Returns all active messages in the queue.""" - with self.active_lock.gen_rlock(): - return copy.copy(self.active) + """Returns all active messages in the queue.""" + with self.active_lock.gen_rlock(): + return copy.copy(self.active) @overrides def get_completed(self) -> Dict[ID, T]: - """Returns all completed messages in the queue.""" - with self.completed_lock.gen_rlock(): - return copy.copy(self.completed) - + """Returns all completed messages in the queue.""" + with self.completed_lock.gen_rlock(): + return copy.copy(self.completed) @overrides - def is_message_in_pending(self,message_id: ID) -> bool: + def is_message_in_pending(self, message_id: ID) -> bool: """Returns the true if the message in the pending queue.""" with self.pending_lock.gen_rlock(): return message_id in self.pending @overrides - def is_message_in_active(self,message_id: ID) -> bool: + def is_message_in_active(self, message_id: ID) -> bool: """Returns the true if the message in the active queue.""" with self.active_lock.gen_rlock(): for _, messages in self.active.items(): return message_id in messages @overrides - def is_message_in_completed(self,message_id: ID) -> bool: + def is_message_in_completed(self, message_id: ID) -> bool: """Returns the true if the message in the completed queue.""" with self.completed_lock.gen_rlock(): return message_id in self.completed @@ -378,7 +384,7 @@ def find_message_location(self, message_id: ID) -> MessageState: """Returns the location of the message in the message queue.""" with self.pending_lock.gen_rlock(): if message_id in self.pending: - return MessageState.PENDING + return MessageState.PENDING with self.active_lock.gen_rlock(): for _, messages in self.active.items(): diff --git a/sight_service/single_action_optimizer.py b/sight_service/single_action_optimizer.py index 2ef3852..e648464 100644 --- a/sight_service/single_action_optimizer.py +++ b/sight_service/single_action_optimizer.py @@ -24,7 +24,6 @@ from 
sight_service.message_queue import MessageQueue from sight_service.optimizer_instance import OptimizerInstance from sight_service.proto import service_pb2 -from sight_service.shared_batch_messages import CachedBatchMessages _file_name = "single_action_optimizer.py" @@ -80,4 +79,3 @@ def __init__(self): super().__init__() self.queue: IMessageQueue = MessageQueue[MessageDetails]( id_generator=IncrementalUUID()) - self.cache: CachedBatchMessages = CachedBatchMessages() diff --git a/sight_service/worklist_scheduler_opt.py b/sight_service/worklist_scheduler_opt.py index 334ddd3..ffd50a1 100644 --- a/sight_service/worklist_scheduler_opt.py +++ b/sight_service/worklist_scheduler_opt.py @@ -82,9 +82,8 @@ def propose_action( unique_id = self.queue.push_message(message) - logging.info("self.queue => %s", self.queue) - response = service_pb2.ProposeActionResponse(action_id=unique_id) + logging.info("self.queue => %s", self.queue) return response @overrides @@ -92,8 +91,6 @@ def GetOutcome( self, request: service_pb2.GetOutcomeRequest) -> service_pb2.GetOutcomeResponse: - logging.info('self.queue => %s', self.queue) - all_completed_messages = self.queue.get_completed() response = service_pb2.GetOutcomeResponse() @@ -123,6 +120,7 @@ def GetOutcome( else: outcome.status = service_pb2.GetOutcomeResponse.Outcome.Status.NOT_EXIST outcome.response_str = f'!! requested sample Id {sample_id} does not exist !!' + logging.info('self.queue => %s', self.queue) return response @overrides @@ -131,7 +129,6 @@ def decision_point( ) -> service_pb2.DecisionPointResponse: method_name = "decision_point" logging.debug(">>>> In %s of %s", method_name, _file_name) - logging.info('self.queue ==> %s', self.queue) all_active_messages = self.queue.get_active() @@ -145,6 +142,7 @@ def decision_point( response.action.CopyFrom(convert_dict_to_proto(dict=next_action)) response.action_type = service_pb2.DecisionPointResponse.ActionType.AT_ACT logging.debug("<<<< Out %s of %s", method_name, _file_name) + logging.info('self.queue ==> %s', self.queue) return response @overrides @@ -154,7 +152,7 @@ def finalize_episode( method_name = "finalize_episode" logging.debug(">>>> In %s of %s", method_name, _file_name) - logging.info("self.queue => %s", self.queue) + # logging.info("self.queue => %s", self.queue) for i in range(len(request.decision_messages)): self.queue.complete_message( @@ -166,6 +164,7 @@ def finalize_episode( decision_outcome.outcome_params), action=convert_proto_to_dict(proto=request.decision_messages[i]. 
decision_point.choice_params))) + logging.info("self.queue => %s", self.queue) logging.debug("<<<< Out %s of %s", method_name, _file_name) @@ -205,7 +204,7 @@ def WorkerAlive( ) -> service_pb2.WorkerAliveResponse: method_name = "WorkerAlive" logging.debug(">>>> In %s of %s", method_name, _file_name) - logging.info("self.queue => %s", self.queue) + # logging.info("self.queue => %s", self.queue) response = service_pb2.WorkerAliveResponse() @@ -216,7 +215,7 @@ def WorkerAlive( else: worker_alive_status = service_pb2.WorkerAliveResponse.StatusType.ST_ACT batched_msgs = self.queue.create_active_batch(worker_id=request.worker_id, - new_batch_size=2) + new_batch_size=10) for action_id, msg in batched_msgs.items(): decision_message = response.decision_messages.add() decision_message.action_id = action_id From 2155617e1b669b08232c8e2b191f9128f5b891a9 Mon Sep 17 00:00:00 2001 From: Hrushikesh Makode <152846252+hrushikeshm-g@users.noreply.github.com> Date: Wed, 8 Jan 2025 16:34:17 +0000 Subject: [PATCH 25/25] Batch action analysis updates (#70) * analysis changes * analysis.ipynb added * propose_action updated to async method * little refactor * batch_size from flag added * little refactor * pr comments --- fvs_sight/fvs_sight_worker.py | 7 +- py/exp/analysis.ipynb | 3102 ++++++++++++++++++++++ py/sight/demo/portfolio_demo.py | 212 +- py/sight/proto/sight.proto | 1 + py/sight/proto/sight_pb2.py | 112 +- py/sight/utility.py | 2 +- py/sight/widgets/decision/decision.py | 8 +- py/sight/widgets/decision/proposal.py | 38 +- sight_service/message_queue.py | 61 + sight_service/proto/api_descriptor.pb | Bin 146455 -> 146569 bytes sight_service/service_root.py | 4 +- sight_service/single_action_optimizer.py | 4 +- sight_service/worklist_scheduler_opt.py | 13 +- 13 files changed, 3343 insertions(+), 221 deletions(-) create mode 100644 py/exp/analysis.ipynb diff --git a/fvs_sight/fvs_sight_worker.py b/fvs_sight/fvs_sight_worker.py index 4ac98f4..7be25c9 100644 --- a/fvs_sight/fvs_sight_worker.py +++ b/fvs_sight/fvs_sight_worker.py @@ -23,10 +23,9 @@ def simulate_fvs(sight, params_dict): 227.6, 273.4, 273.3, 248.6, 165.3, 130.6, 106.4, 92.1, 81.7, 62.8 ] sim_stream = pd.Series(mitigation_list) - rand_number = 5 - # random.randint(60,120) - time.sleep(rand_number) - print(f'sleeping for some time he he 😪 , {rand_number}') + simulation_time = 10 + time.sleep(simulation_time) + print(f'sleeping for some time he he 😪 , {simulation_time}') # print(sim_stream) return sim_stream diff --git a/py/exp/analysis.ipynb b/py/exp/analysis.ipynb new file mode 100644 index 0000000..acab6d7 --- /dev/null +++ b/py/exp/analysis.ipynb @@ -0,0 +1,3102 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "from google.cloud import storage\n", + "import pandas as pd\n", + "import matplotlib.pyplot as plt\n", + "import json\n", + "from datetime import datetime\n", + "from urllib.parse import urlparse\n", + "import plotly.express as px\n", + "import seaborn as sns\n", + "\n", + "\n", + "\n", + "def read_logs_from_gs_uri(gs_uri):\n", + " \"\"\"\n", + " Read logs from a Google Cloud Storage file given a gs:// URI.\n", + " \"\"\"\n", + " # Parse the gs:// URI\n", + " parsed = urlparse(gs_uri)\n", + " if parsed.scheme != \"gs\":\n", + " raise ValueError(f\"Invalid URI scheme: {gs_uri}. 
Must start with 'gs://'.\")\n", + "\n", + " bucket_name = parsed.netloc\n", + " file_path = parsed.path.lstrip('/')\n", + "\n", + " # Initialize GCS client\n", + " client = storage.Client()\n", + " bucket = client.bucket(bucket_name)\n", + " blob = bucket.blob(file_path)\n", + " \n", + " # Download the JSON content as a string\n", + " logs_json = blob.download_as_string()\n", + "\n", + " # Parse the JSON string into a list of dictionaries\n", + " logs = json.loads(logs_json)\n", + " return logs\n", + "\n", + "def analyze_message_flow(logs,desc = ''):\n", + " \n", + " # Create a DataFrame from the logs\n", + " df = pd.DataFrame(logs)\n", + "\n", + " # Convert the 'timestamp' column to datetime type\n", + " df['timestamp'] = pd.to_datetime(df['timestamp'])\n", + "\n", + " # Sort by 'timestamp' to maintain proper order\n", + " df = df.sort_values('timestamp')\n", + "\n", + " # Create an interactive scatter plot for the message states over time\n", + " fig = px.line(\n", + " df,\n", + " x='timestamp',\n", + " y='message_id',\n", + " color='state', # Color by the state to differentiate states visually\n", + " title= desc + \"Plot for Message Transitions Over Time\",\n", + " labels={\"timestamp\": \"Time\", \"message id\": \"Message_ID\"},\n", + " hover_data=[\"message_id\"],\n", + " template=\"plotly_dark\", # Optional: dark theme\n", + " markers=True\n", + " )\n", + "\n", + " # Customize layout for better readability\n", + " fig.update_layout(\n", + " hovermode=\"closest\",\n", + " xaxis=dict(showgrid=True, title=\"Timestamp\"),\n", + " yaxis=dict(showgrid=True, title=\"State\"),\n", + " margin=dict(t=40, b=40, l=40, r=40), # Adjust margins to make space for labels\n", + " )\n", + " \n", + " # Show the plot\n", + " fig.show()\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "def calculate_latency_per_messages(logs):\n", + " df = pd.DataFrame(logs)\n", + " df['timestamp'] = pd.to_datetime(df['timestamp']) # Convert to datetime\n", + " pending_times = df[df['state'] == 'pending'].set_index('message_id')['timestamp']\n", + " completed_times = df[df['state'] == 'completed'].set_index('message_id')['timestamp']\n", + " latency = completed_times - pending_times\n", + " latency_df = latency.reset_index(name='latency')\n", + " latency_df['latency_seconds'] = latency_df['latency'].dt.total_seconds()\n", + " # Summary statistics\n", + " # stats = latency_df['latency_seconds'].describe()\n", + " # print(stats)\n", + " return latency_df\n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "def calculate_throughput(logs, time_interval='10S'):\n", + " \"\"\"\n", + " Calculate throughput (tasks completed per time interval) from a list of logs.\n", + " \n", + " Args:\n", + " logs (list): List of dictionaries containing log data. 
Each log should include 'timestamp' and 'state'.\n", + " time_interval (str): Pandas offset string for time intervals (e.g., '1T' for 1 minute).\n", + " \n", + " Returns:\n", + " pd.DataFrame: DataFrame with time intervals and corresponding throughput.\n", + " \"\"\"\n", + " # Convert logs to a DataFrame\n", + " df = pd.DataFrame(logs)\n", + " \n", + " # Ensure 'timestamp' is in datetime format\n", + " df['timestamp'] = pd.to_datetime(df['timestamp'])\n", + " \n", + " # Filter logs for the 'completed' state\n", + " completed_logs = df[df['state'] == 'completed']\n", + " \n", + " # Calculate throughput by resampling based on time intervals\n", + " throughput = completed_logs.set_index('timestamp').resample(time_interval).size()\n", + " \n", + " print(f\"throughput => {throughput}\")\n", + " \n", + " # Reset index and rename the throughput column\n", + " return throughput.reset_index(name='throughput')\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "import plotly.graph_objects as go\n", + "\n", + "def plot_throughput_trends(throughput_df, desc=''):\n", + " \"\"\"\n", + " Plots interactive throughput trends over time using Plotly.\n", + "\n", + " Args:\n", + " throughput_df (pd.DataFrame): DataFrame with `timestamp` and `throughput` columns.\n", + " desc (str): Description or title prefix for the plot.\n", + " \"\"\"\n", + " fig = go.Figure()\n", + "\n", + " # Add throughput trace\n", + " fig.add_trace(\n", + " go.Scatter(\n", + " x=throughput_df['timestamp'],\n", + " y=throughput_df['throughput'],\n", + " mode='lines+markers',\n", + " marker=dict(color='green'),\n", + " line=dict(color='green'),\n", + " name='Throughput'\n", + " )\n", + " )\n", + "\n", + " # Update layout\n", + " fig.update_layout(\n", + " title=f'{desc} Plot for Throughput Trends Over Time',\n", + " xaxis_title='Time',\n", + " yaxis_title='Tasks Completed (Throughput)',\n", + " template='plotly_dark',\n", + " legend=dict(title='Legend'),\n", + " xaxis=dict(showgrid=True),\n", + " yaxis=dict(showgrid=True),\n", + " )\n", + "\n", + " fig.show()\n", + "\n", + "def plot_latency_trends(latency_df, desc=''):\n", + " \"\"\"\n", + " Plots interactive latency trends over message IDs using Plotly.\n", + "\n", + " Args:\n", + " latency_df (pd.DataFrame): DataFrame with `message_id` and `latency_seconds` columns.\n", + " desc (str): Description or title prefix for the plot.\n", + " \"\"\"\n", + " fig = go.Figure()\n", + "\n", + " # Add latency trace\n", + " fig.add_trace(\n", + " go.Scatter(\n", + " x=latency_df['message_id'],\n", + " y=latency_df['latency_seconds'],\n", + " mode='lines+markers',\n", + " marker=dict(color='blue'),\n", + " line=dict(color='blue'),\n", + " name='Latency'\n", + " )\n", + " )\n", + "\n", + " # Update layout\n", + " fig.update_layout(\n", + " title=f'{desc} Plot for Latency Trends Over Time',\n", + " xaxis_title='Message ID',\n", + " yaxis_title='Latency (seconds)',\n", + " template='plotly_dark',\n", + " legend=dict(title='Legend'),\n", + " xaxis=dict(showgrid=True),\n", + " yaxis=dict(showgrid=True),\n", + " )\n", + "\n", + " fig.show()\n" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.plotly.v1+json": { + "config": { + "plotlyServerURL": "https://plot.ly" + }, + "data": [ + { + "hovertemplate": "state=pending
<br>Time=%{x}<br>message_id=%{y}"
[remaining plotly figure JSON output omitted: interactive line plot "Plot for Message Transitions Over Time" plotting message_id 1-10 against timestamp with one trace per state; all ten messages pending at 2025-01-08 16:12:16, activated in two batches of five (16:12:41 and 16:13:33), completed likewise (16:13:32 and 16:14:24), rendered with the plotly_dark template boilerplate]
"linecolor": "#506784", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "#283442", + "zerolinewidth": 2 + } + } + }, + "title": { + "text": "okPlot for Message Transitions Over Time" + }, + "xaxis": { + "anchor": "y", + "domain": [ + 0, + 1 + ], + "showgrid": true, + "title": { + "text": "Timestamp" + } + }, + "yaxis": { + "anchor": "x", + "domain": [ + 0, + 1 + ], + "showgrid": true, + "title": { + "text": "State" + } + } + } + } + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.plotly.v1+json": { + "config": { + "plotlyServerURL": "https://plot.ly" + }, + "data": [ + { + "line": { + "color": "blue" + }, + "marker": { + "color": "blue" + }, + "mode": "lines+markers", + "name": "Latency", + "type": "scatter", + "x": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10 + ], + "y": [ + 76.350407, + 76.340876, + 76.184083, + 76.166457, + 76.161652, + 128.404581, + 128.403229, + 128.374545, + 128.370014, + 128.358192 + ] + } + ], + "layout": { + "legend": { + "title": { + "text": "Legend" + } + }, + "template": { + "data": { + "bar": [ + { + "error_x": { + "color": "#f2f5fa" + }, + "error_y": { + "color": "#f2f5fa" + }, + "marker": { + "line": { + "color": "rgb(17,17,17)", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "bar" + } + ], + "barpolar": [ + { + "marker": { + "line": { + "color": "rgb(17,17,17)", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "barpolar" + } + ], + "carpet": [ + { + "aaxis": { + "endlinecolor": "#A2B1C6", + "gridcolor": "#506784", + "linecolor": "#506784", + "minorgridcolor": "#506784", + "startlinecolor": "#A2B1C6" + }, + "baxis": { + "endlinecolor": "#A2B1C6", + "gridcolor": "#506784", + "linecolor": "#506784", + "minorgridcolor": "#506784", + "startlinecolor": "#A2B1C6" + }, + "type": "carpet" + } + ], + "choropleth": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "choropleth" + } + ], + "contour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "contour" + } + ], + "contourcarpet": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "contourcarpet" + } + ], + "heatmap": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "heatmap" + } + ], + "heatmapgl": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + 
], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "heatmapgl" + } + ], + "histogram": [ + { + "marker": { + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "histogram" + } + ], + "histogram2d": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "histogram2d" + } + ], + "histogram2dcontour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "histogram2dcontour" + } + ], + "mesh3d": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "mesh3d" + } + ], + "parcoords": [ + { + "line": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "parcoords" + } + ], + "pie": [ + { + "automargin": true, + "type": "pie" + } + ], + "scatter": [ + { + "marker": { + "line": { + "color": "#283442" + } + }, + "type": "scatter" + } + ], + "scatter3d": [ + { + "line": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatter3d" + } + ], + "scattercarpet": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattercarpet" + } + ], + "scattergeo": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattergeo" + } + ], + "scattergl": [ + { + "marker": { + "line": { + "color": "#283442" + } + }, + "type": "scattergl" + } + ], + "scattermapbox": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattermapbox" + } + ], + "scatterpolar": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterpolar" + } + ], + "scatterpolargl": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterpolargl" + } + ], + "scatterternary": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterternary" + } + ], + "surface": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "surface" + } + ], + "table": [ + { + "cells": { + "fill": { + "color": "#506784" + }, + "line": { + "color": "rgb(17,17,17)" + } + }, + 
"header": { + "fill": { + "color": "#2a3f5f" + }, + "line": { + "color": "rgb(17,17,17)" + } + }, + "type": "table" + } + ] + }, + "layout": { + "annotationdefaults": { + "arrowcolor": "#f2f5fa", + "arrowhead": 0, + "arrowwidth": 1 + }, + "autotypenumbers": "strict", + "coloraxis": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "colorscale": { + "diverging": [ + [ + 0, + "#8e0152" + ], + [ + 0.1, + "#c51b7d" + ], + [ + 0.2, + "#de77ae" + ], + [ + 0.3, + "#f1b6da" + ], + [ + 0.4, + "#fde0ef" + ], + [ + 0.5, + "#f7f7f7" + ], + [ + 0.6, + "#e6f5d0" + ], + [ + 0.7, + "#b8e186" + ], + [ + 0.8, + "#7fbc41" + ], + [ + 0.9, + "#4d9221" + ], + [ + 1, + "#276419" + ] + ], + "sequential": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "sequentialminus": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ] + }, + "colorway": [ + "#636efa", + "#EF553B", + "#00cc96", + "#ab63fa", + "#FFA15A", + "#19d3f3", + "#FF6692", + "#B6E880", + "#FF97FF", + "#FECB52" + ], + "font": { + "color": "#f2f5fa" + }, + "geo": { + "bgcolor": "rgb(17,17,17)", + "lakecolor": "rgb(17,17,17)", + "landcolor": "rgb(17,17,17)", + "showlakes": true, + "showland": true, + "subunitcolor": "#506784" + }, + "hoverlabel": { + "align": "left" + }, + "hovermode": "closest", + "mapbox": { + "style": "dark" + }, + "paper_bgcolor": "rgb(17,17,17)", + "plot_bgcolor": "rgb(17,17,17)", + "polar": { + "angularaxis": { + "gridcolor": "#506784", + "linecolor": "#506784", + "ticks": "" + }, + "bgcolor": "rgb(17,17,17)", + "radialaxis": { + "gridcolor": "#506784", + "linecolor": "#506784", + "ticks": "" + } + }, + "scene": { + "xaxis": { + "backgroundcolor": "rgb(17,17,17)", + "gridcolor": "#506784", + "gridwidth": 2, + "linecolor": "#506784", + "showbackground": true, + "ticks": "", + "zerolinecolor": "#C8D4E3" + }, + "yaxis": { + "backgroundcolor": "rgb(17,17,17)", + "gridcolor": "#506784", + "gridwidth": 2, + "linecolor": "#506784", + "showbackground": true, + "ticks": "", + "zerolinecolor": "#C8D4E3" + }, + "zaxis": { + "backgroundcolor": "rgb(17,17,17)", + "gridcolor": "#506784", + "gridwidth": 2, + "linecolor": "#506784", + "showbackground": true, + "ticks": "", + "zerolinecolor": "#C8D4E3" + } + }, + "shapedefaults": { + "line": { + "color": "#f2f5fa" + } + }, + "sliderdefaults": { + "bgcolor": "#C8D4E3", + "bordercolor": "rgb(17,17,17)", + "borderwidth": 1, + "tickwidth": 0 + }, + "ternary": { + "aaxis": { + "gridcolor": "#506784", + "linecolor": "#506784", + "ticks": "" + }, + "baxis": { + "gridcolor": "#506784", + "linecolor": "#506784", + "ticks": "" + }, + "bgcolor": "rgb(17,17,17)", + "caxis": { + "gridcolor": "#506784", + "linecolor": "#506784", + "ticks": "" + } + }, + "title": { + "x": 0.05 + }, + "updatemenudefaults": { + "bgcolor": "#506784", + "borderwidth": 0 + }, + "xaxis": { + "automargin": true, + "gridcolor": 
"#283442", + "linecolor": "#506784", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "#283442", + "zerolinewidth": 2 + }, + "yaxis": { + "automargin": true, + "gridcolor": "#283442", + "linecolor": "#506784", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "#283442", + "zerolinewidth": 2 + } + } + }, + "title": { + "text": "ok Plot for Latency Trends Over Time" + }, + "xaxis": { + "showgrid": true, + "title": { + "text": "Message ID" + } + }, + "yaxis": { + "showgrid": true, + "title": { + "text": "Latency (seconds)" + } + } + } + } + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "throughput => timestamp\n", + "2025-01-08 16:13:30 5\n", + "2025-01-08 16:13:40 0\n", + "2025-01-08 16:13:50 0\n", + "2025-01-08 16:14:00 0\n", + "2025-01-08 16:14:10 0\n", + "2025-01-08 16:14:20 5\n", + "Freq: 10S, dtype: int64\n" + ] + }, + { + "data": { + "application/vnd.plotly.v1+json": { + "config": { + "plotlyServerURL": "https://plot.ly" + }, + "data": [ + { + "line": { + "color": "green" + }, + "marker": { + "color": "green" + }, + "mode": "lines+markers", + "name": "Throughput", + "type": "scatter", + "x": [ + "2025-01-08T16:13:30", + "2025-01-08T16:13:40", + "2025-01-08T16:13:50", + "2025-01-08T16:14:00", + "2025-01-08T16:14:10", + "2025-01-08T16:14:20" + ], + "y": [ + 5, + 0, + 0, + 0, + 0, + 5 + ] + } + ], + "layout": { + "legend": { + "title": { + "text": "Legend" + } + }, + "template": { + "data": { + "bar": [ + { + "error_x": { + "color": "#f2f5fa" + }, + "error_y": { + "color": "#f2f5fa" + }, + "marker": { + "line": { + "color": "rgb(17,17,17)", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "bar" + } + ], + "barpolar": [ + { + "marker": { + "line": { + "color": "rgb(17,17,17)", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "barpolar" + } + ], + "carpet": [ + { + "aaxis": { + "endlinecolor": "#A2B1C6", + "gridcolor": "#506784", + "linecolor": "#506784", + "minorgridcolor": "#506784", + "startlinecolor": "#A2B1C6" + }, + "baxis": { + "endlinecolor": "#A2B1C6", + "gridcolor": "#506784", + "linecolor": "#506784", + "minorgridcolor": "#506784", + "startlinecolor": "#A2B1C6" + }, + "type": "carpet" + } + ], + "choropleth": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "choropleth" + } + ], + "contour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "contour" + } + ], + "contourcarpet": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "contourcarpet" + } + ], + "heatmap": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 
0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "heatmap" + } + ], + "heatmapgl": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "heatmapgl" + } + ], + "histogram": [ + { + "marker": { + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "histogram" + } + ], + "histogram2d": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "histogram2d" + } + ], + "histogram2dcontour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "histogram2dcontour" + } + ], + "mesh3d": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "mesh3d" + } + ], + "parcoords": [ + { + "line": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "parcoords" + } + ], + "pie": [ + { + "automargin": true, + "type": "pie" + } + ], + "scatter": [ + { + "marker": { + "line": { + "color": "#283442" + } + }, + "type": "scatter" + } + ], + "scatter3d": [ + { + "line": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatter3d" + } + ], + "scattercarpet": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattercarpet" + } + ], + "scattergeo": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattergeo" + } + ], + "scattergl": [ + { + "marker": { + "line": { + "color": "#283442" + } + }, + "type": "scattergl" + } + ], + "scattermapbox": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattermapbox" + } + ], + "scatterpolar": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterpolar" + } + ], + "scatterpolargl": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterpolargl" + } + ], + "scatterternary": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterternary" + } + ], + "surface": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 
0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "surface" + } + ], + "table": [ + { + "cells": { + "fill": { + "color": "#506784" + }, + "line": { + "color": "rgb(17,17,17)" + } + }, + "header": { + "fill": { + "color": "#2a3f5f" + }, + "line": { + "color": "rgb(17,17,17)" + } + }, + "type": "table" + } + ] + }, + "layout": { + "annotationdefaults": { + "arrowcolor": "#f2f5fa", + "arrowhead": 0, + "arrowwidth": 1 + }, + "autotypenumbers": "strict", + "coloraxis": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "colorscale": { + "diverging": [ + [ + 0, + "#8e0152" + ], + [ + 0.1, + "#c51b7d" + ], + [ + 0.2, + "#de77ae" + ], + [ + 0.3, + "#f1b6da" + ], + [ + 0.4, + "#fde0ef" + ], + [ + 0.5, + "#f7f7f7" + ], + [ + 0.6, + "#e6f5d0" + ], + [ + 0.7, + "#b8e186" + ], + [ + 0.8, + "#7fbc41" + ], + [ + 0.9, + "#4d9221" + ], + [ + 1, + "#276419" + ] + ], + "sequential": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "sequentialminus": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ] + }, + "colorway": [ + "#636efa", + "#EF553B", + "#00cc96", + "#ab63fa", + "#FFA15A", + "#19d3f3", + "#FF6692", + "#B6E880", + "#FF97FF", + "#FECB52" + ], + "font": { + "color": "#f2f5fa" + }, + "geo": { + "bgcolor": "rgb(17,17,17)", + "lakecolor": "rgb(17,17,17)", + "landcolor": "rgb(17,17,17)", + "showlakes": true, + "showland": true, + "subunitcolor": "#506784" + }, + "hoverlabel": { + "align": "left" + }, + "hovermode": "closest", + "mapbox": { + "style": "dark" + }, + "paper_bgcolor": "rgb(17,17,17)", + "plot_bgcolor": "rgb(17,17,17)", + "polar": { + "angularaxis": { + "gridcolor": "#506784", + "linecolor": "#506784", + "ticks": "" + }, + "bgcolor": "rgb(17,17,17)", + "radialaxis": { + "gridcolor": "#506784", + "linecolor": "#506784", + "ticks": "" + } + }, + "scene": { + "xaxis": { + "backgroundcolor": "rgb(17,17,17)", + "gridcolor": "#506784", + "gridwidth": 2, + "linecolor": "#506784", + "showbackground": true, + "ticks": "", + "zerolinecolor": "#C8D4E3" + }, + "yaxis": { + "backgroundcolor": "rgb(17,17,17)", + "gridcolor": "#506784", + "gridwidth": 2, + "linecolor": "#506784", + "showbackground": true, + "ticks": "", + "zerolinecolor": "#C8D4E3" + }, + "zaxis": { + "backgroundcolor": "rgb(17,17,17)", + "gridcolor": "#506784", + "gridwidth": 2, + "linecolor": "#506784", + "showbackground": true, + "ticks": "", + "zerolinecolor": "#C8D4E3" + } + }, + "shapedefaults": { + "line": { + "color": "#f2f5fa" + } + }, + "sliderdefaults": { + "bgcolor": "#C8D4E3", + "bordercolor": "rgb(17,17,17)", + "borderwidth": 1, + "tickwidth": 0 + }, + "ternary": { + "aaxis": { + 
"gridcolor": "#506784", + "linecolor": "#506784", + "ticks": "" + }, + "baxis": { + "gridcolor": "#506784", + "linecolor": "#506784", + "ticks": "" + }, + "bgcolor": "rgb(17,17,17)", + "caxis": { + "gridcolor": "#506784", + "linecolor": "#506784", + "ticks": "" + } + }, + "title": { + "x": 0.05 + }, + "updatemenudefaults": { + "bgcolor": "#506784", + "borderwidth": 0 + }, + "xaxis": { + "automargin": true, + "gridcolor": "#283442", + "linecolor": "#506784", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "#283442", + "zerolinewidth": 2 + }, + "yaxis": { + "automargin": true, + "gridcolor": "#283442", + "linecolor": "#506784", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "#283442", + "zerolinewidth": 2 + } + } + }, + "title": { + "text": "ok Plot for Throughput Trends Over Time" + }, + "xaxis": { + "showgrid": true, + "title": { + "text": "Time" + } + }, + "yaxis": { + "showgrid": true, + "title": { + "text": "Tasks Completed (Throughput)" + } + } + } + } + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "\n", + "\n", + "def analyze_lentency(logs,desc):\n", + " latency_df = calculate_latency_per_messages(logs)\n", + " # print(latency_df)\n", + " # plot_latency_distribution(latency_df)\n", + " plot_latency_trends(latency_df,desc)\n", + " \n", + "\n", + "def analyze_throughtput(logs,desc):\n", + " throughput_df = calculate_throughput(logs)\n", + " # print(throughput_df)\n", + " plot_throughput_trends(throughput_df,desc)\n", + " \n", + "\n", + "\n", + "exps = [\n", + " \n", + " # (\"10 tasks 2 workers batch-size => 5 : slow \",\"gs://cameltrain-sight/doing_mq_analysis/2024-12-20T09:00:43.541533.json\"),\n", + " # (\"50 tasks 2 workers batch-size => 5 : slow \",\"gs://cameltrain-sight/doing_mq_analysis/2024-12-23T15:15:49.032019.json\"),\n", + " # (\"100 tasks 2 workers batch-size => 5 : slow \",\"gs://cameltrain-sight/doing_mq_analysis/2024-12-23T19:26:50.355211.json\"),\n", + " # (\"500 tasks 2 workers batch-size => 5 : slow \",\"gs://cameltrain-sight/doing_mq_analysis/2024-12-22T15:48:18.956719.json\"),\n", + " \n", + " # (\"50 tasks 4 workers batch-size => 5 : slow \",\"gs://cameltrain-sight/doing_mq_analysis/2024-12-27T09:26:30.675248.json\"),\n", + " # (\"100 tasks 4 workers batch-size => 5 : slow \",\"gs://cameltrain-sight/doing_mq_analysis/2024-12-27T15:29:03.534160.json\"),\n", + " # (\"500 tasks 4 workers batch-size => 5 : slow \",\"gs://cameltrain-sight/doing_mq_analysis/2024-12-22T15:48:18.956719.json\"),\n", + " \n", + " \n", + " # (\"10 tasks 1 workers batch-size => 1 : fast \",\"gs://cameltrain-sight/doing_mq_analysis/2025-01-04T19:48:06.083435.json\"),\n", + " \n", + " # (\"10 tasks 2 workers batch-size => 5 : fast \",\"gs://cameltrain-sight/doing_mq_analysis/2024-12-31T09:40:23.962527.json\"),\n", + " # (\"50 tasks 2 workers batch-size => 5 : fast \",\"gs://cameltrain-sight/doing_mq_analysis/2024-12-31T10:27:32.520731.json\"),\n", + " # (\"100 tasks 2 workers batch-size => 5 : fast \",\"gs://cameltrain-sight/doing_mq_analysis/2024-12-31T10:52:02.779873.json\"),\n", + " # (\"500 tasks 2 workers batch-size => 5 : fast \",\"gs://cameltrain-sight/doing_mq_analysis/2024-12-31T12:06:54.093683.json\"), \n", + " \n", + " # (\"50 tasks 4 workers batch-size => 5 : fast \",\"gs://cameltrain-sight/doing_mq_analysis/2024-12-31T08:09:08.847832.json\"), \n", + " # (\"100 tasks 4 workers batch-size => 5 : fast \",\"gs://cameltrain-sight/doing_mq_analysis/2024-12-31T09:08:46.723731.json\"), \n", + " # (\"500 tasks 4 
workers batch-size => 5 : fast \",\"gs://cameltrain-sight/doing_mq_analysis/2024-12-31T12:06:54.093683.json\"),\n", + "\n", + " # # more workers\n", + "\n", + " # (\"200 tasks 20 workers batch-size => 5 : fast \",\"gs://cameltrain-sight/doing_mq_analysis/2025-01-05T19:21:16.802607.json\"),\n", + " # (\"50 tasks 25 workers batch-size => 5 : fast \",\"gs://cameltrain-sight/doing_mq_analysis/2025-01-05T19:34:17.502357.json\"),\n", + " \n", + " # (\"1000 tasks 50 workers batch-size => 5 : fast \",\"gs://cameltrain-sight/doing_mq_analysis/2025-01-06T11:42:44.340375.json\")\n", + " ('ok',\"gs://cameltrain-sight/doing_mq_analysis/2025-01-08T16:14:33.056443.json\")\n", + "]\n", + "\n", + "\n", + "for (desc,uri) in exps:\n", + " logsx = read_logs_from_gs_uri(gs_uri = uri )\n", + " analyze_message_flow(logsx,desc)\n", + " analyze_lentency(logsx,desc)\n", + " analyze_throughtput(logsx,desc)\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.0" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/py/sight/demo/portfolio_demo.py b/py/sight/demo/portfolio_demo.py index 25c4ea6..aad1530 100644 --- a/py/sight/demo/portfolio_demo.py +++ b/py/sight/demo/portfolio_demo.py @@ -13,144 +13,35 @@ # limitations under the License. """Demo of using the Sight Decision API to run forest simulator.""" -import time -import warnings - - -def warn(*args, **kwargs): - pass - - -warnings.warn = warn - import asyncio import os -import threading +import time from typing import Any, Sequence +import warnings from absl import app from absl import flags # from fvs_sight.fvs_api import action_attrs # from fvs_sight.fvs_api import outcome_attrs from fvs_sight import fvs_api +from helpers.logs.logs_handler import logger as logging import pandas as pd from sight.attribute import Attribute from sight.block import Block from sight.proto import sight_pb2 from sight.sight import Sight -# from sight.widgets.decision.proposal import spawn_workers, launch_worklist_scheduler, propose_actions from sight.widgets.decision import decision from sight.widgets.decision import proposal -from sight.widgets.decision.resource_lock import RWLockDictWrapper -global_outcome_mapping = RWLockDictWrapper() + +def warn(*args, **kwargs): + pass + + +warnings.warn = warn sample = { - 'project_id': '133a6365-01cf-4b5e-8197-d4779e5ce25c', - 'fire-SIMFIRE_0-6_stand_area_burned': 100, - 'fire-SIMFIRE_0-1_cycle': 2013, - 'fire-SIMFIRE_1-6_stand_area_burned': 71, - 'fire-SIMFIRE_1-1_cycle': 2014, - 'fire-SIMFIRE_2-6_stand_area_burned': 100, - 'fire-SIMFIRE_2-1_cycle': 2015, - 'fire-SIMFIRE_4-6_stand_area_burned': 10, - 'fire-SIMFIRE_4-1_cycle': 2017, - 'fire-SIMFIRE_11-6_stand_area_burned': 80, - 'fire-SIMFIRE_11-1_cycle': 2024, - 'fire-SIMFIRE_17-6_stand_area_burned': 45, - 'fire-SIMFIRE_17-1_cycle': 2030, - 'fire-SIMFIRE_19-6_stand_area_burned': 45, - 'fire-SIMFIRE_19-1_cycle': 2032, - 'fire-SIMFIRE_20-6_stand_area_burned': 21, - 'fire-SIMFIRE_20-1_cycle': 2033, - 'fire-SIMFIRE_22-6_stand_area_burned': 34, - 'fire-SIMFIRE_22-1_cycle': 2035, - 'fire-SIMFIRE_23-6_stand_area_burned': 100, - 'fire-SIMFIRE_23-1_cycle': 2036, - 'fire-SIMFIRE_26-6_stand_area_burned': 16, - 'fire-SIMFIRE_26-1_cycle': 2039, - 
'fire-SIMFIRE_28-6_stand_area_burned': 100, - 'fire-SIMFIRE_28-1_cycle': 2041, - 'fire-SIMFIRE_29-6_stand_area_burned': 7, - 'fire-SIMFIRE_29-1_cycle': 2042, - 'fire-SIMFIRE_33-6_stand_area_burned': 100, - 'fire-SIMFIRE_33-1_cycle': 2046, - 'fire-SIMFIRE_35-6_stand_area_burned': 87, - 'fire-SIMFIRE_35-1_cycle': 2048, - 'fire-SIMFIRE_36-6_stand_area_burned': 53, - 'fire-SIMFIRE_36-1_cycle': 2049, - 'fire-SIMFIRE_37-6_stand_area_burned': 51, - 'fire-SIMFIRE_37-1_cycle': 2050, - 'fire-SIMFIRE_39-6_stand_area_burned': 8, - 'fire-SIMFIRE_39-1_cycle': 2052, - 'fire-SIMFIRE_42-6_stand_area_burned': 100, - 'fire-SIMFIRE_42-1_cycle': 2055, - 'fire-SIMFIRE_43-6_stand_area_burned': 95, - 'fire-SIMFIRE_43-1_cycle': 2056, - 'fire-SIMFIRE_44-6_stand_area_burned': 14, - 'fire-SIMFIRE_44-1_cycle': 2057, - 'fire-SIMFIRE_45-6_stand_area_burned': 18, - 'fire-SIMFIRE_45-1_cycle': 2058, - 'fire-SIMFIRE_47-6_stand_area_burned': 100, - 'fire-SIMFIRE_47-1_cycle': 2060, - 'fire-SIMFIRE_49-6_stand_area_burned': 100, - 'fire-SIMFIRE_49-1_cycle': 2062, - 'fire-SIMFIRE_50-6_stand_area_burned': 25, - 'fire-SIMFIRE_50-1_cycle': 2063, - 'fire-SIMFIRE_53-6_stand_area_burned': 100, - 'fire-SIMFIRE_53-1_cycle': 2066, - 'fire-SIMFIRE_54-6_stand_area_burned': 66, - 'fire-SIMFIRE_54-1_cycle': 2067, - 'fire-SIMFIRE_56-6_stand_area_burned': 63, - 'fire-SIMFIRE_56-1_cycle': 2069, - 'fire-SIMFIRE_59-6_stand_area_burned': 45, - 'fire-SIMFIRE_59-1_cycle': 2072, - 'fire-SIMFIRE_60-6_stand_area_burned': 79, - 'fire-SIMFIRE_60-1_cycle': 2073, - 'fire-SIMFIRE_63-6_stand_area_burned': 80, - 'fire-SIMFIRE_63-1_cycle': 2076, - 'fire-SIMFIRE_64-6_stand_area_burned': 47, - 'fire-SIMFIRE_64-1_cycle': 2077, - 'fire-SIMFIRE_65-6_stand_area_burned': 64, - 'fire-SIMFIRE_65-1_cycle': 2078, - 'fire-SIMFIRE_66-6_stand_area_burned': 100, - 'fire-SIMFIRE_66-1_cycle': 2079, - 'fire-SIMFIRE_68-6_stand_area_burned': 100, - 'fire-SIMFIRE_68-1_cycle': 2081, - 'fire-SIMFIRE_70-6_stand_area_burned': 30, - 'fire-SIMFIRE_70-1_cycle': 2083, - 'fire-SIMFIRE_71-6_stand_area_burned': 12, - 'fire-SIMFIRE_71-1_cycle': 2084, - 'fire-SIMFIRE_72-6_stand_area_burned': 51, - 'fire-SIMFIRE_72-1_cycle': 2085, - 'fire-SIMFIRE_75-6_stand_area_burned': 17, - 'fire-SIMFIRE_75-1_cycle': 2088, - 'fire-SIMFIRE_76-6_stand_area_burned': 100, - 'fire-SIMFIRE_76-1_cycle': 2089, - 'fire-SIMFIRE_79-6_stand_area_burned': 60, - 'fire-SIMFIRE_79-1_cycle': 2092, - 'fire-SIMFIRE_81-6_stand_area_burned': 45, - 'fire-SIMFIRE_81-1_cycle': 2094, - 'fire-SIMFIRE_84-6_stand_area_burned': 100, - 'fire-SIMFIRE_84-1_cycle': 2097, - 'fire-SIMFIRE_88-6_stand_area_burned': 58, - 'fire-SIMFIRE_88-1_cycle': 2101, - 'fire-SIMFIRE_90-6_stand_area_burned': 82, - 'fire-SIMFIRE_90-1_cycle': 2103, - 'fire-SIMFIRE_92-6_stand_area_burned': 60, - 'fire-SIMFIRE_92-1_cycle': 2105, - 'fire-SIMFIRE_94-6_stand_area_burned': 56, - 'fire-SIMFIRE_94-1_cycle': 2107, - 'fire-SIMFIRE_96-6_stand_area_burned': 100, - 'fire-SIMFIRE_96-1_cycle': 2109, - 'fire-SIMFIRE_97-6_stand_area_burned': 3, - 'fire-SIMFIRE_97-1_cycle': 2110, - 'fire-SIMFIRE_98-6_stand_area_burned': 87, - 'fire-SIMFIRE_98-1_cycle': 2111, - 'region': 'NC', - 'base-FERTILIZ-howManyCycle': 1.0, - 'base-FERTILIZ-extra_step': 0.0, - 'base-FERTILIZ-extra_offset': 0.0 + 'region': "NC", } FLAGS = flags.FLAGS @@ -167,30 +58,36 @@ def get_sight_instance(): async def propose_actions(sight: Sight, base_project_config: dict[str, Any], treatments: dict[str, Any]) -> pd.Series: + """Proposes actions for both base and treatment projects. 
+
+  Args:
+    sight: The Sight instance.
+    base_project_config: The configuration for the base project.
+    treatments: The configuration for the treatment project.
+
+  Returns:
+    A pandas Series containing the proposed actions.
+  """
+
+  x_start_time = time.perf_counter()
+  logging.info("Proposing managed and unmanaged actions ...")
+
   treatment_project_config = treatments
   tasks = []
   with Attribute("Managed", "0", sight):
-    # base_sim = decision.propose_actions(sight,
-    #                                     action_dict=base_project_config)
-    # await proposal.push_message(sight.id, base_sim)
-    # unmanaged_task = sight.create_task(
-    #     proposal.fetch_outcome(sight.id, base_sim))
-    # tasks.append(unmanaged_task)
     unmanaged_task = sight.create_task(
         proposal.propose_actions(sight, action_dict=base_project_config))
     tasks.append(unmanaged_task)
   with Attribute("Managed", "1", sight):
-    # treatment_sim = decision.propose_actions(
-    #     sight, action_dict=treatment_project_config)
-    # await proposal.push_message(sight.id, treatment_sim)
-    # managed_task = sight.create_task(
-    #     proposal.fetch_outcome(sight.id, treatment_sim))
-    # tasks.append(managed_task)
     managed_task = sight.create_task(
         proposal.propose_actions(sight,
                                  action_dict=treatment_project_config))
     tasks.append(managed_task)
 
   [unmanaged_response, managed_response] = await asyncio.gather(*tasks)
+
+  x_end_time = time.perf_counter()
+  logging.info(
+      f"Proposing managed/unmanaged actions took"
+      f" {x_end_time - x_start_time:.4f} seconds.")
   return unmanaged_response, managed_response
@@ -204,7 +101,11 @@ async def main(sight: Sight, argv: Sequence[str]) -> None:
   with Block("Propose actions", sight):
     with Attribute("project_id", "APR107", sight):
       tasks = []
-      print("len(sample_list) : ", len(sample_list))
+      logging.info("len(sample_list) : %s", len(sample_list))
+
+      x_start_time = time.perf_counter()
+      logging.info("Proposing actions ...")
+
       for id in range(len(sample_list)):
         with Attribute("sample_id", id, sight):
           tasks.append(
@@ -212,34 +113,43 @@ async def main(sight: Sight, argv: Sequence[str]) -> None:
               # both base and treatment are considered to be the same dict here
               propose_actions(sight, sample_list[id], sample_list[id])))
 
-      print("waiting for all get outcome to finish.....")
+      x_end_time = time.perf_counter()
+      logging.info(
+          f"Propose actions took {x_end_time - x_start_time:.4f} seconds.")
+
+      logging.info("Waiting for all get-outcome tasks to finish ...")
       diff_time_series = await asyncio.gather(*tasks)
-      print("all get outcome are finished.....")
-      print(f'Combine Series : {diff_time_series}')
+      logging.info("All get-outcome tasks have finished.")
+      logging.info(f'Combine Series : {diff_time_series}')
 
 
 def main_wrapper(argv):
-  start_time = time.perf_counter()
   with get_sight_instance() as sight:
     decision.run(action_attrs=fvs_api.get_action_attrs(),
                  outcome_attrs=fvs_api.get_outcome_attrs(),
                  sight=sight)
+    start_time = time.perf_counter()
+    sleep_time_in_min = 0
+    logging.info(
+        f"Waiting for {sleep_time_in_min} min for workers to start ...")
+    time.sleep(sleep_time_in_min * 60)
     asyncio.run(main(sight, argv))
-
-    end_time = time.perf_counter()
-    elapsed_time = end_time - start_time
-    print(f"Elapsed time: {elapsed_time} seconds")
-    hours, remainder = divmod(elapsed_time, 3600)
-    minutes, seconds = divmod(remainder, 60)
-
-    if hours > 0:
-      print(
-          f"Elapsed time: {int(hours)} hour(s), {int(minutes)} minute(s), {seconds:.2f} second(s)"
-      )
-    elif minutes > 0:
-      print(f"Elapsed time: {int(minutes)} minute(s), {seconds:.2f} second(s)")
-    else:
-      print(f"Elapsed time: {seconds:.2f} second(s)")
+    end_time = time.perf_counter()
+    elapsed_time = end_time - start_time
+    logging.info(f"Elapsed time: {elapsed_time} seconds")
+    hours, remainder = divmod(elapsed_time, 3600)
+    minutes, seconds = divmod(remainder, 60)
+
+    if hours > 0:
+      logging.info(
+          f"Elapsed time: {int(hours)} hour(s), {int(minutes)} minute(s),"
+          f" {seconds:.2f} second(s)")
+    elif minutes > 0:
+      logging.info(
+          f"Elapsed time: {int(minutes)} minute(s), {seconds:.2f} second(s)")
+    else:
+      logging.info(f"Elapsed time: {seconds:.2f} second(s)")
 
 
 if __name__ == "__main__":
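The `sight.proto` hunk below adds a `server_queue_batch_size` field to `DecisionConfigurationStart`; judging by the message-queue experiments in the notebook above ("batch-size => 5"), it lets a client tell the service how many queued messages to hand out per batch. A minimal sketch of how client code might set it once `sight_pb2` is regenerated; the concrete values are illustrative, not part of this patch:

```python
from sight.proto import sight_pb2

# Illustrative values only: num_trials already exists (tag 6);
# server_queue_batch_size is the field this patch adds (tag 7).
decision_config = sight_pb2.DecisionConfigurationStart(
    num_trials=100,
    server_queue_batch_size=5,
)
```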
diff --git a/py/sight/proto/sight.proto b/py/sight/proto/sight.proto
index 8084c26..388bba2 100644
--- a/py/sight/proto/sight.proto
+++ b/py/sight/proto/sight.proto
@@ -740,6 +740,7 @@ message DecisionConfigurationStart {
   map<string, AttrProps> outcome_attrs = 5;
 
   int64 num_trials = 6;
+  int64 server_queue_batch_size = 7;
 }
 
 message DecisionParam {
diff --git a/py/sight/proto/sight_pb2.py b/py/sight/proto/sight_pb2.py
index 440a3dc..f44665b 100644
--- a/py/sight/proto/sight_pb2.py
+++ b/py/sight/proto/sight_pb2.py
@@ -17,7 +17,7 @@
 )
 
 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
-    b'\n\x17sight/proto/sight.proto\x12\rsight.x.proto\x1a\x19sight/proto/example.proto\x1a.sight/proto/widgets/pipeline/flume/flume.proto\"\'\n\tAttribute\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xb1\x0c\n\x06Object\x12\x10\n\x08location\x18\x01 \x01(\t\x12\r\n\x05index\x18\x02 \x01(\x03\x12\x0f\n\x07log_uid\x18\x18 \x01(\t\x12+\n\tattribute\x18\x03 \x03(\x0b\x32\x18.sight.x.proto.Attribute\x12/\n\x08sub_type\x18\x04 \x01(\x0e\x32\x1d.sight.x.proto.Object.SubType\x12#\n\x04text\x18\x05 \x01(\x0b\x32\x13.sight.x.proto.TextH\x00\x12\x30\n\x0b\x62lock_start\x18\x06 \x01(\x0b\x32\x19.sight.x.proto.BlockStartH\x00\x12,\n\tblock_end\x18\x07 \x01(\x0b\x32\x17.sight.x.proto.BlockEndH\x00\x12\x38\n\x0f\x61ttribute_start\x18\x08 \x01(\x0b\x32\x1d.sight.x.proto.AttributeStartH\x00\x12\x34\n\rattribute_end\x18\t \x01(\x0b\x32\x1b.sight.x.proto.AttributeEndH\x00\x12\x46\n\x10\x66lume_do_fn_emit\x18\x0e \x01(\x0b\x32*.sight.x.widgets.flume.proto.FlumeDoFnEmitH\x00\x12@\n\x0c\x66lume_depend\x18\x0f \x01(\x0b\x32(.sight.x.widgets.flume.proto.FlumeDependH\x00\x12%\n\x05value\x18\x10 \x01(\x0b\x32\x14.sight.x.proto.ValueH\x00\x12-\n\texception\x18\x11 \x01(\x0b\x32\x18.sight.x.proto.ExceptionH\x00\x12\'\n\x06tensor\x18\x14 \x01(\x0b\x32\x15.sight.x.proto.TensorH\x00\x12?\n\x13tensor_flow_example\x18\x15 \x01(\x0b\x32 .sight.x.proto.TensorFlowExampleH\x00\x12\x36\n\x0e\x64\x65\x63ision_point\x18\x16 \x01(\x0b\x32\x1c.sight.x.proto.DecisionPointH\x00\x12:\n\x10\x64\x65\x63ision_outcome\x18\x17 \x01(\x0b\x32\x1e.sight.x.proto.DecisionOutcomeH\x00\x12#\n\x04link\x18\x19 \x01(\x0b\x32\x13.sight.x.proto.LinkH\x00\x12\x36\n\x0epropose_action\x18\x1a \x01(\x0b\x32\x1c.sight.x.proto.ProposeActionH\x00\x12\x0c\n\x04\x66ile\x18\n \x01(\t\x12\x0c\n\x04line\x18\x0b \x01(\x05\x12\x0c\n\x04\x66unc\x18\x0c \x01(\t\x12\x1f\n\x17\x61ncestor_start_location\x18\r \x03(\t\x12.\n\x07metrics\x18\x12 \x01(\x0b\x32\x1d.sight.x.proto.Object.Metrics\x12*\n\x05order\x18\x13 \x01(\x0b\x32\x1b.sight.x.proto.Object.Order\x1aX\n\x07Metrics\x12%\n\x1dprocess_free_swap_space_bytes\x18\x01 \x01(\x03\x12&\n\x1eprocess_total_swap_space_bytes\x18\x02 \x01(\x03\x1a\x1d\n\x05Order\x12\x14\n\x0ctimestamp_ns\x18\x01
\x01(\x03\"\xd2\x02\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x0b\n\x07ST_TEXT\x10\x01\x12\x12\n\x0eST_BLOCK_START\x10\x02\x12\x10\n\x0cST_BLOCK_END\x10\x03\x12\x16\n\x12ST_ATTRIBUTE_START\x10\x04\x12\x14\n\x10ST_ATTRIBUTE_END\x10\x05\x12\x17\n\x13ST_FLUME_DO_FN_EMIT\x10\x06\x12\x13\n\x0fST_FLUME_DEPEND\x10\x07\x12\x0c\n\x08ST_VALUE\x10\x08\x12\x10\n\x0cST_EXCEPTION\x10\t\x12\r\n\tST_TENSOR\x10\n\x12\x19\n\x15ST_TENSORFLOW_EXAMPLE\x10\x0c\x12\x15\n\x11ST_DECISION_POINT\x10\r\x12\x17\n\x13ST_DECISION_OUTCOME\x10\x0e\x12\n\n\x06ST_GAP\x10\x0b\x12\x0b\n\x07ST_LINK\x10\x0f\x12\x15\n\x11ST_PROPOSE_ACTION\x10\x10\x42\x12\n\x10sub_type_message\"\x88\x01\n\rProposeAction\x12\x32\n\x0c\x61\x63tion_attrs\x18\x01 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x30\n\nattributes\x18\x02 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x11\n\taction_id\x18\x03 \x01(\t\"\xec\x01\n\x12\x43onfigurationStart\x12;\n\x08sub_type\x18\x01 \x01(\x0e\x32).sight.x.proto.ConfigurationStart.SubType\x12K\n\x16\x64\x65\x63ision_configuration\x18\x02 \x01(\x0b\x32).sight.x.proto.DecisionConfigurationStartH\x00\"8\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x1d\n\x19ST_DECISION_CONFIGURATION\x10\x01\x42\x12\n\x10sub_type_message\";\n\tException\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\x12\x11\n\ttraceback\x18\x03 \x01(\t\"\xd7\x05\n\x06Tensor\x12/\n\x08sub_type\x18\x01 \x01(\x0e\x32\x1d.sight.x.proto.Tensor.SubType\x12\r\n\x05label\x18\x02 \x01(\t\x12\r\n\x05shape\x18\x03 \x03(\x03\x12\x11\n\tdim_label\x18\t \x03(\t\x12;\n\x0f\x64im_axis_values\x18\n \x03(\x0b\x32\".sight.x.proto.Tensor.StringValues\x12;\n\rstring_values\x18\x04 \x01(\x0b\x32\".sight.x.proto.Tensor.StringValuesH\x00\x12\x39\n\x0c\x62ytes_values\x18\x05 \x01(\x0b\x32!.sight.x.proto.Tensor.BytesValuesH\x00\x12\x39\n\x0cint64_values\x18\x06 \x01(\x0b\x32!.sight.x.proto.Tensor.Int64ValuesH\x00\x12;\n\rdouble_values\x18\x07 \x01(\x0b\x32\".sight.x.proto.Tensor.DoubleValuesH\x00\x12\x37\n\x0b\x62ool_values\x18\x08 \x01(\x0b\x32 .sight.x.proto.Tensor.BoolValuesH\x00\x1a\x1d\n\x0cStringValues\x12\r\n\x05value\x18\x01 \x03(\t\x1a\x1c\n\x0b\x42ytesValues\x12\r\n\x05value\x18\x01 \x03(\x0c\x1a\x1c\n\x0bInt64Values\x12\r\n\x05value\x18\x01 \x03(\x03\x1a\x1d\n\x0c\x44oubleValues\x12\r\n\x05value\x18\x01 \x03(\x01\x1a\x1b\n\nBoolValues\x12\r\n\x05value\x18\x01 \x03(\x08\"`\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\r\n\tST_STRING\x10\x01\x12\x0c\n\x08ST_BYTES\x10\x02\x12\x0c\n\x08ST_INT64\x10\x03\x12\r\n\tST_DOUBLE\x10\x04\x12\x0b\n\x07ST_BOOL\x10\x05\x42\x0c\n\nvalue_type\"\x9c\x01\n\x04Link\x12\x17\n\x0flinked_sight_id\x18\x01 \x01(\t\x12/\n\tlink_type\x18\x02 \x01(\x0e\x32\x1c.sight.x.proto.Link.LinkType\"J\n\x08LinkType\x12\x0e\n\nLT_UNKNOWN\x10\x00\x12\x16\n\x12LT_PARENT_TO_CHILD\x10\x01\x12\x16\n\x12LT_CHILD_TO_PARENT\x10\x02\"\x8e\x02\n\x11TensorFlowExample\x12/\n\rinput_example\x18\x01 \x01(\x0b\x32\x16.sight.x.proto.ExampleH\x00\x12@\n\x16input_sequence_example\x18\x02 \x01(\x0b\x32\x1e.sight.x.proto.SequenceExampleH\x00\x12\x30\n\x0eoutput_example\x18\x03 \x01(\x0b\x32\x16.sight.x.proto.ExampleH\x01\x12\x41\n\x17output_sequence_example\x18\x04 \x01(\x0b\x32\x1e.sight.x.proto.SequenceExampleH\x01\x42\x07\n\x05inputB\x08\n\x06output\")\n\x03Log\x12\"\n\x03obj\x18\x01 \x03(\x0b\x32\x15.sight.x.proto.Object\"x\n\x04Text\x12\x0c\n\x04text\x18\x01 \x01(\t\x12-\n\x08sub_type\x18\x02 
\x01(\x0e\x32\x1b.sight.x.proto.Text.SubType\"3\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x0b\n\x07ST_TEXT\x10\x01\x12\x0b\n\x07ST_HTML\x10\x02\"\xf4\x02\n\x05Value\x12.\n\x08sub_type\x18\x01 \x01(\x0e\x32\x1c.sight.x.proto.Value.SubType\x12\x16\n\x0cstring_value\x18\x02 \x01(\tH\x00\x12\x15\n\x0b\x62ytes_value\x18\x03 \x01(\x0cH\x00\x12\x15\n\x0bint64_value\x18\x04 \x01(\x03H\x00\x12\x16\n\x0c\x64ouble_value\x18\x05 \x01(\x01H\x00\x12\x14\n\nbool_value\x18\x06 \x01(\x08H\x00\x12\x14\n\nnone_value\x18\x07 \x01(\x08H\x00\x12\x14\n\njson_value\x18\x08 \x01(\tH\x00\x12\x11\n\tmime_type\x18\t \x01(\t\"z\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\r\n\tST_STRING\x10\x01\x12\x0c\n\x08ST_BYTES\x10\x02\x12\x0c\n\x08ST_INT64\x10\x03\x12\r\n\tST_DOUBLE\x10\x04\x12\x0b\n\x07ST_BOOL\x10\x05\x12\x0b\n\x07ST_NONE\x10\x06\x12\x0b\n\x07ST_JSON\x10\x07\x42\x0c\n\nvalue_type\"\xeb\n\n\nBlockStart\x12\r\n\x05label\x18\x01 \x01(\t\x12\x33\n\x08sub_type\x18\x02 \x01(\x0e\x32!.sight.x.proto.BlockStart.SubType\x12J\n\x12\x66lume_do_fn_create\x18\x03 \x01(\x0b\x32,.sight.x.widgets.flume.proto.FlumeDoFnCreateH\x00\x12M\n\x14\x66lume_do_fn_start_do\x18\x04 \x01(\x0b\x32-.sight.x.widgets.flume.proto.FlumeDoFnStartDoH\x00\x12T\n\x17\x66lume_compare_fn_create\x18\x05 \x01(\x0b\x32\x31.sight.x.widgets.flume.proto.FlumeCompareFnCreateH\x00\x12\x61\n\x1e\x66lume_compare_fn_start_compare\x18\x06 \x01(\x0b\x32\x37.sight.x.widgets.flume.proto.FlumeCompareFnStartCompareH\x00\x12(\n\x04list\x18\x07 \x01(\x0b\x32\x18.sight.x.proto.ListStartH\x00\x12\\\n tensor_flow_model_training_epoch\x18\x08 \x01(\x0b\x32\x30.sight.x.proto.TensorFlowModelTrainingEpochStartH\x00\x12:\n\x10simulation_start\x18\t \x01(\x0b\x32\x1e.sight.x.proto.SimulationStartH\x00\x12O\n\x1bsimulation_parameters_start\x18\n \x01(\x0b\x32(.sight.x.proto.SimulationParametersStartH\x00\x12L\n\x1asimulation_time_step_start\x18\x0b \x01(\x0b\x32&.sight.x.proto.SimulationTimeStepStartH\x00\x12:\n\rconfiguration\x18\x0c \x01(\x0b\x32!.sight.x.proto.ConfigurationStartH\x00\"\x91\x04\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x19\n\x15ST_FLUME_DO_FN_CREATE\x10\x01\x12\x1b\n\x17ST_FLUME_DO_FN_START_DO\x10\x02\x12\x1e\n\x1aST_FLUME_COMPARE_FN_CREATE\x10\x03\x12%\n!ST_FLUME_COMPARE_FN_START_COMPARE\x10\x04\x12\x12\n\x0eST_NAMED_VALUE\x10\x05\x12\x0b\n\x07ST_LIST\x10\x06\x12\x0c\n\x08ST_TABLE\x10\x07\x12#\n\x1fST_TENSORFLOW_MODEL_APPLICATION\x10\x08\x12&\n\"ST_TENSORFLOW_MODEL_TRAINING_EPOCH\x10\t\x12 \n\x1cST_TENSORFLOW_MODEL_TRAINING\x10\n\x12\x11\n\rST_SIMULATION\x10\x0b\x12\x1c\n\x18ST_SIMULATION_PARAMETERS\x10\x0c\x12\x17\n\x13ST_SIMULATION_STATE\x10\r\x12\x1b\n\x17ST_SIMULATION_TIME_STEP\x10\x0e\x12\x1f\n\x1bST_SIMULATION_INITIAL_STATE\x10\x11\x12 \n\x1cST_SIMULATION_BOUNDARY_STATE\x10\x12\x12\x19\n\x15ST_CLUSTER_ASSIGNMENT\x10\x0f\x12\x14\n\x10ST_CONFIGURATION\x10\x10\x42\x12\n\x10sub_type_message\"\x8a\t\n\x08\x42lockEnd\x12\r\n\x05label\x18\x01 \x01(\t\x12\x31\n\x08sub_type\x18\x06 \x01(\x0e\x32\x1f.sight.x.proto.BlockEnd.SubType\x12\x1f\n\x17location_of_block_start\x18\x02 \x01(\t\x12\x1b\n\x13num_direct_contents\x18\x03 \x01(\x03\x12\x1f\n\x17num_transitive_contents\x18\x04 \x01(\x03\x12N\n\x14\x66lume_do_fn_complete\x18\x07 \x01(\x0b\x32..sight.x.widgets.flume.proto.FlumeDoFnCompleteH\x00\x12I\n\x12\x66lume_do_fn_end_do\x18\x08 \x01(\x0b\x32+.sight.x.widgets.flume.proto.FlumeDoFnEndDoH\x00\x12X\n\x19\x66lume_compare_fn_complete\x18\t 
\x01(\x0b\x32\x33.sight.x.widgets.flume.proto.FlumeCompareFnCompleteH\x00\x12]\n\x1c\x66lume_compare_fn_end_compare\x18\n \x01(\x0b\x32\x35.sight.x.widgets.flume.proto.FlumeCompareFnEndCompareH\x00\x12\x30\n\x07metrics\x18\x0c \x01(\x0b\x32\x1f.sight.x.proto.BlockEnd.Metrics\x1a\x45\n\x07Metrics\x12\x17\n\x0f\x65lapsed_time_ns\x18\x01 \x01(\x03\x12!\n\x19\x65xclusive_elapsed_time_ns\x18\x02 \x01(\x03\"\xfb\x03\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x1b\n\x17ST_FLUME_DO_FN_COMPLETE\x10\x01\x12\x19\n\x15ST_FLUME_DO_FN_END_DO\x10\x02\x12 \n\x1cST_FLUME_COMPARE_FN_COMPLETE\x10\x03\x12#\n\x1fST_FLUME_COMPARE_FN_END_COMPARE\x10\x04\x12\x12\n\x0eST_NAMED_VALUE\x10\x05\x12\x0b\n\x07ST_LIST\x10\x06\x12\x0c\n\x08ST_TABLE\x10\x07\x12#\n\x1fST_TENSORFLOW_MODEL_APPLICATION\x10\x08\x12&\n\"ST_TENSORFLOW_MODEL_TRAINING_EPOCH\x10\t\x12 \n\x1cST_TENSORFLOW_MODEL_TRAINING\x10\n\x12\x11\n\rST_SIMULATION\x10\x0b\x12\x1c\n\x18ST_SIMULATION_PARAMETERS\x10\x0c\x12\x17\n\x13ST_SIMULATION_STATE\x10\r\x12\x1b\n\x17ST_SIMULATION_TIME_STEP\x10\x0e\x12\x1f\n\x1bST_SIMULATION_INITIAL_STATE\x10\x11\x12 \n\x1cST_SIMULATION_BOUNDARY_STATE\x10\x12\x12\x19\n\x15ST_CLUSTER_ASSIGNMENT\x10\x0f\x42\x12\n\x10sub_type_message\"\xaf\x01\n\tListStart\x12\x32\n\x08sub_type\x18\x01 \x01(\x0e\x32 .sight.x.proto.ListStart.SubType\"n\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x12\n\x0eST_HOMOGENEOUS\x10\x01\x12\x14\n\x10ST_HETEROGENEOUS\x10\x02\x12\n\n\x06ST_MAP\x10\x03\x12\x10\n\x0cST_MAP_ENTRY\x10\x04\x12\x0b\n\x07ST_DICT\x10\x05\"J\n!TensorFlowModelTrainingEpochStart\x12\x11\n\tepoch_num\x18\x01 \x01(\x03\x12\x12\n\nbatch_size\x18\x02 \x01(\x03\"=\n\x0e\x41ttributeStart\x12+\n\tattribute\x18\x01 \x01(\x0b\x32\x18.sight.x.proto.Attribute\"\x1b\n\x0c\x41ttributeEnd\x12\x0b\n\x03key\x18\x01 \x01(\t\"\xb2\x03\n\x06Params\x12\r\n\x05local\x18\x01 \x01(\x08\x12\x14\n\x0clog_dir_path\x18\x02 \x01(\t\x12\r\n\x05label\x18\x03 \x01(\t\x12\x13\n\x0btext_output\x18\x04 \x01(\x08\x12\x17\n\x0f\x63olumnio_output\x18\x05 \x01(\x08\x12\x18\n\x10\x63\x61pacitor_output\x18\x06 \x01(\x08\x12\x11\n\tlog_owner\x18\x07 \x01(\t\x12\x13\n\x0bpath_prefix\x18\x08 \x01(\t\x12\x1a\n\x12\x63ontainer_location\x18\t \x01(\t\x12\n\n\x02id\x18\n \x01(\x03\x12\x15\n\rsilent_logger\x18\x0b \x01(\x08\x12\x11\n\tin_memory\x18\x0c \x01(\x08\x12\x13\n\x0b\x61vro_output\x18\r \x01(\x08\x12\x12\n\nproject_id\x18\x0e \x01(\t\x12\x13\n\x0b\x62ucket_name\x18\x0f \x01(\t\x12\x10\n\x08gcp_path\x18\x10 \x01(\t\x12\x13\n\x0b\x66ile_format\x18\x11 \x01(\t\x12\x14\n\x0c\x64\x61taset_name\x18\x12 \x01(\t\x12\x1c\n\x14\x65xternal_file_format\x18\x13 \x01(\t\x12\x19\n\x11\x65xternal_file_uri\x18\x14 \x01(\t\"\x11\n\x0fSimulationStart\"\x1b\n\x19SimulationParametersStart\"\xb8\x02\n\x17SimulationTimeStepStart\x12\x17\n\x0ftime_step_index\x18\x01 \x03(\x03\x12\x11\n\ttime_step\x18\x02 \x01(\x02\x12\x16\n\x0etime_step_size\x18\x03 \x01(\x02\x12M\n\x0ftime_step_units\x18\x04 \x01(\x0e\x32\x34.sight.x.proto.SimulationTimeStepStart.TimeStepUnits\"\x89\x01\n\rTimeStepUnits\x12\x0f\n\x0bTSU_UNKNOWN\x10\x00\x12\x0e\n\nTSU_SECOND\x10\x01\x12\x0e\n\nTSU_MINUTE\x10\x02\x12\x0c\n\x08TSU_HOUR\x10\x03\x12\x0b\n\x07TSU_DAY\x10\x04\x12\r\n\tTSU_MONTH\x10\x05\x12\x0f\n\x0bTSU_QUARTER\x10\x06\x12\x0c\n\x08TSU_YEAR\x10\x07\"\xf0\x01\n\x12\x43ontinuousProbDist\x12>\n\x08gaussian\x18\x01 \x01(\x0b\x32*.sight.x.proto.ContinuousProbDist.GaussianH\x00\x12<\n\x07uniform\x18\x02 \x01(\x0b\x32).sight.x.proto.ContinuousProbDist.UniformH\x00\x1a\'\n\x08Gaussian\x12\x0c\n\x04mean\x18\x01 
\x01(\x02\x12\r\n\x05stdev\x18\x02 \x01(\x02\x1a+\n\x07Uniform\x12\x0f\n\x07min_val\x18\x01 \x01(\x02\x12\x0f\n\x07max_val\x18\x02 \x01(\x02\x42\x06\n\x04\x64ist\"\x83\x01\n\x10\x44iscreteProbDist\x12:\n\x07uniform\x18\x01 \x01(\x0b\x32\'.sight.x.proto.DiscreteProbDist.UniformH\x00\x1a+\n\x07Uniform\x12\x0f\n\x07min_val\x18\x01 \x01(\x03\x12\x0f\n\x07max_val\x18\x02 \x01(\x03\x42\x06\n\x04\x64ist\"\xa6\x1c\n\x1a\x44\x65\x63isionConfigurationStart\x12O\n\x0eoptimizer_type\x18\x01 \x01(\x0e\x32\x37.sight.x.proto.DecisionConfigurationStart.OptimizerType\x12R\n\rchoice_config\x18\x02 \x03(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.ChoiceConfigEntry\x12N\n\x0bstate_attrs\x18\x03 \x03(\x0b\x32\x39.sight.x.proto.DecisionConfigurationStart.StateAttrsEntry\x12P\n\x0c\x61\x63tion_attrs\x18\x04 \x03(\x0b\x32:.sight.x.proto.DecisionConfigurationStart.ActionAttrsEntry\x12R\n\routcome_attrs\x18\x05 \x03(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.OutcomeAttrsEntry\x12\x12\n\nnum_trials\x18\x06 \x01(\x03\x1a\x0e\n\x0cVizierConfig\x1a\xf1\x01\n\nAcmeConfig\x12R\n\nacme_agent\x18\x01 \x01(\x0e\x32>.sight.x.proto.DecisionConfigurationStart.AcmeConfig.AcmeAgent\"\x8e\x01\n\tAcmeAgent\x12\x0e\n\nAA_UNKNOWN\x10\x00\x12\n\n\x06\x41\x41_DQN\x10\x01\x12\x0b\n\x07\x41\x41_D4PG\x10\x02\x12\r\n\tAA_IMPALA\x10\x03\x12\x0b\n\x07\x41\x41_MDQN\x10\x04\x12\x0c\n\x08\x41\x41_QRDQN\x10\x05\x12\n\n\x06\x41\x41_PPO\x10\x06\x12\n\n\x06\x41\x41_MPO\x10\x07\x12\n\n\x06\x41\x41_SAC\x10\x08\x12\n\n\x06\x41\x41_TD3\x10\t\x1a\x35\n\x16GeneticAlgorithmConfig\x12\x1b\n\x13max_population_size\x18\x01 \x01(\x03\x1a\x18\n\x16\x45xhaustiveSearchConfig\x1a\xeb\x02\n\tLLMConfig\x12S\n\talgorithm\x18\x01 \x01(\x0e\x32@.sight.x.proto.DecisionConfigurationStart.LLMConfig.LLMAlgorithm\x12I\n\x04goal\x18\x02 \x01(\x0e\x32;.sight.x.proto.DecisionConfigurationStart.LLMConfig.LLMGoal\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\"W\n\x0cLLMAlgorithm\x12\x0e\n\nLA_UNKNOWN\x10\x00\x12\x11\n\rLA_TEXT_BISON\x10\x01\x12\x11\n\rLA_CHAT_BISON\x10\x02\x12\x11\n\rLA_GEMINI_PRO\x10\x03\"P\n\x07LLMGoal\x12\x0e\n\nLM_UNKNOWN\x10\x00\x12\x0f\n\x0bLM_OPTIMIZE\x10\x01\x12\x10\n\x0cLM_RECOMMEND\x10\x02\x12\x12\n\x0eLM_INTERACTIVE\x10\x03\x1a\x13\n\x11\x42\x61yesianOptConfig\x1a\x1b\n\x19SensitivityAnalysisConfig\x1a\x8e\x03\n\x0fNeverGradConfig\x12_\n\talgorithm\x18\x01 \x01(\x0e\x32L.sight.x.proto.DecisionConfigurationStart.NeverGradConfig.NeverGradAlgorithm\"\x99\x02\n\x12NeverGradAlgorithm\x12\x0e\n\nNG_UNKNOWN\x10\x00\x12\x0b\n\x07NG_AUTO\x10\x01\x12\t\n\x05NG_BO\x10\x02\x12\n\n\x06NG_CMA\x10\x03\x12\x12\n\x0eNG_TwoPointsDE\x10\x04\x12\x13\n\x0fNG_RandomSearch\x10\x05\x12\n\n\x06NG_PSO\x10\x06\x12\x1a\n\x16NG_ScrHammersleySearch\x10\x07\x12\t\n\x05NG_DE\x10\x08\x12\n\n\x06NG_CGA\x10\t\x12\t\n\x05NG_ES\x10\n\x12\r\n\tNG_DL_OPO\x10\x0b\x12\n\n\x06NG_DDE\x10\x0c\x12\n\n\x06NG_NMM\x10\r\x12\x10\n\x0cNG_TINY_SPSA\x10\x0e\x12\x11\n\rNG_VORONOI_DE\x10\x0f\x12\x10\n\x0cNG_CMA_SMALL\x10\x10\x1a\r\n\x0bSMCPyConfig\x1a\x19\n\x17WorklistSchedulerConfig\x1a\xac\x07\n\x0c\x43hoiceConfig\x12O\n\rvizier_config\x18\x01 \x01(\x0b\x32\x36.sight.x.proto.DecisionConfigurationStart.VizierConfigH\x00\x12K\n\x0b\x61\x63me_config\x18\x02 \x01(\x0b\x32\x34.sight.x.proto.DecisionConfigurationStart.AcmeConfigH\x00\x12\x64\n\x18genetic_algorithm_config\x18\x03 \x01(\x0b\x32@.sight.x.proto.DecisionConfigurationStart.GeneticAlgorithmConfigH\x00\x12\x64\n\x18\x65xhaustive_search_config\x18\x04 
\x01(\x0b\x32@.sight.x.proto.DecisionConfigurationStart.ExhaustiveSearchConfigH\x00\x12I\n\nllm_config\x18\x05 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.LLMConfigH\x00\x12Z\n\x13\x62\x61yesian_opt_config\x18\x06 \x01(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.BayesianOptConfigH\x00\x12j\n\x1bsensitivity_analysis_config\x18\x07 \x01(\x0b\x32\x43.sight.x.proto.DecisionConfigurationStart.SensitivityAnalysisConfigH\x00\x12V\n\x11never_grad_config\x18\x08 \x01(\x0b\x32\x39.sight.x.proto.DecisionConfigurationStart.NeverGradConfigH\x00\x12N\n\rsmc_py_config\x18\t \x01(\x0b\x32\x35.sight.x.proto.DecisionConfigurationStart.SMCPyConfigH\x00\x12\x66\n\x19worklist_scheduler_config\x18\n \x01(\x0b\x32\x41.sight.x.proto.DecisionConfigurationStart.WorklistSchedulerConfigH\x00\x42\x0f\n\rchoice_config\x1ak\n\x11\x43hoiceConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x45\n\x05value\x18\x02 \x01(\x0b\x32\x36.sight.x.proto.DecisionConfigurationStart.ChoiceConfig:\x02\x38\x01\x1a\x8d\x02\n\tAttrProps\x12\x11\n\tmin_value\x18\x01 \x01(\x02\x12\x11\n\tmax_value\x18\x02 \x01(\x02\x12\x11\n\tstep_size\x18\x03 \x01(\x02\x12\x1a\n\x12valid_float_values\x18\x04 \x03(\x02\x12\x18\n\x10valid_int_values\x18\x08 \x03(\x03\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\x12?\n\x14\x63ontinuous_prob_dist\x18\x06 \x01(\x0b\x32!.sight.x.proto.ContinuousProbDist\x12;\n\x12\x64iscrete_prob_dist\x18\x07 \x01(\x0b\x32\x1f.sight.x.proto.DiscreteProbDist\x1a\x66\n\x0fStateAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\x1ag\n\x10\x41\x63tionAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\x1ah\n\x11OutcomeAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\"\xea\x01\n\rOptimizerType\x12\x0e\n\nOT_UNKNOWN\x10\x00\x12\r\n\tOT_VIZIER\x10\x01\x12\x0b\n\x07OT_ACME\x10\x02\x12\x18\n\x14OT_GENETIC_ALGORITHM\x10\x03\x12\x18\n\x14OT_EXHAUSTIVE_SEARCH\x10\x04\x12\n\n\x06OT_LLM\x10\x05\x12\x13\n\x0fOT_BAYESIAN_OPT\x10\x06\x12\x1b\n\x17OT_SENSITIVITY_ANALYSIS\x10\x07\x12\x11\n\rOT_NEVER_GRAD\x10\x08\x12\r\n\tOT_SMC_PY\x10\t\x12\x19\n\x15OT_WORKLIST_SCHEDULER\x10\n\"U\n\x08\x44\x61taType\x12\r\n\tDT_UNKOWN\x10\x00\x12\x0c\n\x08\x44T_INT32\x10\x01\x12\x0c\n\x08\x44T_INT64\x10\x02\x12\x0e\n\nDT_FLOAT32\x10\x03\x12\x0e\n\nDT_FLOAT64\x10\x04\"\x8e\x01\n\rDecisionParam\x12\x38\n\x06params\x18\x01 \x03(\x0b\x32(.sight.x.proto.DecisionParam.ParamsEntry\x1a\x43\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.sight.x.proto.Value:\x02\x38\x01\"\xc2\x01\n\x0f\x44\x65\x63isionMessage\x12\x11\n\taction_id\x18\x01 \x01(\x03\x12,\n\x06\x61\x63tion\x18\x02 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x34\n\x0e\x64\x65\x63ision_point\x18\x03 \x01(\x0b\x32\x1c.sight.x.proto.DecisionPoint\x12\x38\n\x10\x64\x65\x63ision_outcome\x18\x04 \x01(\x0b\x32\x1e.sight.x.proto.DecisionOutcome\"\xa5\x01\n\rDecisionPoint\x12\x14\n\x0c\x63hoice_label\x18\x01 \x01(\t\x12\x15\n\rchosen_option\x18\x02 \x01(\t\x12\x33\n\rchoice_params\x18\x03 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x32\n\x0cstate_params\x18\x04 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\"\x80\x01\n\x0f\x44\x65\x63isionOutcome\x12\x15\n\routcome_label\x18\x01 \x01(\t\x12\x0e\n\x06reward\x18\x02 
\x01(\x02\x12\x10\n\x08\x64iscount\x18\x03 \x01(\x02\x12\x34\n\x0eoutcome_params\x18\x04 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParamb\x06proto3' + b'\n\x17sight/proto/sight.proto\x12\rsight.x.proto\x1a\x19sight/proto/example.proto\x1a.sight/proto/widgets/pipeline/flume/flume.proto\"\'\n\tAttribute\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xb1\x0c\n\x06Object\x12\x10\n\x08location\x18\x01 \x01(\t\x12\r\n\x05index\x18\x02 \x01(\x03\x12\x0f\n\x07log_uid\x18\x18 \x01(\t\x12+\n\tattribute\x18\x03 \x03(\x0b\x32\x18.sight.x.proto.Attribute\x12/\n\x08sub_type\x18\x04 \x01(\x0e\x32\x1d.sight.x.proto.Object.SubType\x12#\n\x04text\x18\x05 \x01(\x0b\x32\x13.sight.x.proto.TextH\x00\x12\x30\n\x0b\x62lock_start\x18\x06 \x01(\x0b\x32\x19.sight.x.proto.BlockStartH\x00\x12,\n\tblock_end\x18\x07 \x01(\x0b\x32\x17.sight.x.proto.BlockEndH\x00\x12\x38\n\x0f\x61ttribute_start\x18\x08 \x01(\x0b\x32\x1d.sight.x.proto.AttributeStartH\x00\x12\x34\n\rattribute_end\x18\t \x01(\x0b\x32\x1b.sight.x.proto.AttributeEndH\x00\x12\x46\n\x10\x66lume_do_fn_emit\x18\x0e \x01(\x0b\x32*.sight.x.widgets.flume.proto.FlumeDoFnEmitH\x00\x12@\n\x0c\x66lume_depend\x18\x0f \x01(\x0b\x32(.sight.x.widgets.flume.proto.FlumeDependH\x00\x12%\n\x05value\x18\x10 \x01(\x0b\x32\x14.sight.x.proto.ValueH\x00\x12-\n\texception\x18\x11 \x01(\x0b\x32\x18.sight.x.proto.ExceptionH\x00\x12\'\n\x06tensor\x18\x14 \x01(\x0b\x32\x15.sight.x.proto.TensorH\x00\x12?\n\x13tensor_flow_example\x18\x15 \x01(\x0b\x32 .sight.x.proto.TensorFlowExampleH\x00\x12\x36\n\x0e\x64\x65\x63ision_point\x18\x16 \x01(\x0b\x32\x1c.sight.x.proto.DecisionPointH\x00\x12:\n\x10\x64\x65\x63ision_outcome\x18\x17 \x01(\x0b\x32\x1e.sight.x.proto.DecisionOutcomeH\x00\x12#\n\x04link\x18\x19 \x01(\x0b\x32\x13.sight.x.proto.LinkH\x00\x12\x36\n\x0epropose_action\x18\x1a \x01(\x0b\x32\x1c.sight.x.proto.ProposeActionH\x00\x12\x0c\n\x04\x66ile\x18\n \x01(\t\x12\x0c\n\x04line\x18\x0b \x01(\x05\x12\x0c\n\x04\x66unc\x18\x0c \x01(\t\x12\x1f\n\x17\x61ncestor_start_location\x18\r \x03(\t\x12.\n\x07metrics\x18\x12 \x01(\x0b\x32\x1d.sight.x.proto.Object.Metrics\x12*\n\x05order\x18\x13 \x01(\x0b\x32\x1b.sight.x.proto.Object.Order\x1aX\n\x07Metrics\x12%\n\x1dprocess_free_swap_space_bytes\x18\x01 \x01(\x03\x12&\n\x1eprocess_total_swap_space_bytes\x18\x02 \x01(\x03\x1a\x1d\n\x05Order\x12\x14\n\x0ctimestamp_ns\x18\x01 \x01(\x03\"\xd2\x02\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x0b\n\x07ST_TEXT\x10\x01\x12\x12\n\x0eST_BLOCK_START\x10\x02\x12\x10\n\x0cST_BLOCK_END\x10\x03\x12\x16\n\x12ST_ATTRIBUTE_START\x10\x04\x12\x14\n\x10ST_ATTRIBUTE_END\x10\x05\x12\x17\n\x13ST_FLUME_DO_FN_EMIT\x10\x06\x12\x13\n\x0fST_FLUME_DEPEND\x10\x07\x12\x0c\n\x08ST_VALUE\x10\x08\x12\x10\n\x0cST_EXCEPTION\x10\t\x12\r\n\tST_TENSOR\x10\n\x12\x19\n\x15ST_TENSORFLOW_EXAMPLE\x10\x0c\x12\x15\n\x11ST_DECISION_POINT\x10\r\x12\x17\n\x13ST_DECISION_OUTCOME\x10\x0e\x12\n\n\x06ST_GAP\x10\x0b\x12\x0b\n\x07ST_LINK\x10\x0f\x12\x15\n\x11ST_PROPOSE_ACTION\x10\x10\x42\x12\n\x10sub_type_message\"\x88\x01\n\rProposeAction\x12\x32\n\x0c\x61\x63tion_attrs\x18\x01 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x30\n\nattributes\x18\x02 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x11\n\taction_id\x18\x03 \x01(\t\"\xec\x01\n\x12\x43onfigurationStart\x12;\n\x08sub_type\x18\x01 \x01(\x0e\x32).sight.x.proto.ConfigurationStart.SubType\x12K\n\x16\x64\x65\x63ision_configuration\x18\x02 
\x01(\x0b\x32).sight.x.proto.DecisionConfigurationStartH\x00\"8\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x1d\n\x19ST_DECISION_CONFIGURATION\x10\x01\x42\x12\n\x10sub_type_message\";\n\tException\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\x12\x11\n\ttraceback\x18\x03 \x01(\t\"\xd7\x05\n\x06Tensor\x12/\n\x08sub_type\x18\x01 \x01(\x0e\x32\x1d.sight.x.proto.Tensor.SubType\x12\r\n\x05label\x18\x02 \x01(\t\x12\r\n\x05shape\x18\x03 \x03(\x03\x12\x11\n\tdim_label\x18\t \x03(\t\x12;\n\x0f\x64im_axis_values\x18\n \x03(\x0b\x32\".sight.x.proto.Tensor.StringValues\x12;\n\rstring_values\x18\x04 \x01(\x0b\x32\".sight.x.proto.Tensor.StringValuesH\x00\x12\x39\n\x0c\x62ytes_values\x18\x05 \x01(\x0b\x32!.sight.x.proto.Tensor.BytesValuesH\x00\x12\x39\n\x0cint64_values\x18\x06 \x01(\x0b\x32!.sight.x.proto.Tensor.Int64ValuesH\x00\x12;\n\rdouble_values\x18\x07 \x01(\x0b\x32\".sight.x.proto.Tensor.DoubleValuesH\x00\x12\x37\n\x0b\x62ool_values\x18\x08 \x01(\x0b\x32 .sight.x.proto.Tensor.BoolValuesH\x00\x1a\x1d\n\x0cStringValues\x12\r\n\x05value\x18\x01 \x03(\t\x1a\x1c\n\x0b\x42ytesValues\x12\r\n\x05value\x18\x01 \x03(\x0c\x1a\x1c\n\x0bInt64Values\x12\r\n\x05value\x18\x01 \x03(\x03\x1a\x1d\n\x0c\x44oubleValues\x12\r\n\x05value\x18\x01 \x03(\x01\x1a\x1b\n\nBoolValues\x12\r\n\x05value\x18\x01 \x03(\x08\"`\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\r\n\tST_STRING\x10\x01\x12\x0c\n\x08ST_BYTES\x10\x02\x12\x0c\n\x08ST_INT64\x10\x03\x12\r\n\tST_DOUBLE\x10\x04\x12\x0b\n\x07ST_BOOL\x10\x05\x42\x0c\n\nvalue_type\"\x9c\x01\n\x04Link\x12\x17\n\x0flinked_sight_id\x18\x01 \x01(\t\x12/\n\tlink_type\x18\x02 \x01(\x0e\x32\x1c.sight.x.proto.Link.LinkType\"J\n\x08LinkType\x12\x0e\n\nLT_UNKNOWN\x10\x00\x12\x16\n\x12LT_PARENT_TO_CHILD\x10\x01\x12\x16\n\x12LT_CHILD_TO_PARENT\x10\x02\"\x8e\x02\n\x11TensorFlowExample\x12/\n\rinput_example\x18\x01 \x01(\x0b\x32\x16.sight.x.proto.ExampleH\x00\x12@\n\x16input_sequence_example\x18\x02 \x01(\x0b\x32\x1e.sight.x.proto.SequenceExampleH\x00\x12\x30\n\x0eoutput_example\x18\x03 \x01(\x0b\x32\x16.sight.x.proto.ExampleH\x01\x12\x41\n\x17output_sequence_example\x18\x04 \x01(\x0b\x32\x1e.sight.x.proto.SequenceExampleH\x01\x42\x07\n\x05inputB\x08\n\x06output\")\n\x03Log\x12\"\n\x03obj\x18\x01 \x03(\x0b\x32\x15.sight.x.proto.Object\"x\n\x04Text\x12\x0c\n\x04text\x18\x01 \x01(\t\x12-\n\x08sub_type\x18\x02 \x01(\x0e\x32\x1b.sight.x.proto.Text.SubType\"3\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x0b\n\x07ST_TEXT\x10\x01\x12\x0b\n\x07ST_HTML\x10\x02\"\xf4\x02\n\x05Value\x12.\n\x08sub_type\x18\x01 \x01(\x0e\x32\x1c.sight.x.proto.Value.SubType\x12\x16\n\x0cstring_value\x18\x02 \x01(\tH\x00\x12\x15\n\x0b\x62ytes_value\x18\x03 \x01(\x0cH\x00\x12\x15\n\x0bint64_value\x18\x04 \x01(\x03H\x00\x12\x16\n\x0c\x64ouble_value\x18\x05 \x01(\x01H\x00\x12\x14\n\nbool_value\x18\x06 \x01(\x08H\x00\x12\x14\n\nnone_value\x18\x07 \x01(\x08H\x00\x12\x14\n\njson_value\x18\x08 \x01(\tH\x00\x12\x11\n\tmime_type\x18\t \x01(\t\"z\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\r\n\tST_STRING\x10\x01\x12\x0c\n\x08ST_BYTES\x10\x02\x12\x0c\n\x08ST_INT64\x10\x03\x12\r\n\tST_DOUBLE\x10\x04\x12\x0b\n\x07ST_BOOL\x10\x05\x12\x0b\n\x07ST_NONE\x10\x06\x12\x0b\n\x07ST_JSON\x10\x07\x42\x0c\n\nvalue_type\"\xeb\n\n\nBlockStart\x12\r\n\x05label\x18\x01 \x01(\t\x12\x33\n\x08sub_type\x18\x02 \x01(\x0e\x32!.sight.x.proto.BlockStart.SubType\x12J\n\x12\x66lume_do_fn_create\x18\x03 \x01(\x0b\x32,.sight.x.widgets.flume.proto.FlumeDoFnCreateH\x00\x12M\n\x14\x66lume_do_fn_start_do\x18\x04 
\x01(\x0b\x32-.sight.x.widgets.flume.proto.FlumeDoFnStartDoH\x00\x12T\n\x17\x66lume_compare_fn_create\x18\x05 \x01(\x0b\x32\x31.sight.x.widgets.flume.proto.FlumeCompareFnCreateH\x00\x12\x61\n\x1e\x66lume_compare_fn_start_compare\x18\x06 \x01(\x0b\x32\x37.sight.x.widgets.flume.proto.FlumeCompareFnStartCompareH\x00\x12(\n\x04list\x18\x07 \x01(\x0b\x32\x18.sight.x.proto.ListStartH\x00\x12\\\n tensor_flow_model_training_epoch\x18\x08 \x01(\x0b\x32\x30.sight.x.proto.TensorFlowModelTrainingEpochStartH\x00\x12:\n\x10simulation_start\x18\t \x01(\x0b\x32\x1e.sight.x.proto.SimulationStartH\x00\x12O\n\x1bsimulation_parameters_start\x18\n \x01(\x0b\x32(.sight.x.proto.SimulationParametersStartH\x00\x12L\n\x1asimulation_time_step_start\x18\x0b \x01(\x0b\x32&.sight.x.proto.SimulationTimeStepStartH\x00\x12:\n\rconfiguration\x18\x0c \x01(\x0b\x32!.sight.x.proto.ConfigurationStartH\x00\"\x91\x04\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x19\n\x15ST_FLUME_DO_FN_CREATE\x10\x01\x12\x1b\n\x17ST_FLUME_DO_FN_START_DO\x10\x02\x12\x1e\n\x1aST_FLUME_COMPARE_FN_CREATE\x10\x03\x12%\n!ST_FLUME_COMPARE_FN_START_COMPARE\x10\x04\x12\x12\n\x0eST_NAMED_VALUE\x10\x05\x12\x0b\n\x07ST_LIST\x10\x06\x12\x0c\n\x08ST_TABLE\x10\x07\x12#\n\x1fST_TENSORFLOW_MODEL_APPLICATION\x10\x08\x12&\n\"ST_TENSORFLOW_MODEL_TRAINING_EPOCH\x10\t\x12 \n\x1cST_TENSORFLOW_MODEL_TRAINING\x10\n\x12\x11\n\rST_SIMULATION\x10\x0b\x12\x1c\n\x18ST_SIMULATION_PARAMETERS\x10\x0c\x12\x17\n\x13ST_SIMULATION_STATE\x10\r\x12\x1b\n\x17ST_SIMULATION_TIME_STEP\x10\x0e\x12\x1f\n\x1bST_SIMULATION_INITIAL_STATE\x10\x11\x12 \n\x1cST_SIMULATION_BOUNDARY_STATE\x10\x12\x12\x19\n\x15ST_CLUSTER_ASSIGNMENT\x10\x0f\x12\x14\n\x10ST_CONFIGURATION\x10\x10\x42\x12\n\x10sub_type_message\"\x8a\t\n\x08\x42lockEnd\x12\r\n\x05label\x18\x01 \x01(\t\x12\x31\n\x08sub_type\x18\x06 \x01(\x0e\x32\x1f.sight.x.proto.BlockEnd.SubType\x12\x1f\n\x17location_of_block_start\x18\x02 \x01(\t\x12\x1b\n\x13num_direct_contents\x18\x03 \x01(\x03\x12\x1f\n\x17num_transitive_contents\x18\x04 \x01(\x03\x12N\n\x14\x66lume_do_fn_complete\x18\x07 \x01(\x0b\x32..sight.x.widgets.flume.proto.FlumeDoFnCompleteH\x00\x12I\n\x12\x66lume_do_fn_end_do\x18\x08 \x01(\x0b\x32+.sight.x.widgets.flume.proto.FlumeDoFnEndDoH\x00\x12X\n\x19\x66lume_compare_fn_complete\x18\t \x01(\x0b\x32\x33.sight.x.widgets.flume.proto.FlumeCompareFnCompleteH\x00\x12]\n\x1c\x66lume_compare_fn_end_compare\x18\n \x01(\x0b\x32\x35.sight.x.widgets.flume.proto.FlumeCompareFnEndCompareH\x00\x12\x30\n\x07metrics\x18\x0c \x01(\x0b\x32\x1f.sight.x.proto.BlockEnd.Metrics\x1a\x45\n\x07Metrics\x12\x17\n\x0f\x65lapsed_time_ns\x18\x01 \x01(\x03\x12!\n\x19\x65xclusive_elapsed_time_ns\x18\x02 \x01(\x03\"\xfb\x03\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x1b\n\x17ST_FLUME_DO_FN_COMPLETE\x10\x01\x12\x19\n\x15ST_FLUME_DO_FN_END_DO\x10\x02\x12 \n\x1cST_FLUME_COMPARE_FN_COMPLETE\x10\x03\x12#\n\x1fST_FLUME_COMPARE_FN_END_COMPARE\x10\x04\x12\x12\n\x0eST_NAMED_VALUE\x10\x05\x12\x0b\n\x07ST_LIST\x10\x06\x12\x0c\n\x08ST_TABLE\x10\x07\x12#\n\x1fST_TENSORFLOW_MODEL_APPLICATION\x10\x08\x12&\n\"ST_TENSORFLOW_MODEL_TRAINING_EPOCH\x10\t\x12 \n\x1cST_TENSORFLOW_MODEL_TRAINING\x10\n\x12\x11\n\rST_SIMULATION\x10\x0b\x12\x1c\n\x18ST_SIMULATION_PARAMETERS\x10\x0c\x12\x17\n\x13ST_SIMULATION_STATE\x10\r\x12\x1b\n\x17ST_SIMULATION_TIME_STEP\x10\x0e\x12\x1f\n\x1bST_SIMULATION_INITIAL_STATE\x10\x11\x12 \n\x1cST_SIMULATION_BOUNDARY_STATE\x10\x12\x12\x19\n\x15ST_CLUSTER_ASSIGNMENT\x10\x0f\x42\x12\n\x10sub_type_message\"\xaf\x01\n\tListStart\x12\x32\n\x08sub_type\x18\x01 
\x01(\x0e\x32 .sight.x.proto.ListStart.SubType\"n\n\x07SubType\x12\x0e\n\nST_UNKNOWN\x10\x00\x12\x12\n\x0eST_HOMOGENEOUS\x10\x01\x12\x14\n\x10ST_HETEROGENEOUS\x10\x02\x12\n\n\x06ST_MAP\x10\x03\x12\x10\n\x0cST_MAP_ENTRY\x10\x04\x12\x0b\n\x07ST_DICT\x10\x05\"J\n!TensorFlowModelTrainingEpochStart\x12\x11\n\tepoch_num\x18\x01 \x01(\x03\x12\x12\n\nbatch_size\x18\x02 \x01(\x03\"=\n\x0e\x41ttributeStart\x12+\n\tattribute\x18\x01 \x01(\x0b\x32\x18.sight.x.proto.Attribute\"\x1b\n\x0c\x41ttributeEnd\x12\x0b\n\x03key\x18\x01 \x01(\t\"\xb2\x03\n\x06Params\x12\r\n\x05local\x18\x01 \x01(\x08\x12\x14\n\x0clog_dir_path\x18\x02 \x01(\t\x12\r\n\x05label\x18\x03 \x01(\t\x12\x13\n\x0btext_output\x18\x04 \x01(\x08\x12\x17\n\x0f\x63olumnio_output\x18\x05 \x01(\x08\x12\x18\n\x10\x63\x61pacitor_output\x18\x06 \x01(\x08\x12\x11\n\tlog_owner\x18\x07 \x01(\t\x12\x13\n\x0bpath_prefix\x18\x08 \x01(\t\x12\x1a\n\x12\x63ontainer_location\x18\t \x01(\t\x12\n\n\x02id\x18\n \x01(\x03\x12\x15\n\rsilent_logger\x18\x0b \x01(\x08\x12\x11\n\tin_memory\x18\x0c \x01(\x08\x12\x13\n\x0b\x61vro_output\x18\r \x01(\x08\x12\x12\n\nproject_id\x18\x0e \x01(\t\x12\x13\n\x0b\x62ucket_name\x18\x0f \x01(\t\x12\x10\n\x08gcp_path\x18\x10 \x01(\t\x12\x13\n\x0b\x66ile_format\x18\x11 \x01(\t\x12\x14\n\x0c\x64\x61taset_name\x18\x12 \x01(\t\x12\x1c\n\x14\x65xternal_file_format\x18\x13 \x01(\t\x12\x19\n\x11\x65xternal_file_uri\x18\x14 \x01(\t\"\x11\n\x0fSimulationStart\"\x1b\n\x19SimulationParametersStart\"\xb8\x02\n\x17SimulationTimeStepStart\x12\x17\n\x0ftime_step_index\x18\x01 \x03(\x03\x12\x11\n\ttime_step\x18\x02 \x01(\x02\x12\x16\n\x0etime_step_size\x18\x03 \x01(\x02\x12M\n\x0ftime_step_units\x18\x04 \x01(\x0e\x32\x34.sight.x.proto.SimulationTimeStepStart.TimeStepUnits\"\x89\x01\n\rTimeStepUnits\x12\x0f\n\x0bTSU_UNKNOWN\x10\x00\x12\x0e\n\nTSU_SECOND\x10\x01\x12\x0e\n\nTSU_MINUTE\x10\x02\x12\x0c\n\x08TSU_HOUR\x10\x03\x12\x0b\n\x07TSU_DAY\x10\x04\x12\r\n\tTSU_MONTH\x10\x05\x12\x0f\n\x0bTSU_QUARTER\x10\x06\x12\x0c\n\x08TSU_YEAR\x10\x07\"\xf0\x01\n\x12\x43ontinuousProbDist\x12>\n\x08gaussian\x18\x01 \x01(\x0b\x32*.sight.x.proto.ContinuousProbDist.GaussianH\x00\x12<\n\x07uniform\x18\x02 \x01(\x0b\x32).sight.x.proto.ContinuousProbDist.UniformH\x00\x1a\'\n\x08Gaussian\x12\x0c\n\x04mean\x18\x01 \x01(\x02\x12\r\n\x05stdev\x18\x02 \x01(\x02\x1a+\n\x07Uniform\x12\x0f\n\x07min_val\x18\x01 \x01(\x02\x12\x0f\n\x07max_val\x18\x02 \x01(\x02\x42\x06\n\x04\x64ist\"\x83\x01\n\x10\x44iscreteProbDist\x12:\n\x07uniform\x18\x01 \x01(\x0b\x32\'.sight.x.proto.DiscreteProbDist.UniformH\x00\x1a+\n\x07Uniform\x12\x0f\n\x07min_val\x18\x01 \x01(\x03\x12\x0f\n\x07max_val\x18\x02 \x01(\x03\x42\x06\n\x04\x64ist\"\xc7\x1c\n\x1a\x44\x65\x63isionConfigurationStart\x12O\n\x0eoptimizer_type\x18\x01 \x01(\x0e\x32\x37.sight.x.proto.DecisionConfigurationStart.OptimizerType\x12R\n\rchoice_config\x18\x02 \x03(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.ChoiceConfigEntry\x12N\n\x0bstate_attrs\x18\x03 \x03(\x0b\x32\x39.sight.x.proto.DecisionConfigurationStart.StateAttrsEntry\x12P\n\x0c\x61\x63tion_attrs\x18\x04 \x03(\x0b\x32:.sight.x.proto.DecisionConfigurationStart.ActionAttrsEntry\x12R\n\routcome_attrs\x18\x05 \x03(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.OutcomeAttrsEntry\x12\x12\n\nnum_trials\x18\x06 \x01(\x03\x12\x1f\n\x17server_queue_batch_size\x18\x07 \x01(\x03\x1a\x0e\n\x0cVizierConfig\x1a\xf1\x01\n\nAcmeConfig\x12R\n\nacme_agent\x18\x01 
\x01(\x0e\x32>.sight.x.proto.DecisionConfigurationStart.AcmeConfig.AcmeAgent\"\x8e\x01\n\tAcmeAgent\x12\x0e\n\nAA_UNKNOWN\x10\x00\x12\n\n\x06\x41\x41_DQN\x10\x01\x12\x0b\n\x07\x41\x41_D4PG\x10\x02\x12\r\n\tAA_IMPALA\x10\x03\x12\x0b\n\x07\x41\x41_MDQN\x10\x04\x12\x0c\n\x08\x41\x41_QRDQN\x10\x05\x12\n\n\x06\x41\x41_PPO\x10\x06\x12\n\n\x06\x41\x41_MPO\x10\x07\x12\n\n\x06\x41\x41_SAC\x10\x08\x12\n\n\x06\x41\x41_TD3\x10\t\x1a\x35\n\x16GeneticAlgorithmConfig\x12\x1b\n\x13max_population_size\x18\x01 \x01(\x03\x1a\x18\n\x16\x45xhaustiveSearchConfig\x1a\xeb\x02\n\tLLMConfig\x12S\n\talgorithm\x18\x01 \x01(\x0e\x32@.sight.x.proto.DecisionConfigurationStart.LLMConfig.LLMAlgorithm\x12I\n\x04goal\x18\x02 \x01(\x0e\x32;.sight.x.proto.DecisionConfigurationStart.LLMConfig.LLMGoal\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\"W\n\x0cLLMAlgorithm\x12\x0e\n\nLA_UNKNOWN\x10\x00\x12\x11\n\rLA_TEXT_BISON\x10\x01\x12\x11\n\rLA_CHAT_BISON\x10\x02\x12\x11\n\rLA_GEMINI_PRO\x10\x03\"P\n\x07LLMGoal\x12\x0e\n\nLM_UNKNOWN\x10\x00\x12\x0f\n\x0bLM_OPTIMIZE\x10\x01\x12\x10\n\x0cLM_RECOMMEND\x10\x02\x12\x12\n\x0eLM_INTERACTIVE\x10\x03\x1a\x13\n\x11\x42\x61yesianOptConfig\x1a\x1b\n\x19SensitivityAnalysisConfig\x1a\x8e\x03\n\x0fNeverGradConfig\x12_\n\talgorithm\x18\x01 \x01(\x0e\x32L.sight.x.proto.DecisionConfigurationStart.NeverGradConfig.NeverGradAlgorithm\"\x99\x02\n\x12NeverGradAlgorithm\x12\x0e\n\nNG_UNKNOWN\x10\x00\x12\x0b\n\x07NG_AUTO\x10\x01\x12\t\n\x05NG_BO\x10\x02\x12\n\n\x06NG_CMA\x10\x03\x12\x12\n\x0eNG_TwoPointsDE\x10\x04\x12\x13\n\x0fNG_RandomSearch\x10\x05\x12\n\n\x06NG_PSO\x10\x06\x12\x1a\n\x16NG_ScrHammersleySearch\x10\x07\x12\t\n\x05NG_DE\x10\x08\x12\n\n\x06NG_CGA\x10\t\x12\t\n\x05NG_ES\x10\n\x12\r\n\tNG_DL_OPO\x10\x0b\x12\n\n\x06NG_DDE\x10\x0c\x12\n\n\x06NG_NMM\x10\r\x12\x10\n\x0cNG_TINY_SPSA\x10\x0e\x12\x11\n\rNG_VORONOI_DE\x10\x0f\x12\x10\n\x0cNG_CMA_SMALL\x10\x10\x1a\r\n\x0bSMCPyConfig\x1a\x19\n\x17WorklistSchedulerConfig\x1a\xac\x07\n\x0c\x43hoiceConfig\x12O\n\rvizier_config\x18\x01 \x01(\x0b\x32\x36.sight.x.proto.DecisionConfigurationStart.VizierConfigH\x00\x12K\n\x0b\x61\x63me_config\x18\x02 \x01(\x0b\x32\x34.sight.x.proto.DecisionConfigurationStart.AcmeConfigH\x00\x12\x64\n\x18genetic_algorithm_config\x18\x03 \x01(\x0b\x32@.sight.x.proto.DecisionConfigurationStart.GeneticAlgorithmConfigH\x00\x12\x64\n\x18\x65xhaustive_search_config\x18\x04 \x01(\x0b\x32@.sight.x.proto.DecisionConfigurationStart.ExhaustiveSearchConfigH\x00\x12I\n\nllm_config\x18\x05 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.LLMConfigH\x00\x12Z\n\x13\x62\x61yesian_opt_config\x18\x06 \x01(\x0b\x32;.sight.x.proto.DecisionConfigurationStart.BayesianOptConfigH\x00\x12j\n\x1bsensitivity_analysis_config\x18\x07 \x01(\x0b\x32\x43.sight.x.proto.DecisionConfigurationStart.SensitivityAnalysisConfigH\x00\x12V\n\x11never_grad_config\x18\x08 \x01(\x0b\x32\x39.sight.x.proto.DecisionConfigurationStart.NeverGradConfigH\x00\x12N\n\rsmc_py_config\x18\t \x01(\x0b\x32\x35.sight.x.proto.DecisionConfigurationStart.SMCPyConfigH\x00\x12\x66\n\x19worklist_scheduler_config\x18\n \x01(\x0b\x32\x41.sight.x.proto.DecisionConfigurationStart.WorklistSchedulerConfigH\x00\x42\x0f\n\rchoice_config\x1ak\n\x11\x43hoiceConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x45\n\x05value\x18\x02 \x01(\x0b\x32\x36.sight.x.proto.DecisionConfigurationStart.ChoiceConfig:\x02\x38\x01\x1a\x8d\x02\n\tAttrProps\x12\x11\n\tmin_value\x18\x01 \x01(\x02\x12\x11\n\tmax_value\x18\x02 \x01(\x02\x12\x11\n\tstep_size\x18\x03 \x01(\x02\x12\x1a\n\x12valid_float_values\x18\x04 
\x03(\x02\x12\x18\n\x10valid_int_values\x18\x08 \x03(\x03\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\x12?\n\x14\x63ontinuous_prob_dist\x18\x06 \x01(\x0b\x32!.sight.x.proto.ContinuousProbDist\x12;\n\x12\x64iscrete_prob_dist\x18\x07 \x01(\x0b\x32\x1f.sight.x.proto.DiscreteProbDist\x1a\x66\n\x0fStateAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\x1ag\n\x10\x41\x63tionAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\x1ah\n\x11OutcomeAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x42\n\x05value\x18\x02 \x01(\x0b\x32\x33.sight.x.proto.DecisionConfigurationStart.AttrProps:\x02\x38\x01\"\xea\x01\n\rOptimizerType\x12\x0e\n\nOT_UNKNOWN\x10\x00\x12\r\n\tOT_VIZIER\x10\x01\x12\x0b\n\x07OT_ACME\x10\x02\x12\x18\n\x14OT_GENETIC_ALGORITHM\x10\x03\x12\x18\n\x14OT_EXHAUSTIVE_SEARCH\x10\x04\x12\n\n\x06OT_LLM\x10\x05\x12\x13\n\x0fOT_BAYESIAN_OPT\x10\x06\x12\x1b\n\x17OT_SENSITIVITY_ANALYSIS\x10\x07\x12\x11\n\rOT_NEVER_GRAD\x10\x08\x12\r\n\tOT_SMC_PY\x10\t\x12\x19\n\x15OT_WORKLIST_SCHEDULER\x10\n\"U\n\x08\x44\x61taType\x12\r\n\tDT_UNKOWN\x10\x00\x12\x0c\n\x08\x44T_INT32\x10\x01\x12\x0c\n\x08\x44T_INT64\x10\x02\x12\x0e\n\nDT_FLOAT32\x10\x03\x12\x0e\n\nDT_FLOAT64\x10\x04\"\x8e\x01\n\rDecisionParam\x12\x38\n\x06params\x18\x01 \x03(\x0b\x32(.sight.x.proto.DecisionParam.ParamsEntry\x1a\x43\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.sight.x.proto.Value:\x02\x38\x01\"\xc2\x01\n\x0f\x44\x65\x63isionMessage\x12\x11\n\taction_id\x18\x01 \x01(\x03\x12,\n\x06\x61\x63tion\x18\x02 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x34\n\x0e\x64\x65\x63ision_point\x18\x03 \x01(\x0b\x32\x1c.sight.x.proto.DecisionPoint\x12\x38\n\x10\x64\x65\x63ision_outcome\x18\x04 \x01(\x0b\x32\x1e.sight.x.proto.DecisionOutcome\"\xa5\x01\n\rDecisionPoint\x12\x14\n\x0c\x63hoice_label\x18\x01 \x01(\t\x12\x15\n\rchosen_option\x18\x02 \x01(\t\x12\x33\n\rchoice_params\x18\x03 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\x12\x32\n\x0cstate_params\x18\x04 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParam\"\x80\x01\n\x0f\x44\x65\x63isionOutcome\x12\x15\n\routcome_label\x18\x01 \x01(\t\x12\x0e\n\x06reward\x18\x02 \x01(\x02\x12\x10\n\x08\x64iscount\x18\x03 \x01(\x02\x12\x34\n\x0eoutcome_params\x18\x04 \x01(\x0b\x32\x1c.sight.x.proto.DecisionParamb\x06proto3' ) _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) @@ -125,59 +125,59 @@ _DISCRETEPROBDIST_UNIFORM._serialized_start = 7912 _DISCRETEPROBDIST_UNIFORM._serialized_end = 7955 _DECISIONCONFIGURATIONSTART._serialized_start = 7966 - _DECISIONCONFIGURATIONSTART._serialized_end = 11588 - _DECISIONCONFIGURATIONSTART_VIZIERCONFIG._serialized_start = 8427 - _DECISIONCONFIGURATIONSTART_VIZIERCONFIG._serialized_end = 8441 - _DECISIONCONFIGURATIONSTART_ACMECONFIG._serialized_start = 8444 - _DECISIONCONFIGURATIONSTART_ACMECONFIG._serialized_end = 8685 - _DECISIONCONFIGURATIONSTART_ACMECONFIG_ACMEAGENT._serialized_start = 8543 - _DECISIONCONFIGURATIONSTART_ACMECONFIG_ACMEAGENT._serialized_end = 8685 - _DECISIONCONFIGURATIONSTART_GENETICALGORITHMCONFIG._serialized_start = 8687 - _DECISIONCONFIGURATIONSTART_GENETICALGORITHMCONFIG._serialized_end = 8740 - _DECISIONCONFIGURATIONSTART_EXHAUSTIVESEARCHCONFIG._serialized_start = 8742 - _DECISIONCONFIGURATIONSTART_EXHAUSTIVESEARCHCONFIG._serialized_end = 8766 - 
_DECISIONCONFIGURATIONSTART_LLMCONFIG._serialized_start = 8769 - _DECISIONCONFIGURATIONSTART_LLMCONFIG._serialized_end = 9132 - _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMALGORITHM._serialized_start = 8963 - _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMALGORITHM._serialized_end = 9050 - _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMGOAL._serialized_start = 9052 - _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMGOAL._serialized_end = 9132 - _DECISIONCONFIGURATIONSTART_BAYESIANOPTCONFIG._serialized_start = 9134 - _DECISIONCONFIGURATIONSTART_BAYESIANOPTCONFIG._serialized_end = 9153 - _DECISIONCONFIGURATIONSTART_SENSITIVITYANALYSISCONFIG._serialized_start = 9155 - _DECISIONCONFIGURATIONSTART_SENSITIVITYANALYSISCONFIG._serialized_end = 9182 - _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG._serialized_start = 9185 - _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG._serialized_end = 9583 - _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG_NEVERGRADALGORITHM._serialized_start = 9302 - _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG_NEVERGRADALGORITHM._serialized_end = 9583 - _DECISIONCONFIGURATIONSTART_SMCPYCONFIG._serialized_start = 9585 - _DECISIONCONFIGURATIONSTART_SMCPYCONFIG._serialized_end = 9598 - _DECISIONCONFIGURATIONSTART_WORKLISTSCHEDULERCONFIG._serialized_start = 9600 - _DECISIONCONFIGURATIONSTART_WORKLISTSCHEDULERCONFIG._serialized_end = 9625 - _DECISIONCONFIGURATIONSTART_CHOICECONFIG._serialized_start = 9628 - _DECISIONCONFIGURATIONSTART_CHOICECONFIG._serialized_end = 10568 - _DECISIONCONFIGURATIONSTART_CHOICECONFIGENTRY._serialized_start = 10570 - _DECISIONCONFIGURATIONSTART_CHOICECONFIGENTRY._serialized_end = 10677 - _DECISIONCONFIGURATIONSTART_ATTRPROPS._serialized_start = 10680 - _DECISIONCONFIGURATIONSTART_ATTRPROPS._serialized_end = 10949 - _DECISIONCONFIGURATIONSTART_STATEATTRSENTRY._serialized_start = 10951 - _DECISIONCONFIGURATIONSTART_STATEATTRSENTRY._serialized_end = 11053 - _DECISIONCONFIGURATIONSTART_ACTIONATTRSENTRY._serialized_start = 11055 - _DECISIONCONFIGURATIONSTART_ACTIONATTRSENTRY._serialized_end = 11158 - _DECISIONCONFIGURATIONSTART_OUTCOMEATTRSENTRY._serialized_start = 11160 - _DECISIONCONFIGURATIONSTART_OUTCOMEATTRSENTRY._serialized_end = 11264 - _DECISIONCONFIGURATIONSTART_OPTIMIZERTYPE._serialized_start = 11267 - _DECISIONCONFIGURATIONSTART_OPTIMIZERTYPE._serialized_end = 11501 - _DECISIONCONFIGURATIONSTART_DATATYPE._serialized_start = 11503 - _DECISIONCONFIGURATIONSTART_DATATYPE._serialized_end = 11588 - _DECISIONPARAM._serialized_start = 11591 - _DECISIONPARAM._serialized_end = 11733 - _DECISIONPARAM_PARAMSENTRY._serialized_start = 11666 - _DECISIONPARAM_PARAMSENTRY._serialized_end = 11733 - _DECISIONMESSAGE._serialized_start = 11736 - _DECISIONMESSAGE._serialized_end = 11930 - _DECISIONPOINT._serialized_start = 11933 - _DECISIONPOINT._serialized_end = 12098 - _DECISIONOUTCOME._serialized_start = 12101 - _DECISIONOUTCOME._serialized_end = 12229 + _DECISIONCONFIGURATIONSTART._serialized_end = 11621 + _DECISIONCONFIGURATIONSTART_VIZIERCONFIG._serialized_start = 8460 + _DECISIONCONFIGURATIONSTART_VIZIERCONFIG._serialized_end = 8474 + _DECISIONCONFIGURATIONSTART_ACMECONFIG._serialized_start = 8477 + _DECISIONCONFIGURATIONSTART_ACMECONFIG._serialized_end = 8718 + _DECISIONCONFIGURATIONSTART_ACMECONFIG_ACMEAGENT._serialized_start = 8576 + _DECISIONCONFIGURATIONSTART_ACMECONFIG_ACMEAGENT._serialized_end = 8718 + _DECISIONCONFIGURATIONSTART_GENETICALGORITHMCONFIG._serialized_start = 8720 + _DECISIONCONFIGURATIONSTART_GENETICALGORITHMCONFIG._serialized_end = 8773 + 
_DECISIONCONFIGURATIONSTART_EXHAUSTIVESEARCHCONFIG._serialized_start = 8775 + _DECISIONCONFIGURATIONSTART_EXHAUSTIVESEARCHCONFIG._serialized_end = 8799 + _DECISIONCONFIGURATIONSTART_LLMCONFIG._serialized_start = 8802 + _DECISIONCONFIGURATIONSTART_LLMCONFIG._serialized_end = 9165 + _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMALGORITHM._serialized_start = 8996 + _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMALGORITHM._serialized_end = 9083 + _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMGOAL._serialized_start = 9085 + _DECISIONCONFIGURATIONSTART_LLMCONFIG_LLMGOAL._serialized_end = 9165 + _DECISIONCONFIGURATIONSTART_BAYESIANOPTCONFIG._serialized_start = 9167 + _DECISIONCONFIGURATIONSTART_BAYESIANOPTCONFIG._serialized_end = 9186 + _DECISIONCONFIGURATIONSTART_SENSITIVITYANALYSISCONFIG._serialized_start = 9188 + _DECISIONCONFIGURATIONSTART_SENSITIVITYANALYSISCONFIG._serialized_end = 9215 + _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG._serialized_start = 9218 + _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG._serialized_end = 9616 + _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG_NEVERGRADALGORITHM._serialized_start = 9335 + _DECISIONCONFIGURATIONSTART_NEVERGRADCONFIG_NEVERGRADALGORITHM._serialized_end = 9616 + _DECISIONCONFIGURATIONSTART_SMCPYCONFIG._serialized_start = 9618 + _DECISIONCONFIGURATIONSTART_SMCPYCONFIG._serialized_end = 9631 + _DECISIONCONFIGURATIONSTART_WORKLISTSCHEDULERCONFIG._serialized_start = 9633 + _DECISIONCONFIGURATIONSTART_WORKLISTSCHEDULERCONFIG._serialized_end = 9658 + _DECISIONCONFIGURATIONSTART_CHOICECONFIG._serialized_start = 9661 + _DECISIONCONFIGURATIONSTART_CHOICECONFIG._serialized_end = 10601 + _DECISIONCONFIGURATIONSTART_CHOICECONFIGENTRY._serialized_start = 10603 + _DECISIONCONFIGURATIONSTART_CHOICECONFIGENTRY._serialized_end = 10710 + _DECISIONCONFIGURATIONSTART_ATTRPROPS._serialized_start = 10713 + _DECISIONCONFIGURATIONSTART_ATTRPROPS._serialized_end = 10982 + _DECISIONCONFIGURATIONSTART_STATEATTRSENTRY._serialized_start = 10984 + _DECISIONCONFIGURATIONSTART_STATEATTRSENTRY._serialized_end = 11086 + _DECISIONCONFIGURATIONSTART_ACTIONATTRSENTRY._serialized_start = 11088 + _DECISIONCONFIGURATIONSTART_ACTIONATTRSENTRY._serialized_end = 11191 + _DECISIONCONFIGURATIONSTART_OUTCOMEATTRSENTRY._serialized_start = 11193 + _DECISIONCONFIGURATIONSTART_OUTCOMEATTRSENTRY._serialized_end = 11297 + _DECISIONCONFIGURATIONSTART_OPTIMIZERTYPE._serialized_start = 11300 + _DECISIONCONFIGURATIONSTART_OPTIMIZERTYPE._serialized_end = 11534 + _DECISIONCONFIGURATIONSTART_DATATYPE._serialized_start = 11536 + _DECISIONCONFIGURATIONSTART_DATATYPE._serialized_end = 11621 + _DECISIONPARAM._serialized_start = 11624 + _DECISIONPARAM._serialized_end = 11766 + _DECISIONPARAM_PARAMSENTRY._serialized_start = 11699 + _DECISIONPARAM_PARAMSENTRY._serialized_end = 11766 + _DECISIONMESSAGE._serialized_start = 11769 + _DECISIONMESSAGE._serialized_end = 11963 + _DECISIONPOINT._serialized_start = 11966 + _DECISIONPOINT._serialized_end = 12131 + _DECISIONOUTCOME._serialized_start = 12134 + _DECISIONOUTCOME._serialized_end = 12262 # @@protoc_insertion_point(module_scope) diff --git a/py/sight/utility.py b/py/sight/utility.py index 4d9b8eb..40e22e4 100644 --- a/py/sight/utility.py +++ b/py/sight/utility.py @@ -30,7 +30,7 @@ from sight.widgets.decision.resource_lock import RWLockDictWrapper from sight_service.proto import service_pb2 -POLL_LIMIT = 10 # POLL_TIME_INTERVAL th part of second +POLL_LIMIT = 300 # POLL_TIME_INTERVAL th part of second POLL_TIME_INTERVAL = 6 # seconds global_outcome_mapping = RWLockDictWrapper() diff 
--git a/py/sight/widgets/decision/decision.py b/py/sight/widgets/decision/decision.py
index 9c8e572..5a4f077 100644
--- a/py/sight/widgets/decision/decision.py
+++ b/py/sight/widgets/decision/decision.py
@@ -164,6 +164,12 @@ _TRAINED_MODEL_LOG_ID = flags.DEFINE_string(
     'sight_log_id', None, 'Sight log Id of trained run to be used')
+_SERVER_QUEUE_BATCH_SIZE = flags.DEFINE_integer(
+    'server_queue_batch_size',
+    1,
+    'Batch size used by the server-side message queue when leasing work.',
+)
+
 _file_name = 'decision_actor.py'
 _sight_id = None
 _rewards = []
@@ -981,7 +987,7 @@ def configure_decision(sight, state_attrs, action_attrs, outcome_attrs):
   decision_configuration = sight_pb2.DecisionConfigurationStart()
   decision_configuration.optimizer_type = optimizer.obj.optimizer_type()
-
+  decision_configuration.server_queue_batch_size = _SERVER_QUEUE_BATCH_SIZE.value or 1
   if _NUM_TRIALS.value:
     decision_configuration.num_trials = _NUM_TRIALS.value
diff --git a/py/sight/widgets/decision/proposal.py b/py/sight/widgets/decision/proposal.py
index 3ece0c9..0880c33 100644
--- a/py/sight/widgets/decision/proposal.py
+++ b/py/sight/widgets/decision/proposal.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 import asyncio
+from concurrent.futures import ThreadPoolExecutor
 import json
 
 from absl import flags
@@ -65,6 +66,37 @@ async def fetch_outcome(sight_id, actions_id):
     raise e
 
 
+async def asyncio_wrapper(blocking_func, *args, max_threads=-1):
+  """Run a blocking function on an asyncio executor without blocking the loop.
+
+  Parameters:
+    blocking_func (callable): The blocking function to execute.
+    *args: Positional arguments to pass to the blocking function.
+    max_threads (int): Number of threads for a dedicated ThreadPoolExecutor.
+      If -1, use asyncio's default executor.
+
+  Returns:
+    The result of the blocking function.
+ """ + if max_threads != -1: + # Create a custom ThreadPoolExecutor + custom_executor = ThreadPoolExecutor(max_workers=max_threads, + thread_name_prefix="CustomThread") + try: + # Temporarily set the custom executor + loop = asyncio.get_running_loop() + loop.set_default_executor(custom_executor) + print(f"Using custom thread pool with max threads: {max_threads}") + return await asyncio.to_thread(blocking_func, *args) + finally: + # Shutdown the custom executor after usage + custom_executor.shutdown(wait=True) + else: + print("Using default thread pool") + # Use the default executor + return await asyncio.to_thread(blocking_func, *args) + + async def propose_actions(sight, action_dict, custom_part="sight_cache"): key_maker = CacheKeyMaker() @@ -80,7 +112,11 @@ async def propose_actions(sight, action_dict, custom_part="sight_cache"): print('Getting response from cache !!') return outcome - unique_action_id = decision.propose_actions(sight, action_dict) + # unique_action_id = decision.propose_actions(sight, action_dict) + # unique_action_id = await asyncio.to_thread(decision.propose_actions, sight, + # action_dict) + unique_action_id = await asyncio_wrapper(decision.propose_actions, sight, + action_dict) await push_message(sight.id, unique_action_id) response = await fetch_outcome(sight.id, unique_action_id) outcome = response.get('outcome', None) diff --git a/sight_service/message_queue.py b/sight_service/message_queue.py index a551bc2..6b42b14 100644 --- a/sight_service/message_queue.py +++ b/sight_service/message_queue.py @@ -2,10 +2,13 @@ import abc import copy +from datetime import datetime import enum +import json from typing import Any, Callable, Dict, Generic, Optional, Protocol, TypeVar import uuid +from google.cloud import storage from helpers.logs.logs_handler import logger as logging from overrides import overrides from readerwriterlock import rwlock @@ -158,6 +161,45 @@ def is_message_in_completed(self, message_id: ID) -> bool: ... +class MessageFlowLogger: + """Class to log message state transitions.""" + + def __init__(self): + self.logs = [] # List to store time-series logs + + def log_message_state(self, + state, + message_id, + worker_id=None, + message_details=None): + timestamp = datetime.utcnow().isoformat() + self.logs.append({ + "timestamp": timestamp, + "state": state, + "message_id": message_id, + "worker_id": worker_id, + "message_details": message_details, + }) + + def save_to_gcs(self): + logs_json = json.dumps(self.logs, indent=2) + + bucket_name = 'cameltrain-sight' + json_file_name = f"doing_mq_analysis/{datetime.utcnow().isoformat()}.json" + + # Initialize GCS client + client = storage.Client() + bucket = client.bucket(bucket_name) + blob = bucket.blob(json_file_name) + + blob.upload_from_string(logs_json, content_type="application/json") + + return f"gs://{bucket_name}/{json_file_name}" + + def get_logs(self): + return self.logs + + class MessageQueue(IMessageQueue[T]): """A message queue is a data structure that stores messages. 
@@ -212,6 +254,10 @@ def __init__(
     self.active_lock = lock_factory()
     self.completed_lock = lock_factory()
 
+    # logger
+
+    self.logger = MessageFlowLogger()
+
   def __str__(self) -> str:
     # all_messages = self.get_all_messages()
     messages_status = self.get_status()
@@ -248,6 +294,9 @@ def push_message(self, message: T) -> ID:
     unique_id = self.id_generator.generate_id()
     with self.pending_lock.gen_wlock():
       self.pending[unique_id] = message
+
+    # log the message to logger
+    self.logger.log_message_state(state='pending', message_id=unique_id)
     return unique_id
 
   @overrides
@@ -279,6 +328,12 @@ def create_active_batch(self,
         self.active[worker_id] = {}
       self.active[worker_id].update(batch)
 
+    ## log the messages to logger
+    for message_id in batch.keys():
+      self.logger.log_message_state(state='active',
+                                    message_id=message_id,
+                                    worker_id=worker_id)
+
     return batch
 
   @overrides
@@ -305,6 +360,12 @@ def complete_message(self,
 
       with self.completed_lock.gen_wlock():
         self.completed[message_id] = message
+
+      ## log the message to logger
+      self.logger.log_message_state(state='completed',
+                                    message_id=message_id,
+                                    worker_id=worker_id)
+
     else:
       raise ValueError(
           f'Failed while completing the msg ,as Message ID {message_id} not found for worker {worker_id}'
diff --git a/sight_service/proto/api_descriptor.pb b/sight_service/proto/api_descriptor.pb
index 94ac09d4b126a5b8338f7cd72490a766e4671fae..a97aa9ed4c7f3d238261b542a5dbe7c96ae78c6e 100644
GIT binary patch
delta 1032
[base85-encoded binary delta payload omitted]
diff --git a/sight_service/service_root.py b/sight_service/service_root.py
index 5e5e9c5..d3a9cd2 100644
--- a/sight_service/service_root.py
+++ b/sight_service/service_root.py
@@ -132,6 +132,7 @@ def launch(self,
     """Creates more specific optimizer and use them while responding to clients
     accordingly.
     """
     optimizer_type = request.decision_config_params.optimizer_type
+    mq_batch_size = request.decision_config_params.server_queue_batch_size
     logging.debug(">>>>>>> In %s method of %s file. optimizer_type=%s",
                   sys._getframe().f_code.co_name,
                   os.path.basename(__file__), optimizer_type)
@@ -171,7 +172,8 @@ def launch(self,
       obj = self.instances[request.client_id].launch(request)
       return obj
     elif optimizer_type == sight_pb2.DecisionConfigurationStart.OptimizerType.OT_WORKLIST_SCHEDULER:
-      self.instances[request.client_id] = WorklistScheduler()
+      self.instances[request.client_id] = WorklistScheduler(
+          meta_data={"mq_batch_size": mq_batch_size})
       obj = self.instances[request.client_id].launch(request)
       return obj
     else:
diff --git a/sight_service/single_action_optimizer.py b/sight_service/single_action_optimizer.py
index e648464..d3dcd5f 100644
--- a/sight_service/single_action_optimizer.py
+++ b/sight_service/single_action_optimizer.py
@@ -75,7 +75,7 @@ class SingleActionOptimizer(OptimizerInstance):
   override while communicating with client.
""" - def __init__(self): + def __init__(self, batch_size: int = 5): super().__init__() self.queue: IMessageQueue = MessageQueue[MessageDetails]( - id_generator=IncrementalUUID()) + id_generator=IncrementalUUID(), batch_size=batch_size) diff --git a/sight_service/worklist_scheduler_opt.py b/sight_service/worklist_scheduler_opt.py index ffd50a1..837d821 100644 --- a/sight_service/worklist_scheduler_opt.py +++ b/sight_service/worklist_scheduler_opt.py @@ -38,8 +38,9 @@ class WorklistScheduler(SingleActionOptimizer): of this attribute. """ - def __init__(self): - super().__init__() + def __init__(self, meta_data: dict[str, any]): + mq_batch_size = meta_data["mq_batch_size"] + super().__init__(batch_size=mq_batch_size) self.next_sample_to_issue = [] self.last_sample = False self.exp_completed = False @@ -192,10 +193,15 @@ def close(self, request: service_pb2.CloseRequest) -> service_pb2.CloseResponse: method_name = "close" logging.debug(">>>> In %s of %s", method_name, _file_name) + if not self.exp_completed: + logging.info("<<<