Skip to content

Fix performance of DPF vector #2249

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 4 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 5 additions & 4 deletions src/ansys/dpf/core/check_version.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,30 +26,31 @@
Used to verify if the server version is a minimum value.
"""

from __future__ import annotations

from functools import wraps
import sys
import weakref

from ansys.dpf.core import errors as dpf_errors


def server_meet_version(required_version, server):
def server_meet_version(required_version, server: BaseServer):
"""Check if a given server version matches with a required version.

Parameters
----------
required_version : str
Required version to compare with the server version.
server : :class:`ansys.dpf.core.server`
server : :class:`ansys.dpf.core.server_types.BaseServer`
DPF server object.

Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
version = get_server_version(server)
return meets_version(version, required_version)
return server.meet_version(required_version)


def server_meet_version_and_raise(required_version, server, msg=None):
Expand Down
27 changes: 18 additions & 9 deletions src/ansys/dpf/core/server_types.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@
import ansys.dpf.core as core
from ansys.dpf.core import __version__, errors, server_context, server_factory
from ansys.dpf.core._version import min_server_version, server_to_ansys_version
from ansys.dpf.core.check_version import server_meet_version
from ansys.dpf.core.check_version import get_server_version, meets_version, server_meet_version
from ansys.dpf.core.server_context import AvailableServerContexts, ServerContext
from ansys.dpf.gate import data_processing_grpcapi, load_api

Expand Down Expand Up @@ -444,6 +444,7 @@ def __init__(self):
self._context = None
self._info_instance = None
self._docker_config = server_factory.RunningDockerConfig()
self._server_meet_version = {}

def set_as_global(self, as_global=True):
"""Set the current server as global if necessary.
Expand Down Expand Up @@ -642,7 +643,11 @@ def meet_version(self, required_version):
bool
``True`` if the server version meets the requirement.
"""
return server_meet_version(required_version, self)
if required_version not in self._server_meet_version:
meet = meets_version(get_server_version(self), required_version)
self._server_meet_version[required_version] = meet
return meet
return self._server_meet_version[required_version]

@property
@abc.abstractmethod
Expand Down Expand Up @@ -1030,6 +1035,8 @@ def config(self):
class InProcessServer(CServer):
"""Server using the InProcess communication protocol."""

_version: str = None

def __init__(
self,
ansys_path: Union[str, None] = None,
Expand Down Expand Up @@ -1080,14 +1087,16 @@ def version(self):
version : str
The version of the InProcess server in the format "major.minor".
"""
from ansys.dpf.gate import data_processing_capi, integral_types
if self._version is None:
from ansys.dpf.gate import data_processing_capi, integral_types

api = data_processing_capi.DataProcessingCAPI
major = integral_types.MutableInt32()
minor = integral_types.MutableInt32()
api.data_processing_get_server_version(major, minor)
out = str(int(major)) + "." + str(int(minor))
return out
api = data_processing_capi.DataProcessingCAPI
major = integral_types.MutableInt32()
minor = integral_types.MutableInt32()
api.data_processing_get_server_version(major, minor)
out = str(int(major)) + "." + str(int(minor))
self._version = out
return self._version

@property
def os(self):
Expand Down
1 change: 0 additions & 1 deletion src/ansys/dpf/gate/dpf_array.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@ def __new__(
obj = vec.np_array.view(cls)
except NotImplementedError as e:
raise TypeError(e.args)
vec.start_checking_modification()
obj.vec = vec
return obj

Expand Down
58 changes: 8 additions & 50 deletions src/ansys/dpf/gate/dpf_vector.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,13 +33,18 @@ class DPFVectorBase:

def __init__(self, owner, api):
self.dpf_vector_api = api
self._modified = False
self._check_changes = True

# The updated version of the DPF vector will always be committed to DPF.
# Ideally, this should be set to True only when modified, however it is not possible to do so efficiently.
# Consequently, for performance reasons, it's much better to always commit the vector to DPF rather than
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@cbellot000 is this also true in the case of a huge vector in gRPC on a remote machine with a high ping?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

My point is that performance improvements reported are for gRPC communications with a local server, is that right?

# verifying whether the vector has changed. See issue #2201.
self._modified = True

try:
self._internal_obj = self.dpf_vector_api.dpf_vector_new_for_object(owner)
if not server_meet_version("4.1",
owner._server) and owner._server.client is None: # BUG in 22.2: DpfVector is not holding the data owner and not call to data owner should be done at delete
self._check_changes = False
self._modified = False
except ctypes.ArgumentError:
raise NotImplementedError

Expand All @@ -53,35 +58,6 @@ def internal_size(self) -> MutableInt32:
"""
return self._array.internal_size

def start_checking_modification(self) -> None:
"""
Takes a deep copy of the current data as a numpy array
in self._initial_data, if self._check_changes is set to True.
In that case, at deletion, the current data is compared to the initial one
and the data is updated server side if it has changed.

Notes
-----
self._check_changes is set to True by default when a client is added at the class init

"""
if self._check_changes:
self._initial_data = copy.deepcopy(self.np_array)

def has_changed(self):
"""
If self._check_changes is set to True, compares the initial data computed in
```start_checking_modification``` to the current one.

Notes
-----
self._check_changes is set to True by default when a client is added at the class init
"""
if self._check_changes:
if self._modified or not np.allclose(self._initial_data, self.np_array):
self._modified = True
return self._modified

@property
def np_array(self) -> np.ndarray:
"""
Expand All @@ -103,21 +79,6 @@ def size(self) -> int:
"""Size of the data array (returns a copy)"""
return int(self.internal_size)

def start_checking_modification(self) -> None:
"""
Takes a deep copy of the current data as a numpy array
in self._initial_data, if self._check_changes is set to True.
In that case, at deletion, the current data is compared to the initial one
and the data is updated server side if it has changed.

Notes
-----
self._check_changes is set to True by default when a client is added at the class init

"""
if self._check_changes:
self._initial_data = copy.deepcopy(self.np_array)

def has_changed(self):
"""
If self._check_changes is set to True, compares the initial data computed in
Expand All @@ -127,9 +88,6 @@ def has_changed(self):
-----
self._check_changes is set to True by default when a client is added at the class init
"""
if self._check_changes:
if self._modified or not np.allclose(self._initial_data, self.np_array):
self._modified = True
return self._modified

def __del__(self):
Expand Down
60 changes: 60 additions & 0 deletions tests/test_dpf_vector.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
# Copyright (C) 2020 - 2025 ANSYS, Inc. and/or its affiliates.
# SPDX-License-Identifier: MIT
#
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import numpy as np

from ansys.dpf import core as dpf
from ansys.dpf.core import fields_factory


def test_perf_vec_setters(server_type):
    """Exercise chunked in-place writes to field data and scoping ids.

    Regression test for the DPF vector setter performance issue (#2201):
    repeatedly assigning into ``field.data`` / ``field.scoping.ids`` by
    index chunks must not trigger a full change-detection pass per write.
    """
    n = int(1e5)
    field = fields_factory.create_scalar_field(
        num_entities=n, location=dpf.locations.elemental, server=server_type
    )
    field.name = "my_field"
    # Start from all-zero data and ids so every chunk write is a real mutation.
    field.data = np.zeros(n, dtype=np.int32)
    field.scoping.ids = np.zeros(n, dtype=np.int32)

    # 200 roughly equal index chunks covering the whole field.
    for i, piece in enumerate(np.array_split(np.arange(n), 200)):
        field.data[piece] = int(i)
        field.scoping.ids[piece] = piece


def test_perf_vec_getters(server_type):
    """Exercise chunked reads from field data and scoping ids.

    Regression test for the DPF vector getter performance issue (#2201):
    repeatedly indexing ``field.data`` / ``field.scoping.ids`` by chunks
    must not trigger a deep copy / change-detection pass per read.
    """
    num_entities = int(1e5)
    field = fields_factory.create_scalar_field(
        num_entities=num_entities, location=dpf.locations.elemental, server=server_type
    )
    field.name = "my_field"
    field.data = np.zeros(num_entities, dtype=np.int32)
    field.scoping.ids = np.zeros(num_entities, dtype=np.int32)

    all_indices = np.arange(num_entities)
    chunks = np.array_split(all_indices, 200)

    # Only the access itself matters here; results are intentionally discarded.
    # (Original bound both reads to the same unused local `d` via enumerate;
    # the index and the variable were never used — F841.)
    for chunk in chunks:
        _ = field.data[chunk]
        _ = field.scoping.ids[chunk]
Loading