Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
58 changes: 41 additions & 17 deletions backends/apple/coreml/compiler/torch_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,24 +47,48 @@ def split_copy(context, node):
split(context, node)


@register_torch_op(
torch_alias=[
"dim_order_ops::_to_dim_order_copy",
"dim_order_ops._to_dim_order_copy",
],
override=False,
)
def _to_dim_order_copy(context, node):
dim_order = _get_kwinputs(context, node, "dim_order", default=[None])[0]
node.kwinputs.pop("dim_order")
def is_fbcode():
    """Return True when running under Meta's internal fbcode build of torch.

    OSS torch wheels expose ``torch.version.git_version``; the attribute is
    absent in fbcode builds, so its absence identifies that environment.
    """
    has_oss_git_version = hasattr(_torch.version, "git_version")
    return not has_oss_git_version

# In CoreML, dim_order.val will be an ndarray, so we convert it to a list
dim_order = [int(d) for d in dim_order.val]
memory_format = get_memory_format(dim_order)
assert (
memory_format == _torch.contiguous_format
), "Only contiguous memory format is supported in CoreML"
to(context, node)

if not is_fbcode():
    # coremltools >= 9.0 ships converters for the dim_order ops, but it only
    # registered the "::"-spelled names; pull in the real implementations so
    # the dotted aliases below can delegate to them.
    from coremltools.converters.mil.frontend.torch.dim_order_ops import (
        _empty_dim_order,
        _to_dim_order_copy,
    )

    # Temporary hack: register the dotted alias
    # "dim_order_ops._to_dim_order_copy", which was missed by coremltools.
    @register_torch_op(torch_alias=["dim_order_ops._to_dim_order_copy"], override=False)
    def _to_dim_order_copy_TMP_EXECUTORCH_ALIAS_HACK(context, node):
        _to_dim_order_copy(context, node)

    # Temporary hack: register the dotted alias
    # "dim_order_ops._empty_dim_order", which was missed by coremltools.
    @register_torch_op(torch_alias=["dim_order_ops._empty_dim_order"], override=False)
    def _empty_dim_order_TMP_EXECUTORCH_ALIAS_HACK(context, node):
        _empty_dim_order(context, node)

else:
    # TODO: remove this fallback once fbcode picks up coremltools 9.0.
    @register_torch_op(
        torch_alias=[
            "dim_order_ops::_to_dim_order_copy",
            "dim_order_ops._to_dim_order_copy",
        ],
        override=False,
    )
    def _to_dim_order_copy(context, node):
        # Strip the dim_order kwarg so the generic `to` converter below
        # never sees an argument it does not understand.
        dim_order_arg = _get_kwinputs(context, node, "dim_order", default=[None])[0]
        node.kwinputs.pop("dim_order")

        # In CoreML, dim_order_arg.val is an ndarray; convert to a plain
        # list of ints before interpreting it as a memory format.
        order = [int(d) for d in dim_order_arg.val]
        memory_format = get_memory_format(order)
        assert memory_format == _torch.contiguous_format, (
            "Only contiguous memory format is supported in CoreML"
        )
        to(context, node)


# https://github.com/apple/coremltools/pull/2558
Expand Down
2 changes: 1 addition & 1 deletion backends/apple/coreml/scripts/install_requirements.sh
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ SCRIPT_DIR_PATH="$(

# TODO(jathu): remove the need to fetch coremltools to build deps for coreml_executor_runner.
# Keep this version in sync with: pyproject.toml
COREMLTOOLS_VERSION="8.3"
COREMLTOOLS_VERSION="9.0b1"

red=`tput setaf 1`
green=`tput setaf 2`
Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ dependencies=[
# See also third-party/TARGETS for buck's typing-extensions version.
"typing-extensions>=4.10.0",
# Keep this version in sync with: ./backends/apple/coreml/scripts/install_requirements.sh
"coremltools==8.3; platform_system == 'Darwin' or platform_system == 'Linux'",
"coremltools==9.0b1; platform_system == 'Darwin' or platform_system == 'Linux'",
# scikit-learn is used to support palettization in the coreml backend
"scikit-learn==1.7.1",
"hydra-core>=1.3.0",
Expand Down
Loading