10 changes: 0 additions & 10 deletions torch_xla/core/xla_model.py
@@ -22,7 +22,6 @@
 from torch_xla.distributed.spmd.xla_sharding import ShardingSpec
 from torch_xla.distributed.xla_multiprocessing import create_optimized_replica_groups
 import os
-from torch_xla.experimental.deprecation import deprecated
 import torch_xla._internal.utils as _utils
 
 _DEVICES = xu.LazyProperty(lambda: torch_xla._XLAC._xla_get_devices())
@@ -41,15 +40,6 @@
 
 from . import xla_model as this_module
 
-xrt_world_size = deprecated(this_module, torch_xla.runtime.world_size,
-                            'xrt_world_size() will be removed in release 2.7.')
-get_ordinal = deprecated(
-    this_module, torch_xla.runtime.global_ordinal,
-    'xla_model.get_ordinal() will be removed in release 2.7.')
-parse_xla_device = deprecated(
-    this_module, _utils.parse_xla_device,
-    'xla_model.parse_xla_device() will be removed in release 2.7.')
-
 
 class DeviceContext(object):
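For callers still on the removed aliases, the deprecation messages above already name the replacements in `torch_xla.runtime`. A minimal migration sketch (the `xr` module alias is conventional, not part of this change; `parse_xla_device` keeps only the internal `torch_xla._internal.utils.parse_xla_device`, with no public replacement):

```python
import torch_xla.runtime as xr

# Before (removed by this PR):
#   import torch_xla.core.xla_model as xm
#   world_size = xm.xrt_world_size()
#   ordinal = xm.get_ordinal()

# After: call the runtime module directly.
world_size = xr.world_size()     # replaces xm.xrt_world_size()
ordinal = xr.global_ordinal()    # replaces xm.get_ordinal()
```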
3 changes: 1 addition & 2 deletions torch_xla/experimental/__init__.py
@@ -1,8 +1,7 @@
-from .eager import eager_mode, compile, is_eager_mode, eager_mode_context
+from .eager import eager_mode, is_eager_mode, eager_mode_context
 
 __all__ = [
     "eager_mode",
-    "compile",
     "is_eager_mode",
     "eager_mode_context",
 ]
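Code that has to run on both sides of this change can guard the import. A small compatibility sketch (the `xla_compile` alias is hypothetical, chosen to avoid shadowing the builtin `compile`):

```python
try:
  # Releases where the experimental alias still exists (emits a warning).
  from torch_xla.experimental import compile as xla_compile
except ImportError:
  # After this PR: only the top-level entry point remains.
  from torch_xla import compile as xla_compile
```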
8 changes: 0 additions & 8 deletions torch_xla/experimental/eager.py
@@ -30,11 +30,3 @@ def eager_mode_context(enable: bool):
     yield saved_eager_mode
   finally:
     eager_mode(saved_eager_mode)
-
-
-def compile(func):
-  # can't use deprecated wrapper at import time due to circular dependency
-  logging.warning(
-      'torch_xla.experimental.compile is deprecated. Use torch_xla.compile instead.'
-  )
-  return torch_xla.compile(func)
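Since the deprecated shim simply forwarded to `torch_xla.compile`, migration is a one-line change. A minimal sketch, assuming the top-level `torch_xla.device()` helper from recent releases (`step_fn` is a hypothetical example function):

```python
import torch
import torch_xla

def step_fn(a, b):
  return (a + b).sum()

# Before (removed by this PR):
#   from torch_xla.experimental import compile
#   compiled = compile(step_fn)

# After: use the top-level entry point, as the removed warning advised.
compiled = torch_xla.compile(step_fn)

device = torch_xla.device()
print(compiled(torch.randn(4, device=device), torch.randn(4, device=device)))
```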