
Commit ae482b4

Refine the comment
1 parent 58856ce commit ae482b4

File tree

2 files changed: +14 −14 lines changed

torch_xla/csrc/aten_cpu_fallback.cpp

Lines changed: 2 additions & 2 deletions
@@ -17,8 +17,8 @@ static std::unordered_map<std::string, ::xla::metrics::Counter*>
     _cpu_fallback_counters;
 
 void xla_cpu_fallback(const c10::OperatorHandle& op, torch::jit::Stack* stack) {
-  std::cout << "WONJOO: at aten_cpu_fallback.cpp, xla_cpu_fallback1"
-            << std::endl;
+  // std::cout << "WONJOO: at aten_cpu_fallback.cpp, xla_cpu_fallback1"
+  //           << std::endl;
   XLA_FN_TRACK(3);
   const auto name = c10::toString(op.operator_name());

torch_xla/csrc/aten_xla_type.cpp

Lines changed: 12 additions & 12 deletions
@@ -53,8 +53,8 @@ namespace {
 at::Tensor to_meta(const at::Tensor& tensor) {
   // undefined tensors can't be converted to the meta device, since they don't
   // have sizes/strides
-  std::cout << "WONJOO: at aten_xla_type.cpp, to_meta1" << std::endl;
-  std::cout << "WONJOO: at aten_xla_type.cpp, to_meta2, tensor_is_functional=" << at::functionalization::impl::isFunctionalTensor(tensor) << std::endl;
+  // std::cout << "WONJOO: at aten_xla_type.cpp, to_meta1" << std::endl;
+  // std::cout << "WONJOO: at aten_xla_type.cpp, to_meta2, tensor_is_functional=" << at::functionalization::impl::isFunctionalTensor(tensor) << std::endl;
   if (!tensor.defined()) return tensor;
   auto out = at::native::empty_strided_meta_symint(
       tensor.sym_sizes(), tensor.sym_strides(),
@@ -460,7 +460,7 @@ at::Tensor& XLANativeFunctions::_amp_update_scale_(at::Tensor& current_scale,
 at::Tensor XLANativeFunctions::_copy_from(const at::Tensor& self,
                                           const at::Tensor& dst,
                                           bool non_blocking) {
-  std::cout << "WONJOO: at aten_xla_type.cpp, _copy_from" << std::endl;
+  // std::cout << "WONJOO: at aten_xla_type.cpp, _copy_from" << std::endl;
   TORCH_LAZY_FN_COUNTER("xla::");
   auto dst_tensor = bridge::TryGetXlaTensor(dst);
   auto self_tensor = bridge::TryGetXlaTensor(self);
@@ -655,7 +655,7 @@ at::Tensor XLANativeFunctions::argmin(const at::Tensor& self,
 at::Tensor XLANativeFunctions::as_strided_copy(
     const at::Tensor& self, at::IntArrayRef size, at::IntArrayRef stride,
     c10::optional<int64_t> storage_offset) {
-  std::cout << "WONJOO: at aten_xla_type.cpp, as_strided_copy1" << std::endl;
+  // std::cout << "WONJOO: at aten_xla_type.cpp, as_strided_copy1" << std::endl;
   TORCH_LAZY_FN_COUNTER("xla::");
   XLATensorPtr self_tensor = bridge::GetXlaTensor(self);
   auto xsize = XlaHelpers::I64List(size);
@@ -675,7 +675,7 @@ at::Tensor XLANativeFunctions::as_strided_scatter(
     const at::Tensor& base, const at::Tensor& mutated_view,
     at::IntArrayRef size, at::IntArrayRef stride,
     c10::optional<int64_t> storage_offset) {
-  std::cout << "WONJOO: at aten_xla_type.cpp, as_strided_scatter1" << std::endl;
+  // std::cout << "WONJOO: at aten_xla_type.cpp, as_strided_scatter1" << std::endl;
   TORCH_LAZY_FN_COUNTER("xla::");
   auto base_ = bridge::GetXlaTensor(base);
   auto xsize = XlaHelpers::I64List(size);
@@ -1026,7 +1026,7 @@ at::Tensor XLANativeFunctions::cumsum(const at::Tensor& self, int64_t dim,
                       tensor_methods::cumsum(self_tensor, dim, dtype));
 }
 
-// Let's rewrite a without reusing other native functions.
+// TODO(alanwaketan): Let's rewrite a without reusing other native functions.
 at::Tensor XLANativeFunctions::detach_copy(const at::Tensor& self) {
   TORCH_LAZY_FN_COUNTER("xla::");
   auto new_tensor = empty_symint(self.sym_sizes(), c10::nullopt, c10::nullopt, c10::nullopt, c10::nullopt, c10::nullopt);
@@ -1160,7 +1160,7 @@ at::Tensor XLANativeFunctions::empty_symint(
     c10::optional<at::Layout> layout, c10::optional<at::Device> device,
     c10::optional<bool> pin_memory,
     c10::optional<at::MemoryFormat> /* memory_format */) {
-  std::cout << "WONJOO: at XLANativeFunctions::empty_symint" << std::endl;
+  // std::cout << "WONJOO: at XLANativeFunctions::empty_symint" << std::endl;
   TORCH_LAZY_FN_COUNTER("xla::");
   auto size = c10::asIntArrayRefSlow(sym_size);
   // PT empty*() are optimizations to avoid initializing the data when it is
@@ -1394,8 +1394,8 @@ at::Tensor& XLANativeFunctions::index_fill_(at::Tensor& self, int64_t dim,
 at::Tensor& XLANativeFunctions::index_put_(
     at::Tensor& self, const c10::List<c10::optional<at::Tensor>>& indices,
     const at::Tensor& values, bool accumulate) {
-  std::cout << "WONJOO: at aten_xla_type.cpp, input_put_1" << std::endl;
-  std::cout << "WONJOO: at aten_xla_type.cpp, input_put_2, self.is_functional=" << at::functionalization::impl::isFunctionalTensor(self) << std::endl;
+  // std::cout << "WONJOO: at aten_xla_type.cpp, input_put_1" << std::endl;
+  // std::cout << "WONJOO: at aten_xla_type.cpp, input_put_2, self.is_functional=" << at::functionalization::impl::isFunctionalTensor(self) << std::endl;
   TORCH_LAZY_FN_COUNTER("xla::");
   XLA_CHECK(self.scalar_type() == values.scalar_type());
   CanonicalIndexInfo canonical_index_info =
@@ -1414,7 +1414,7 @@ at::Tensor& XLANativeFunctions::index_put_(
                              canonical_index_info.start_dim,
                              bridge::GetOrCreateXlaTensor(values, *device), accumulate,
                              canonical_index_info.result_permutation);
-  std::cout << "WONJOO: at aten_xla_type.cpp, input_put_3, self.is_functional=" << at::functionalization::impl::isFunctionalTensor(self) << std::endl;
+  // std::cout << "WONJOO: at aten_xla_type.cpp, input_put_3, self.is_functional=" << at::functionalization::impl::isFunctionalTensor(self) << std::endl;
   return self;
 }
 
@@ -1485,14 +1485,14 @@ at::Tensor XLANativeFunctions::lerp(const at::Tensor& self,
 }
 
 at::Tensor XLANativeFunctions::lift(const at::Tensor& tensor) {
-  std::cout << "WONJOO: at XLANativeFunctions::lift" << std::endl;
+  // std::cout << "WONJOO: at XLANativeFunctions::lift" << std::endl;
   TORCH_INTERNAL_ASSERT(
       !at::functionalization::impl::isFunctionalTensor(tensor));
   return at::functionalization::impl::to_functional_tensor(tensor);
 }
 
 at::Tensor XLANativeFunctions::lift_fresh(const at::Tensor& tensor) {
-  std::cout << "WONJOO: at XLANativeFunctions::lift_fresh" << std::endl;
+  // std::cout << "WONJOO: at XLANativeFunctions::lift_fresh" << std::endl;
   TORCH_INTERNAL_ASSERT(
       !at::functionalization::impl::isFunctionalTensor(tensor));
   return at::functionalization::impl::to_functional_tensor(tensor);
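
For illustration only, not part of this commit: the change above silences ad-hoc debug prints by commenting them out at each call site. A common alternative is to gate such prints behind a compile-time switch so they can be re-enabled in one place. The sketch below assumes hypothetical names (XLA_ENABLE_DEBUG_TRACE, XLA_DEBUG_TRACE); they are not existing torch_xla symbols.

// Minimal sketch: a compile-time-gated debug trace macro.
// XLA_ENABLE_DEBUG_TRACE and XLA_DEBUG_TRACE are hypothetical names,
// used here only to illustrate the pattern.
#include <iostream>

#ifdef XLA_ENABLE_DEBUG_TRACE
#define XLA_DEBUG_TRACE(msg) (std::cout << (msg) << std::endl)
#else
#define XLA_DEBUG_TRACE(msg) ((void)0)  // compiles to nothing when disabled
#endif

void to_meta_example() {
  // Stands in for the commented-out line in the diff:
  //   // std::cout << "WONJOO: at aten_xla_type.cpp, to_meta1" << std::endl;
  XLA_DEBUG_TRACE("WONJOO: at aten_xla_type.cpp, to_meta1");
}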
