@@ -53,8 +53,8 @@ namespace {
5353at::Tensor to_meta (const at::Tensor& tensor) {
5454 // undefined tensors can't be converted to the meta device, since they don't
5555 // have sizes/strides
56- std::cout << " WONJOO: at aten_xla_type.cpp, to_meta1" << std::endl;
57- std::cout << " WONJOO: at aten_xla_type.cpp, to_meta2, tensor_is_functional=" << at::functionalization::impl::isFunctionalTensor (tensor) << std::endl;
56+ // std::cout << "WONJOO: at aten_xla_type.cpp, to_meta1" << std::endl;
57+ // std::cout << "WONJOO: at aten_xla_type.cpp, to_meta2, tensor_is_functional=" << at::functionalization::impl::isFunctionalTensor(tensor) << std::endl;
5858 if (!tensor.defined ()) return tensor;
5959 auto out = at::native::empty_strided_meta_symint (
6060 tensor.sym_sizes (), tensor.sym_strides (),
@@ -460,7 +460,7 @@ at::Tensor& XLANativeFunctions::_amp_update_scale_(at::Tensor& current_scale,
460460at::Tensor XLANativeFunctions::_copy_from (const at::Tensor& self,
461461 const at::Tensor& dst,
462462 bool non_blocking) {
463- std::cout << " WONJOO: at aten_xla_type.cpp, _copy_from" << std::endl;
463+ // std::cout << "WONJOO: at aten_xla_type.cpp, _copy_from" << std::endl;
464464 TORCH_LAZY_FN_COUNTER (" xla::" );
465465 auto dst_tensor = bridge::TryGetXlaTensor (dst);
466466 auto self_tensor = bridge::TryGetXlaTensor (self);
@@ -655,7 +655,7 @@ at::Tensor XLANativeFunctions::argmin(const at::Tensor& self,
655655at::Tensor XLANativeFunctions::as_strided_copy (
656656 const at::Tensor& self, at::IntArrayRef size, at::IntArrayRef stride,
657657 c10::optional<int64_t > storage_offset) {
658- std::cout << " WONJOO: at aten_xla_type.cpp, as_strided_copy1" << std::endl;
658+ // std::cout << "WONJOO: at aten_xla_type.cpp, as_strided_copy1" << std::endl;
659659 TORCH_LAZY_FN_COUNTER (" xla::" );
660660 XLATensorPtr self_tensor = bridge::GetXlaTensor (self);
661661 auto xsize = XlaHelpers::I64List (size);
@@ -675,7 +675,7 @@ at::Tensor XLANativeFunctions::as_strided_scatter(
675675 const at::Tensor& base, const at::Tensor& mutated_view,
676676 at::IntArrayRef size, at::IntArrayRef stride,
677677 c10::optional<int64_t > storage_offset) {
678- std::cout << " WONJOO: at aten_xla_type.cpp, as_strided_scatter1" << std::endl;
678+ // std::cout << "WONJOO: at aten_xla_type.cpp, as_strided_scatter1" << std::endl;
679679 TORCH_LAZY_FN_COUNTER (" xla::" );
680680 auto base_ = bridge::GetXlaTensor (base);
681681 auto xsize = XlaHelpers::I64List (size);
@@ -1026,7 +1026,7 @@ at::Tensor XLANativeFunctions::cumsum(const at::Tensor& self, int64_t dim,
10261026 tensor_methods::cumsum (self_tensor, dim, dtype));
10271027}
10281028
1029- // Let's rewrite a without reusing other native functions.
1029+ // TODO(alanwaketan): Let's rewrite it without reusing other native functions.
10301030at::Tensor XLANativeFunctions::detach_copy (const at::Tensor& self) {
10311031 TORCH_LAZY_FN_COUNTER (" xla::" );
10321032 auto new_tensor = empty_symint (self.sym_sizes (), c10::nullopt , c10::nullopt , c10::nullopt , c10::nullopt , c10::nullopt );
@@ -1160,7 +1160,7 @@ at::Tensor XLANativeFunctions::empty_symint(
11601160 c10::optional<at::Layout> layout, c10::optional<at::Device> device,
11611161 c10::optional<bool > pin_memory,
11621162 c10::optional<at::MemoryFormat> /* memory_format */ ) {
1163- std::cout << " WONJOO: at XLANativeFunctions::empty_symint" << std::endl;
1163+ // std::cout << "WONJOO: at XLANativeFunctions::empty_symint" << std::endl;
11641164 TORCH_LAZY_FN_COUNTER (" xla::" );
11651165 auto size = c10::asIntArrayRefSlow (sym_size);
11661166 // PT empty*() are optimizations to avoid initializing the data when it is
@@ -1394,8 +1394,8 @@ at::Tensor& XLANativeFunctions::index_fill_(at::Tensor& self, int64_t dim,
13941394at::Tensor& XLANativeFunctions::index_put_ (
13951395 at::Tensor& self, const c10::List<c10::optional<at::Tensor>>& indices,
13961396 const at::Tensor& values, bool accumulate) {
1397- std::cout << " WONJOO: at aten_xla_type.cpp, input_put_1" << std::endl;
1398- std::cout << " WONJOO: at aten_xla_type.cpp, input_put_2, self.is_functional=" << at::functionalization::impl::isFunctionalTensor (self) << std::endl;
1397+ // std::cout << "WONJOO: at aten_xla_type.cpp, input_put_1" << std::endl;
1398+ // std::cout << "WONJOO: at aten_xla_type.cpp, input_put_2, self.is_functional=" << at::functionalization::impl::isFunctionalTensor(self) << std::endl;
13991399 TORCH_LAZY_FN_COUNTER (" xla::" );
14001400 XLA_CHECK (self.scalar_type () == values.scalar_type ());
14011401 CanonicalIndexInfo canonical_index_info =
@@ -1414,7 +1414,7 @@ at::Tensor& XLANativeFunctions::index_put_(
14141414 canonical_index_info.start_dim ,
14151415 bridge::GetOrCreateXlaTensor (values, *device), accumulate,
14161416 canonical_index_info.result_permutation );
1417- std::cout << " WONJOO: at aten_xla_type.cpp, input_put_3, self.is_functional=" << at::functionalization::impl::isFunctionalTensor (self) << std::endl;
1417+ // std::cout << "WONJOO: at aten_xla_type.cpp, input_put_3, self.is_functional=" << at::functionalization::impl::isFunctionalTensor(self) << std::endl;
14181418 return self;
14191419}
14201420
@@ -1485,14 +1485,14 @@ at::Tensor XLANativeFunctions::lerp(const at::Tensor& self,
14851485}
14861486
14871487at::Tensor XLANativeFunctions::lift (const at::Tensor& tensor) {
1488-  std::cout << " WONJOO: at XLANativeFunctions::lift" << std::endl;
1488+  // std::cout << "WONJOO: at XLANativeFunctions::lift" << std::endl;
// Precondition: the input must not already be functionalized; then wrap it
// via to_functional_tensor() and return the functional tensor.
14891489  TORCH_INTERNAL_ASSERT (
14901490      !at::functionalization::impl::isFunctionalTensor (tensor));
14911491  return at::functionalization::impl::to_functional_tensor (tensor);
14921492}
14931493
14941494at::Tensor XLANativeFunctions::lift_fresh (const at::Tensor& tensor) {
1495-  std::cout << " WONJOO: at XLANativeFunctions::lift_fresh" << std::endl;
1495+  // std::cout << "WONJOO: at XLANativeFunctions::lift_fresh" << std::endl;
// Same contract as lift() above as far as visible here: assert the input is
// not already a functional tensor, then wrap it with to_functional_tensor().
// NOTE(review): any lift/lift_fresh behavioral difference is not visible in
// this hunk — confirm against the full source.
14961496  TORCH_INTERNAL_ASSERT (
14971497      !at::functionalization::impl::isFunctionalTensor (tensor));
14981498  return at::functionalization::impl::to_functional_tensor (tensor);
0 commit comments