@@ -547,7 +547,7 @@ at::Tensor conv_forward_gather_scatter_cuda_latest(
 
   // all gather
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      in_feat.type(), "conv_forward_gather_scatter_cuda", ([&] {
+      in_feat.scalar_type(), "conv_forward_gather_scatter_cuda", ([&] {
         gather_all_kernel_pad_sep_with_mask<scalar_t>
             <<<ceil((double)(n_in_feats * n_in_channels) /
                     (256 << (sizeof(scalar_t) == 2) + 2)),
@@ -779,7 +779,7 @@ at::Tensor conv_forward_gather_scatter_cuda_fallback(
   // gather n_active_feats dense features from N sparse input features with c
   // feature dimensions
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      in_feat.type(), "conv_forward_gather_scatter_cuda", ([&] {
+      in_feat.scalar_type(), "conv_forward_gather_scatter_cuda", ([&] {
         gather_kernel<scalar_t>
             <<<ceil((double)(n_active_feats * n_in_channels) / 256), 256>>>(
                 n_active_feats, n_in_feats, n_in_channels,
@@ -796,7 +796,7 @@ at::Tensor conv_forward_gather_scatter_cuda_fallback(
   // scatter n_active_feats dense features into n_out_feats output features of
   // dimension n_out_channels
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      in_feat.type(), "conv_forward_gather_scatter_cuda", ([&] {
+      in_feat.scalar_type(), "conv_forward_gather_scatter_cuda", ([&] {
         scatter_kernel<scalar_t>
             <<<ceil((double)(n_active_feats * n_out_channels) / 256), 256>>>(
                 n_active_feats, n_out_feats, n_out_channels,
@@ -877,7 +877,7 @@ void conv_backward_gather_scatter_cuda(at::Tensor in_feat, at::Tensor grad_in_fe
   }
   // gather
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      in_feat.type(), "conv_forward_gather_scatter_cuda", ([&] {
+      in_feat.scalar_type(), "conv_forward_gather_scatter_cuda", ([&] {
         gather_kernel<scalar_t>
             <<<ceil((double)(n_active_feats * n_out_channels) / 256), 256>>>(
                 n_active_feats, n_out_feats, n_out_channels,
@@ -886,7 +886,7 @@ void conv_backward_gather_scatter_cuda(at::Tensor in_feat, at::Tensor grad_in_fe
                 neighbor_map.data_ptr<int>() + cur_offset, !transpose);
       }));
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      in_feat.type(), "conv_forward_gather_scatter_cuda", ([&] {
+      in_feat.scalar_type(), "conv_forward_gather_scatter_cuda", ([&] {
         gather_kernel<scalar_t>
             <<<ceil((double)(n_active_feats * n_in_channels) / 256), 256>>>(
                 n_active_feats, n_in_feats, n_in_channels,
@@ -902,7 +902,7 @@ void conv_backward_gather_scatter_cuda(at::Tensor in_feat, at::Tensor grad_in_fe
                 out_grad_buffer_activated);
   // scatter
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      in_feat.type(), "conv_forward_gather_scatter_cuda", ([&] {
+      in_feat.scalar_type(), "conv_forward_gather_scatter_cuda", ([&] {
         scatter_kernel<scalar_t>
             <<<ceil((double)(n_active_feats * n_in_channels) / 256), 256>>>(
                 n_active_feats, n_in_feats, n_in_channels,
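
For context on the change itself: `at::Tensor::type()` is deprecated in recent PyTorch releases, and the `AT_DISPATCH_*` macros expect an `at::ScalarType`, which `Tensor::scalar_type()` returns. Below is a minimal sketch of that dispatch pattern on a contiguous CPU tensor; the `fill_ones` helper is hypothetical and not part of this PR.

```cpp
// Minimal sketch (assumption: a contiguous CPU tensor) of the dispatch
// pattern this diff migrates to: pass the tensor's ScalarType via
// scalar_type(), not the deprecated Tensor::type().
#include <ATen/ATen.h>
#include <ATen/Dispatch.h>

void fill_ones(at::Tensor t) {  // hypothetical helper, for illustration only
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      t.scalar_type(), "fill_ones", ([&] {
        // scalar_t is defined by the macro for the dispatched dtype
        // (float, double, or at::Half).
        auto* data = t.data_ptr<scalar_t>();
        for (int64_t i = 0; i < t.numel(); ++i) {
          data[i] = static_cast<scalar_t>(1.0f);
        }
      }));
}
```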