LegacyTHFunctionsCPU.cpp (forked from pytorch/pytorch)
#include <ATen/LegacyTHFunctionsCPU.h>
#include <ATen/ATen.h>
#include <ATen/Utils.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/CPUGeneratorImpl.h>
#include <ATen/ExpandUtils.h>
#include <TH/TH.h>
#include <TH/THTensor.hpp>
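
// Remaining legacy CPU bindings that still dispatch into the old TH library
// kernels (TH*Tensor_histc) rather than native ATen implementations.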
namespace at {
namespace native {
namespace legacy {
namespace cpu {
namespace {
  ScalarType infer_scalar_type(const Tensor & t) {
    return t.scalar_type();
  }
  // NOLINTNEXTLINE(clang-diagnostic-unused-function)
  ScalarType infer_scalar_type(const TensorList & tl) {
    TORCH_CHECK(tl.size() > 0, "expected a non-empty list of Tensors");
    return tl[0].scalar_type();
  }

  TensorOptions options(ScalarType s) {
    return TensorOptions().dtype(s)
                          .device(DeviceType::CPU)
                          .layout(kStrided);
  }

  Allocator* allocator() {
    return getCPUAllocator();
  }
} // namespace
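
// Legacy TH binding for histc (CPU): fills `result` with a histogram of the
// values in `self`, using `bins` bins over the range [min, max]. Only Double
// and Float have TH kernels here, so all other dtypes fall through to AT_ERROR.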
Tensor & _th_histc_out(const Tensor & self, int64_t bins, const Scalar& min, const Scalar& max, Tensor & result) {
    // DeviceGuard omitted
    auto dispatch_scalar_type = infer_scalar_type(self);

    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_histc_out", false, DeviceType::CPU, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_histc_out", false, DeviceType::CPU, dispatch_scalar_type);
            auto min_ = min.toDouble();
            auto max_ = max.toDouble();
            THDoubleTensor_histc(result_, self_, bins, min_, max_);
            break;
        }
        case ScalarType::Float: {
            auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_histc_out", false, DeviceType::CPU, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_histc_out", false, DeviceType::CPU, dispatch_scalar_type);
            auto min_ = min.toFloat();
            auto max_ = max.toFloat();
            THFloatTensor_histc(result_, self_, bins, min_, max_);
            break;
        }
        default:
            AT_ERROR("_th_histc_out not supported on CPUType for ", dispatch_scalar_type);
    }
    return result;
}
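
// Allocating variant: constructs an empty CPU tensor with the same scalar type
// as `self`, then dispatches to the same TH histc kernels as _th_histc_out.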
Tensor _th_histc(const Tensor & self, int64_t bins, const Scalar& min, const Scalar& max) {
    // DeviceGuard omitted
    auto dispatch_scalar_type = infer_scalar_type(self);
    auto result_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true), DispatchKey::CPU, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
    auto result = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(result_));

    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_histc", false, DeviceType::CPU, dispatch_scalar_type);
            auto min_ = min.toDouble();
            auto max_ = max.toDouble();
            THDoubleTensor_histc(result_, self_, bins, min_, max_);
            break;
        }
        case ScalarType::Float: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_histc", false, DeviceType::CPU, dispatch_scalar_type);
            auto min_ = min.toFloat();
            auto max_ = max.toFloat();
            THFloatTensor_histc(result_, self_, bins, min_, max_);
            break;
        }
        default:
            AT_ERROR("_th_histc not supported on CPUType for ", dispatch_scalar_type);
    }
    return result;
}
} // namespace cpu
} // namespace legacy
} // namespace native
} // namespace at