
Commit fa521a9

wyli and monai-bot authored
fixes ci tests (#1029)
* fixes ci tests (Signed-off-by: Wenqi Li <[email protected]>)
* remove temp tests (Signed-off-by: Wenqi Li <[email protected]>)
* fixes type hint in imports (Signed-off-by: Wenqi Li <[email protected]>)
* [MONAI] python code formatting (Signed-off-by: monai-bot <[email protected]>)
* fixes min. dep. tests (Signed-off-by: Wenqi Li <[email protected]>)
* exclude pil_reader in min_test (Signed-off-by: Wenqi Li <[email protected]>)
* fixes build warnings (Signed-off-by: Wenqi Li <[email protected]>)
* remove temp tests (Signed-off-by: Wenqi Li <[email protected]>)

Co-authored-by: monai-bot <[email protected]>
1 parent 543e2a0 commit fa521a9

11 files changed (+75 / -103 lines)

.github/workflows/setupapp.yml

+1 -2

@@ -124,11 +124,10 @@ jobs:
       name: Install torch cpu from pytorch.org (Windows only)
       run: |
         python -m pip install torch==1.4 -f https://download.pytorch.org/whl/cpu/torch_stable.html
-        python -m pip install torchvision==0.5.0
     - name: Install the dependencies
       run: |
         # min. requirements for windows instances
-        python -m pip install torch==1.4 torchvision==0.5.0
+        python -m pip install torch==1.4
         python -c "f=open('requirements-dev.txt', 'r'); txt=f.readlines(); f.close(); print(txt); f=open('requirements-dev.txt', 'w'); f.writelines(txt[1:5]); f.close()"
         cat "requirements-dev.txt"
         python -m pip install -r requirements-dev.txt
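
The inline python -c step above trims requirements-dev.txt so the Windows job installs only a small slice of the dev requirements. A more readable sketch of what that one-liner does (the slice txt[1:5], i.e. lines 2 to 5 of the file, is specific to this workflow):

    # Rewrite requirements-dev.txt keeping only lines 2-5, mirroring the inline
    # `python -c` command in the workflow step above.
    with open("requirements-dev.txt", "r") as f:
        lines = f.readlines()

    print(lines)  # log the original contents in the CI output

    with open("requirements-dev.txt", "w") as f:
        f.writelines(lines[1:5])  # keep only the minimal Windows subset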

monai/csrc/lltm/lltm.h

+5 -20

@@ -12,6 +12,7 @@ limitations under the License.
 */
 
 #pragma once
+
 #include <torch/extension.h>
 #include <vector>
 #include "utils/common_utils.h"
@@ -85,8 +86,8 @@ std::vector<torch::Tensor> lltm_backward(
     torch::Tensor candidate_cell,
     torch::Tensor X,
     torch::Tensor gate_weights,
-    torch::Tensor weights){
-  if(X.is_cuda()) {
+    torch::Tensor weights) {
+  if (X.is_cuda()) {
 #ifdef WITH_CUDA
     CHECK_CONTIGUOUS_CUDA(grad_h);
     CHECK_CONTIGUOUS_CUDA(grad_cell);
@@ -99,27 +100,11 @@ std::vector<torch::Tensor> lltm_backward(
     CHECK_CONTIGUOUS_CUDA(weights);
 
     return lltm_cuda_backward(
-        grad_h,
-        grad_cell,
-        new_cell,
-        input_gate,
-        output_gate,
-        candidate_cell,
-        X,
-        gate_weights,
-        weights);
+        grad_h, grad_cell, new_cell, input_gate, output_gate, candidate_cell, X, gate_weights, weights);
 #else
     AT_ERROR("Not compiled with GPU support.");
 #endif
   }
   return lltm_cpu_backward(
-      grad_h,
-      grad_cell,
-      new_cell,
-      input_gate,
-      output_gate,
-      candidate_cell,
-      X,
-      gate_weights,
-      weights);
+      grad_h, grad_cell, new_cell, input_gate, output_gate, candidate_cell, X, gate_weights, weights);
 }

monai/csrc/lltm/lltm_cpu.cpp

+2 -9

@@ -50,13 +50,7 @@ std::vector<torch::Tensor> lltm_cpu_forward(
   auto new_cell = old_cell + candidate_cell * input_gate;
   auto new_h = torch::tanh(new_cell) * output_gate;
 
-  return {new_h,
-          new_cell,
-          input_gate,
-          output_gate,
-          candidate_cell,
-          X,
-          gate_weights};
+  return {new_h, new_cell, input_gate, output_gate, candidate_cell, X, gate_weights};
 }
 
 std::vector<torch::Tensor> lltm_cpu_backward(
@@ -82,8 +76,7 @@ std::vector<torch::Tensor> lltm_cpu_backward(
   d_output_gate *= d_sigmoid(gates[1]);
   d_candidate_cell *= d_elu(gates[2]);
 
-  auto d_gates =
-      torch::cat({d_input_gate, d_output_gate, d_candidate_cell}, /*dim=*/1);
+  auto d_gates = torch::cat({d_input_gate, d_output_gate, d_candidate_cell}, /*dim=*/1);
 
   auto d_weights = d_gates.t().mm(X);
   auto d_bias = d_gates.sum(/*dim=*/0, /*keepdim=*/true);
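
For reference, the element-wise derivatives that d_sigmoid, d_tanh and d_elu compute in this backward pass follow from the standard activation definitions (with the ELU alpha = 1, matching the forward code):

    \sigma'(z) = \sigma(z)\,(1 - \sigma(z)), \qquad
    \tanh'(z) = 1 - \tanh^{2}(z), \qquad
    \mathrm{elu}'(z) = \begin{cases} 1, & z > 0 \\ e^{z}, & z \le 0 \end{cases}

Assuming the usual LLTM formulation in which the gate pre-activations are an affine map of X, the last lines of the hunk then give the parameter gradients directly: d_weights = d_gates.t().mm(X), and d_bias sums d_gates over the batch dimension.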

monai/csrc/lltm/lltm_cuda.cu

+48 -54

@@ -11,9 +11,9 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-#include <torch/extension.h>
 #include <cuda.h>
 #include <cuda_runtime.h>
+#include <torch/extension.h>
 
 #include <vector>
 
@@ -49,59 +49,53 @@ __device__ __forceinline__ scalar_t d_elu(scalar_t z, scalar_t alpha = 1.0) {
 
 template <typename scalar_t>
 __global__ void lltm_cuda_forward_kernel(
-    const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> gates,
-    const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> old_cell,
-    torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_h,
-    torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_cell,
-    torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> input_gate,
-    torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> output_gate,
-    torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> candidate_cell) {
-  //batch index
+    const torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> gates,
+    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> old_cell,
+    torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> new_h,
+    torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> new_cell,
+    torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> input_gate,
+    torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> output_gate,
+    torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> candidate_cell) {
+  // batch index
   const int n = blockIdx.y;
   // column index
   const int c = blockIdx.x * blockDim.x + threadIdx.x;
-  if (c < gates.size(2)){
+  if (c < gates.size(2)) {
     input_gate[n][c] = sigmoid(gates[n][0][c]);
     output_gate[n][c] = sigmoid(gates[n][1][c]);
     candidate_cell[n][c] = elu(gates[n][2][c]);
-    new_cell[n][c] =
-        old_cell[n][c] + candidate_cell[n][c] * input_gate[n][c];
+    new_cell[n][c] = old_cell[n][c] + candidate_cell[n][c] * input_gate[n][c];
     new_h[n][c] = tanh(new_cell[n][c]) * output_gate[n][c];
   }
 }
 
 template <typename scalar_t>
 __global__ void lltm_cuda_backward_kernel(
-    torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> d_old_cell,
-    torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> d_gates,
-    const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> grad_h,
-    const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> grad_cell,
-    const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_cell,
-    const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> input_gate,
-    const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> output_gate,
-    const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> candidate_cell,
-    const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> gate_weights) {
-  //batch index
+    torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> d_old_cell,
+    torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> d_gates,
+    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> grad_h,
+    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> grad_cell,
+    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> new_cell,
+    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> input_gate,
+    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> output_gate,
+    const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> candidate_cell,
+    const torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> gate_weights) {
+  // batch index
   const int n = blockIdx.y;
   // column index
   const int c = blockIdx.x * blockDim.x + threadIdx.x;
-  if (c < d_gates.size(2)){
+  if (c < d_gates.size(2)) {
     const auto d_output_gate = tanh(new_cell[n][c]) * grad_h[n][c];
     const auto d_tanh_new_cell = output_gate[n][c] * grad_h[n][c];
-    const auto d_new_cell =
-        d_tanh(new_cell[n][c]) * d_tanh_new_cell + grad_cell[n][c];
-
+    const auto d_new_cell = d_tanh(new_cell[n][c]) * d_tanh_new_cell + grad_cell[n][c];
 
     d_old_cell[n][c] = d_new_cell;
     const auto d_candidate_cell = input_gate[n][c] * d_new_cell;
     const auto d_input_gate = candidate_cell[n][c] * d_new_cell;
 
-    d_gates[n][0][c] =
-        d_input_gate * d_sigmoid(gate_weights[n][0][c]);
-    d_gates[n][1][c] =
-        d_output_gate * d_sigmoid(gate_weights[n][1][c]);
-    d_gates[n][2][c] =
-        d_candidate_cell * d_elu(gate_weights[n][2][c]);
+    d_gates[n][0][c] = d_input_gate * d_sigmoid(gate_weights[n][0][c]);
+    d_gates[n][1][c] = d_output_gate * d_sigmoid(gate_weights[n][1][c]);
+    d_gates[n][2][c] = d_candidate_cell * d_elu(gate_weights[n][2][c]);
   }
 }
 } // namespace
@@ -128,16 +122,16 @@ std::vector<torch::Tensor> lltm_cuda_forward(
   const int threads = 1024;
   const dim3 blocks((state_size + threads - 1) / threads, batch_size);
 
-  AT_DISPATCH_FLOATING_TYPES(gates.type(), "lltm_forward_cuda", ([&] {
-    lltm_cuda_forward_kernel<scalar_t><<<blocks, threads>>>(
-        gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
-        old_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
-        new_h.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
-        new_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
-        input_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
-        output_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
-        candidate_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>());
-  }));
+  AT_DISPATCH_FLOATING_TYPES(gates.scalar_type(), "lltm_forward_cuda", ([&] {
+    lltm_cuda_forward_kernel<scalar_t><<<blocks, threads>>>(
+        gates.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>(),
+        old_cell.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
+        new_h.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
+        new_cell.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
+        input_gate.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
+        output_gate.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
+        candidate_cell.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>());
+  }));
 
   return {new_h, new_cell, input_gate, output_gate, candidate_cell, X, gates};
 }
@@ -161,18 +155,18 @@ std::vector<torch::Tensor> lltm_cuda_backward(
   const int threads = 1024;
   const dim3 blocks((state_size + threads - 1) / threads, batch_size);
 
-  AT_DISPATCH_FLOATING_TYPES(X.type(), "lltm_forward_cuda", ([&] {
-    lltm_cuda_backward_kernel<scalar_t><<<blocks, threads>>>(
-        d_old_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
-        d_gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
-        grad_h.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
-        grad_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
-        new_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
-        input_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
-        output_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
-        candidate_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
-        gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>());
-  }));
+  AT_DISPATCH_FLOATING_TYPES(X.scalar_type(), "lltm_forward_cuda", ([&] {
+    lltm_cuda_backward_kernel<scalar_t><<<blocks, threads>>>(
+        d_old_cell.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
+        d_gates.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>(),
+        grad_h.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
+        grad_cell.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
+        new_cell.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
+        input_gate.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
+        output_gate.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
+        candidate_cell.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
+        gates.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>());
+  }));
 
   auto d_gate_weights = d_gates.flatten(1, 2);
   auto d_weights = d_gate_weights.t().mm(X);
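
The reformatted kernels compute the same per-element LLTM update as before; reading lltm_cuda_forward_kernel off the diff, each thread handles one (batch n, column c) pair:

    i_{n,c} = \sigma(g_{n,0,c}), \quad
    o_{n,c} = \sigma(g_{n,1,c}), \quad
    \tilde{c}_{n,c} = \mathrm{elu}(g_{n,2,c}), \qquad
    C'_{n,c} = C_{n,c} + \tilde{c}_{n,c}\, i_{n,c}, \qquad
    h'_{n,c} = \tanh(C'_{n,c})\, o_{n,c}

The substantive API change in this file is the move from packed_accessor<scalar_t, N, torch::RestrictPtrTraits, size_t> and Tensor::type() to packed_accessor32 / PackedTensorAccessor32 (32-bit indexing) and scalar_type(); the older forms are deprecated in recent PyTorch releases, which is likely the "fixes build warnings" item in the commit message.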

monai/csrc/utils/common_utils.h

+1 -2

@@ -15,8 +15,7 @@ limitations under the License.
 #include <torch/extension.h>
 
 #define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor.")
-#define CHECK_CONTIGUOUS(x) \
-  TORCH_CHECK(x.is_contiguous(), #x " must be contiguous.")
+#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous.")
 #define CHECK_CONTIGUOUS_CUDA(x) \
   CHECK_CUDA(x);                 \
   CHECK_CONTIGUOUS(x)

monai/data/image_reader.py

+10 -10

@@ -160,7 +160,7 @@ def get_data(self):
         img_array_ = np.stack(img_array, axis=0) if len(img_array) > 1 else img_array[0]
         return img_array_, compatible_meta
 
-    def _get_meta_dict(self, img: Image) -> Dict:
+    def _get_meta_dict(self, img) -> Dict:
         """
         Get all the meta data of the image and convert to dict type.
 
@@ -180,7 +180,7 @@ def _get_meta_dict(self, img: Image) -> Dict:
         meta_dict["direction"] = itk.array_from_matrix(img.GetDirection())
         return meta_dict
 
-    def _get_affine(self, img: Image) -> np.ndarray:
+    def _get_affine(self, img) -> np.ndarray:
         """
         Get or construct the affine matrix of the image, it can be used to correct
         spacing, orientation or execute spatial transforms.
@@ -201,7 +201,7 @@ def _get_affine(self, img: Image) -> np.ndarray:
         affine[(slice(-1), -1)] = origin
         return affine
 
-    def _get_spatial_shape(self, img: Image) -> Sequence:
+    def _get_spatial_shape(self, img) -> Sequence:
         """
         Get the spatial shape of image data, it doesn't contain the channel dim.
 
@@ -211,7 +211,7 @@ def _get_spatial_shape(self, img: Image) -> Sequence:
         """
         return list(itk.size(img))
 
-    def _get_array_data(self, img: Image) -> np.ndarray:
+    def _get_array_data(self, img) -> np.ndarray:
         """
         Get the raw array data of the image, converted to Numpy array.
 
@@ -309,7 +309,7 @@ def get_data(self):
         img_array_ = np.stack(img_array, axis=0) if len(img_array) > 1 else img_array[0]
         return img_array_, compatible_meta
 
-    def _get_meta_dict(self, img: Nifti1Image) -> Dict:
+    def _get_meta_dict(self, img) -> Dict:
         """
         Get the all the meta data of the image and convert to dict type.
 
@@ -319,7 +319,7 @@ def _get_meta_dict(self, img: Nifti1Image) -> Dict:
         """
         return dict(img.header)
 
-    def _get_affine(self, img: Nifti1Image) -> np.ndarray:
+    def _get_affine(self, img) -> np.ndarray:
         """
         Get the affine matrix of the image, it can be used to correct
         spacing, orientation or execute spatial transforms.
@@ -330,7 +330,7 @@ def _get_affine(self, img: Nifti1Image) -> np.ndarray:
         """
         return img.affine
 
-    def _get_spatial_shape(self, img: Nifti1Image) -> Sequence:
+    def _get_spatial_shape(self, img) -> Sequence:
         """
         Get the spatial shape of image data, it doesn't contain the channel dim.
 
@@ -342,7 +342,7 @@ def _get_spatial_shape(self, img: Nifti1Image) -> Sequence:
         spatial_rank = min(ndim, 3)
         return list(img.header["dim"][1 : spatial_rank + 1])
 
-    def _get_array_data(self, img: Nifti1Image) -> np.ndarray:
+    def _get_array_data(self, img) -> np.ndarray:
         """
         Get the raw array data of the image, converted to Numpy array.
 
@@ -521,7 +521,7 @@ def get_data(self):
         img_array_ = np.stack(img_array, axis=0) if len(img_array) > 1 else img_array[0]
         return img_array_, compatible_meta
 
-    def _get_meta_dict(self, img: PILImage.Image) -> Dict:
+    def _get_meta_dict(self, img) -> Dict:
         """
         Get the all the meta data of the image and convert to dict type.
         Args:
@@ -536,7 +536,7 @@ def _get_meta_dict(self, img: PILImage.Image) -> Dict:
         meta["info"] = img.info
         return meta
 
-    def _get_spatial_shape(self, img: PILImage.Image) -> Sequence:
+    def _get_spatial_shape(self, img) -> Sequence:
         """
         Get the spatial shape of image data, it doesn't contain the channel dim.
         Args:
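
Replacing the concrete annotations (img: Image for ITK, img: Nifti1Image for nibabel, img: PILImage.Image for PIL) with a plain img parameter keeps these readers importable when the corresponding optional package is absent, which is what the minimal-dependency CI run exercises. A minimal sketch of the general optional-import pattern this relies on (illustrative only, not MONAI's actual helper; only itk.size(img) is taken from the diff above):

    # Guarded optional import: the module stays importable when itk is missing,
    # as long as nothing references itk names at import time (e.g. in annotations).
    try:
        import itk
        HAS_ITK = True
    except ImportError:
        itk = None
        HAS_ITK = False

    def get_spatial_shape(img):  # note: no `img: itk.Image` annotation
        if not HAS_ITK:
            raise RuntimeError("itk is required to read this image.")
        return list(itk.size(img))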

tests/clang_format_utils.py

+2 -1

@@ -61,7 +61,8 @@ def get_and_check_clang_format():
             PLATFORM_TO_CF_URL[HOST_PLATFORM], CLANG_FORMAT_PATH, PLATFORM_TO_HASH[HOST_PLATFORM], hash_type="sha1"
         )
     except Exception as e:
-        print(f"Download {CLANG_FORMAT_PATH} failed: {e}.")
+        print(f"Download {CLANG_FORMAT_PATH} failed: {e}")
+        print(f"Please remove {CLANG_FORMAT_PATH} and retry.")
         return False
 
     # Make sure the binary is executable.

tests/min_tests.py

+1

@@ -55,6 +55,7 @@ def run_testsuit():
         "test_orientationd",
         "test_parallel_execution",
         "test_persistentdataset",
+        "test_pil_reader",
         "test_plot_2d_or_3d_image",
         "test_png_rw",
         "test_png_saver",

tests/test_decathlondataset.py

+1 -1

@@ -51,7 +51,7 @@ def _test_dataset(dataset):
             print(str(e))
             if isinstance(e, RuntimeError):
                 # FIXME: skip MD5 check as current downloading method may fail
-                self.assertTrue(str(e).startswith("MD5 check"))
+                self.assertTrue(str(e).startswith("md5 check"))
             return  # skipping this test due the network connection errors
 
         _test_dataset(data)

tests/test_download_and_extract.py

+3 -3

@@ -32,7 +32,7 @@ def test_actions(self):
             print(str(e))
             if isinstance(e, RuntimeError):
                 # FIXME: skip MD5 check as current downloading method may fail
-                self.assertTrue(str(e).startswith("MD5 check"))
+                self.assertTrue(str(e).startswith("md5 check"))
             return  # skipping this test due the network connection errors
 
         wrong_md5 = "0"
@@ -42,13 +42,13 @@ def test_actions(self):
             print(str(e))
             if isinstance(e, RuntimeError):
                 # FIXME: skip MD5 check as current downloading method may fail
-                self.assertTrue(str(e).startswith("MD5 check"))
+                self.assertTrue(str(e).startswith("md5 check"))
             return  # skipping this test due the network connection errors
 
         try:
             extractall(filepath, output_dir, wrong_md5)
         except RuntimeError as e:
-            self.assertTrue(str(e).startswith("MD5 check"))
+            self.assertTrue(str(e).startswith("md5 check"))
 
 
 if __name__ == "__main__":
