@@ -5,7 +5,6 @@
 import chainer
 from chainer.backends import cuda
 from chainer import functions
-from chainer import gradient_check
 from chainer import testing
 from chainer.testing import attr
 from chainer.utils import type_check
@@ -30,167 +29,120 @@ def _make_eye(shape):
     'dtype': [numpy.float16, numpy.float32, numpy.float64],
     'shape': [(1, 1), (5, 5)],
 }))
-class InvFunctionTest(unittest.TestCase):
+@testing.fix_random()
+@testing.inject_backend_tests(
+    None,
+    # CPU tests
+    [
+        {},
+    ]
+    # GPU tests
+    + testing.product({
+        'use_cuda': [True],
+        'cuda_device': [0, 1],
+    })
+    # ChainerX tests
+    + testing.product({
+        'use_chainerx': [True],
+        'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
+    })
+)
+class InvFunctionTest(testing.FunctionTestCase):

     def setUp(self):
-        self.x = (numpy.eye(self.shape[-1]) +
-                  numpy.random.uniform(-0.01, 0.01, self.shape)).astype(
-                      self.dtype)
-        self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
-        self.ggx = (numpy.eye(self.shape[-1]) +
-                    numpy.random.uniform(-0.01, 0.01, self.shape)).astype(
-                        self.dtype)
-
         if self.dtype == numpy.float16:
             self.check_forward_dtype = numpy.float32
-            self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-3}
-            self.check_backward_options = {
-                'dtype': numpy.float64, 'atol': 1e-3, 'rtol': 1e-3}
-            self.check_double_backward_options = {
-                'dtype': numpy.float64, 'atol': 5e-3, 'rtol': 5e-3}
+            self.check_forward_options.update({'atol': 1e-3, 'rtol': 1e-3})
+            self.check_backward_options.update({'atol': 1e-3, 'rtol': 1e-3})
+            self.check_double_backward_options.update({
+                'atol': 5e-3, 'rtol': 5e-3})
         else:
             self.check_forward_dtype = self.dtype
-            self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-4}
-            self.check_backward_options = {'atol': 5e-4, 'rtol': 5e-4}
-            self.check_double_backward_options = {'atol': 5e-4, 'rtol': 5e-4}
-
-    def check_forward(self, x_data):
-        x = chainer.Variable(x_data)
-        y = functions.inv(x)
-        x1 = self.x.astype(self.check_forward_dtype, copy=False)
-        testing.assert_allclose(_inv(x1), y.data, **self.check_forward_options)
-
-    def check_backward(self, x_data, y_grad):
-        gradient_check.check_backward(
-            functions.inv, x_data, y_grad, **self.check_backward_options)
-
-    def check_double_backward(self, x_data, y_grad, x_grad_grad):
-        gradient_check.check_double_backward(
-            functions.inv, x_data, y_grad, x_grad_grad,
-            **self.check_double_backward_options)
-
-    def test_identity_cpu(self):
-        eye = _make_eye(self.x.shape)
-        x = chainer.Variable(self.x)
+            self.check_forward_options.update({'atol': 1e-4, 'rtol': 1e-4})
+            self.check_backward_options.update({'atol': 5e-4, 'rtol': 5e-4})
+            self.check_double_backward_options.update({
+                'atol': 5e-4, 'rtol': 5e-4})
+
+    def generate_inputs(self):
+        x = (numpy.eye(self.shape[-1]) +
+             numpy.random.uniform(-0.01, 0.01, self.shape)).astype(self.dtype)
+        return x,
+
+    def forward(self, inputs, device):
+        x, = inputs
+        return functions.inv(x),
+
+    def forward_expected(self, inputs):
+        x, = inputs
+        x1 = x.astype(self.check_forward_dtype, copy=False)
+        return _inv(x1).astype(self.dtype),
+
+    def test_identity(self, backend_config):
+        x, = self.generate_inputs()
+        x = chainer.Variable(backend_config.get_array(x))
         y = functions.matmul(x, functions.inv(x))
         testing.assert_allclose(
-            y.data, eye, **self.check_forward_options)
-
-    @attr.gpu
-    def test_identity_gpu(self):
-        eye = cuda.to_gpu(_make_eye(self.x.shape))
-        x = chainer.Variable(cuda.to_gpu(self.x))
-        y = functions.matmul(x, functions.inv(x))
-        testing.assert_allclose(
-            y.data, eye, **self.check_forward_options)
-
-    def test_forward_cpu(self):
-        self.check_forward(self.x)
-
-    @attr.gpu
-    def test_forward_gpu(self):
-        self.check_forward(cuda.to_gpu(self.x))
-
-    def test_backward_cpu(self):
-        self.check_backward(self.x, self.gy)
-
-    @attr.gpu
-    def test_backward_gpu(self):
-        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
-
-    def test_double_backward_cpu(self):
-        self.check_double_backward(self.x, self.gy, self.ggx)
-
-    @attr.gpu
-    def test_double_backward_gpu(self):
-        self.check_double_backward(
-            cuda.to_gpu(self.x),
-            cuda.to_gpu(self.gy),
-            cuda.to_gpu(self.ggx))
+            y.data, _make_eye(x.shape), **self.check_forward_options)


 @testing.parameterize(*testing.product({
     'dtype': [numpy.float16, numpy.float32, numpy.float64],
     'shape': [(5, 1, 1), (3, 5, 5)],
 }))
-class BatchInvFunctionTest(unittest.TestCase):
+@testing.fix_random()
+@testing.inject_backend_tests(
+    None,
+    # CPU tests
+    [
+        {},
+    ]
+    # GPU tests
+    + testing.product({
+        'use_cuda': [True],
+        'cuda_device': [0, 1],
+    })
+    # ChainerX tests
+    + testing.product({
+        'use_chainerx': [True],
+        'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
+    })
+)
+class BatchInvFunctionTest(testing.FunctionTestCase):

     def setUp(self):
-        self.x = (numpy.eye(self.shape[-1]) +
-                  numpy.random.uniform(-0.01, 0.01, self.shape)).astype(
-                      self.dtype)
-        self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
-        self.ggx = (numpy.eye(self.shape[-1]) +
-                    numpy.random.uniform(-0.01, 0.01, self.shape)).astype(
-                        self.dtype)
-
         if self.dtype == numpy.float16:
             self.check_forward_dtype = numpy.float32
-            self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-3}
-            self.check_backward_options = {
-                'dtype': numpy.float64, 'atol': 1e-3, 'rtol': 1e-3}
-            self.check_double_backward_options = {
-                'dtype': numpy.float64, 'atol': 5e-3, 'rtol': 5e-3}
+            self.check_forward_options.update({'atol': 1e-3, 'rtol': 1e-3})
+            self.check_backward_options.update({'atol': 1e-3, 'rtol': 1e-3})
+            self.check_double_backward_options.update({
+                'atol': 5e-3, 'rtol': 5e-3})
         else:
             self.check_forward_dtype = self.dtype
-            self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-4}
-            self.check_backward_options = {'atol': 5e-4, 'rtol': 5e-4}
-            self.check_double_backward_options = {'atol': 1e-3, 'rtol': 1e-3}
-
-    def check_forward(self, x_data):
-        x = chainer.Variable(x_data)
-        y = functions.batch_inv(x)
-        x1 = self.x.astype(self.check_forward_dtype, copy=False)
-        testing.assert_allclose(_inv(x1), y.data, **self.check_forward_options)
-
-    def check_backward(self, x_data, y_grad):
-        gradient_check.check_backward(
-            functions.batch_inv, x_data, y_grad,
-            **self.check_backward_options)
-
-    def check_double_backward(self, x_data, y_grad, x_grad_grad):
-        gradient_check.check_double_backward(
-            functions.batch_inv, x_data, y_grad, x_grad_grad,
-            **self.check_double_backward_options)
-
-    def test_identity_cpu(self):
-        eye = _make_eye(self.x.shape)
-        x = chainer.Variable(self.x)
+            self.check_forward_options.update({'atol': 1e-4, 'rtol': 1e-4})
+            self.check_backward_options.update({'atol': 5e-4, 'rtol': 5e-4})
+            self.check_double_backward_options.update({
+                'atol': 1e-3, 'rtol': 1e-3})
+
+    def generate_inputs(self):
+        x = (numpy.eye(self.shape[-1]) +
+             numpy.random.uniform(-0.01, 0.01, self.shape)).astype(self.dtype)
+        return x,
+
+    def forward(self, inputs, device):
+        x, = inputs
+        return functions.batch_inv(x),
+
+    def forward_expected(self, inputs):
+        x, = inputs
+        x1 = x.astype(self.check_forward_dtype, copy=False)
+        return _inv(x1).astype(self.dtype),
+
+    def test_identity(self, backend_config):
+        x, = self.generate_inputs()
+        x = chainer.Variable(backend_config.get_array(x))
         y = functions.matmul(x, functions.batch_inv(x))
         testing.assert_allclose(
-            y.data, eye, **self.check_forward_options)
-
-    @attr.gpu
-    def test_identity_gpu(self):
-        eye = cuda.to_gpu(_make_eye(self.x.shape))
-        x = chainer.Variable(cuda.to_gpu(self.x))
-        y = functions.matmul(x, functions.batch_inv(x))
-        testing.assert_allclose(
-            y.data, eye, **self.check_forward_options)
-
-    def test_forward_cpu(self):
-        self.check_forward(self.x)
-
-    @attr.gpu
-    def test_forward_gpu(self):
-        self.check_forward(cuda.to_gpu(self.x))
-
-    def test_backward_cpu(self):
-        self.check_backward(self.x, self.gy)
-
-    @attr.gpu
-    def test_backward_gpu(self):
-        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
-
-    def test_double_backward_cpu(self):
-        self.check_double_backward(self.x, self.gy, self.ggx)
-
-    @attr.gpu
-    def test_double_backward_gpu(self):
-        self.check_double_backward(
-            cuda.to_gpu(self.x),
-            cuda.to_gpu(self.gy),
-            cuda.to_gpu(self.ggx))
+            y.data, _make_eye(x.shape), **self.check_forward_options)


 class InvFunctionRaiseTest(unittest.TestCase):
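Note: the converted tests above rely on two module-level helpers defined near the top of this file but outside this hunk: `_inv`, the NumPy reference inverse that `forward_expected` compares against, and `_make_eye`, which builds the identity target for `test_identity`. A minimal sketch of what they are assumed to look like (a hypothetical reconstruction, not the exact lines from this file), covering both the (n, n) and batched (m, n, n) shapes exercised here:

    import numpy

    def _inv(x):
        # Reference inverse: a single (n, n) matrix goes straight to
        # numpy.linalg.inv; a batched input is inverted matrix by matrix.
        if x.ndim == 2:
            return numpy.linalg.inv(x)
        return numpy.array([numpy.linalg.inv(ix) for ix in x])

    def _make_eye(shape):
        # Identity target with the same shape as the input: one eye for
        # (n, n), a stack of eyes for a batch of square matrices.
        if len(shape) == 2:
            return numpy.eye(shape[0], dtype=numpy.float32)
        return numpy.array([numpy.eye(shape[-1], dtype=numpy.float32)
                            for _ in range(shape[0])])

With helpers of this shape, `forward_expected` stays backend-agnostic: the expectation is always computed in NumPy, while `FunctionTestCase` routes the actual `forward` call through whichever CPU, CUDA, or ChainerX device `inject_backend_tests` selected.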