fix Softmax unit test · SciSharp/TensorFlow.NET@42ca73c · GitHub
[go: up one dir, main page]

Skip to content

Commit 42ca73c

Browse files
committed
fix Softmax unit test
1 parent 033fb7e commit 42ca73c

File tree

1 file changed

+78
-68
lines changed
  • test/TensorFlowNET.Keras.UnitTest/Layers

1 file changed

+78
-68
lines changed

test/TensorFlowNET.Keras.UnitTest/Layers/ActivationTest.cs

Lines changed: 78 additions & 68 deletions
Original file line numberDiff line numberDiff line change
@@ -7,82 +7,92 @@
77
using Tensorflow;
88

99
namespace TensorFlowNET.Keras.UnitTest {
10-
[TestClass]
11-
public class ActivationTest : EagerModeTestBase {
12-
[TestMethod]
13-
public void LeakyReLU () {
14-
var layer = keras.layers.LeakyReLU();
15-
Tensor output = layer.Apply(np.array(-3.0f, -1.0f, 0.0f, 2.0f));
16-
Equal(new[] { -0.9f, -0.3f, 0.0f, 2.0f }, output.ToArray<float>());
17-
}
10+
[TestClass]
11+
public class ActivationTest : EagerModeTestBase
12+
{
13+
[TestMethod]
14+
public void LeakyReLU()
15+
{
16+
var layer = keras.layers.LeakyReLU();
17+
Tensor output = layer.Apply(np.array(-3.0f, -1.0f, 0.0f, 2.0f));
18+
Equal(new[] { -0.9f, -0.3f, 0.0f, 2.0f }, output.ToArray<float>());
19+
}
1820

19-
[TestMethod]
20-
public void ELU () {
21-
Tensors input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
22-
Tensor output = keras.layers.ELU().Apply(input);
23-
NDArray expected = new NDArray(new float[] { -0.0950213f, -0.08646648f, -0.06321206f, 0f, 1f, 2f });
24-
Assert.AreEqual(expected.numpy(), output.numpy());
25-
}
21+
[TestMethod]
22+
public void ELU()
23+
{
24+
Tensors input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
25+
Tensor output = keras.layers.ELU().Apply(input);
26+
NDArray expected = new NDArray(new float[] { -0.0950213f, -0.08646648f, -0.06321206f, 0f, 1f, 2f });
27+
Assert.AreEqual(expected.numpy(), output.numpy());
28+
}
2629

27-
[TestMethod]
28-
public void SELU () {
29-
Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
30-
Tensor output = keras.layers.SELU().Apply(input);
31-
NDArray expected = new NDArray(new float[] { -1.6705688f, -1.5201665f, -1.1113307f, 0f, 1.050701f, 2.101402f });
32-
Assert.AreEqual(expected.numpy(), output.numpy());
33-
}
30+
[TestMethod]
31+
public void SELU()
32+
{
33+
Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
34+
Tensor output = keras.layers.SELU().Apply(input);
35+
NDArray expected = new NDArray(new float[] { -1.6705688f, -1.5201665f, -1.1113307f, 0f, 1.050701f, 2.101402f });
36+
Assert.AreEqual(expected.numpy(), output.numpy());
37+
}
3438

35-
[TestMethod]
36-
public void Softmax () {
37-
Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
38-
Tensor output = keras.layers.Softmax(new Axis(-1)).Apply(input);
39-
NDArray expected = new NDArray(new float[] { 0.0042697787f, 0.011606461f, 0.031549633f, 0.085760795f, 0.23312202f, 0.6336913f });
40-
Assert.AreEqual(expected.numpy(), output.numpy());
41-
}
39+
[TestMethod]
40+
public void Softmax()
41+
{
42+
Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
43+
Tensor output = keras.layers.Softmax(new Axis(-1)).Apply(input);
44+
var expected = new float[] { 0.0042697787f, 0.011606461f, 0.031549633f, 0.085760795f, 0.23312202f, 0.6336913f };
45+
Assert.IsTrue(Equal(expected, output.ToArray<float>()));
46+
}
4247

43-
[TestMethod]
44-
public void Softplus () {
45-
Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
46-
Tensor output = keras.layers.Softplus().Apply(input);
47-
NDArray expected = new NDArray(new float[] { 0.04858733f, 0.12692805f, 0.31326166f, 0.6931472f, 1.3132616f, 2.126928f });
48-
Assert.AreEqual(expected, output.numpy());
49-
}
48+
[TestMethod]
49+
public void Softplus()
50+
{
51+
Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
52+
Tensor output = keras.layers.Softplus().Apply(input);
53+
NDArray expected = new NDArray(new float[] { 0.04858733f, 0.12692805f, 0.31326166f, 0.6931472f, 1.3132616f, 2.126928f });
54+
Assert.AreEqual(expected, output.numpy());
55+
}
5056

51-
[TestMethod]
52-
public void Softsign () {
53-
Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
54-
Tensor output = keras.layers.Softsign().Apply(input);
55-
NDArray expected = new NDArray(new float[] { -0.75f, -0.66666667f, -0.5f, 0f, 0.5f, 0.66666667f });
56-
Assert.AreEqual(expected, output.numpy());
57-
}
57+
[TestMethod]
58+
public void Softsign()
59+
{
60+
Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
61+
Tensor output = keras.layers.Softsign().Apply(input);
62+
NDArray expected = new NDArray(new float[] { -0.75f, -0.66666667f, -0.5f, 0f, 0.5f, 0.66666667f });
63+
Assert.AreEqual(expected, output.numpy());
64+
}
5865

5966

60-
[TestMethod]
61-
public void Exponential () {
62-
Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
63-
Tensor output = keras.layers.Exponential().Apply(input);
64-
NDArray expected = new NDArray(new float[] { 0.049787067f, 0.13533528f, 0.36787945f, 1f, 2.7182817f, 7.389056f });
65-
Assert.AreEqual(expected, output.numpy());
66-
}
67+
[TestMethod]
68+
public void Exponential()
69+
{
70+
Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
71+
Tensor output = keras.layers.Exponential().Apply(input);
72+
var expected = new float[] { 0.049787067f, 0.13533528f, 0.36787945f, 1f, 2.7182817f, 7.389056f };
73+
Assert.IsTrue(Equal(expected, output.ToArray<float>()));
74+
}
6775

68-
[TestMethod]
69-
public void HardSigmoid () {
70-
Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
71-
Tensor output = keras.layers.HardSigmoid().Apply(input);
72-
// Note, this should be [0, 0.1, 0.3, 0.5, 0.7, 0.9]
73-
// But somehow the second element will have 0.099999994
74-
// Probably because there is an accuracy loss somewhere
75-
NDArray expected = new NDArray(new float[] { 0f, 0.099999994f, 0.3f, 0.5f, 0.7f, 0.9f });
76-
Assert.AreEqual(expected, output.numpy());
77-
}
76+
[TestMethod]
77+
public void HardSigmoid()
78+
{
79+
Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
80+
Tensor output = keras.layers.HardSigmoid().Apply(input);
81+
// Note, this should be [0, 0.1, 0.3, 0.5, 0.7, 0.9]
82+
// But somehow the second element will have 0.099999994
83+
// Probably because there is an accuracy loss somewhere
84+
NDArray expected = new NDArray(new float[] { 0f, 0.099999994f, 0.3f, 0.5f, 0.7f, 0.9f });
85+
Assert.AreEqual(expected, output.numpy());
86+
}
7887

7988

80-
[TestMethod]
81-
public void Swish () {
82-
Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
83-
Tensor output = keras.layers.Swish().Apply(input);
84-
NDArray expected = new NDArray(new float[] { -0.14227762f, -0.23840584f, -0.26894143f, 0f, 0.7310586f, 1.761594f });
85-
Assert.AreEqual(expected, output.numpy());
86-
}
87-
}
89+
[TestMethod]
90+
public void Swish()
91+
{
92+
Tensor input = tf.constant(new float[] { -3f, -2f, -1f, 0f, 1f, 2f });
93+
Tensor output = keras.layers.Swish().Apply(input);
94+
NDArray expected = new NDArray(new float[] { -0.14227762f, -0.23840584f, -0.26894143f, 0f, 0.7310586f, 1.761594f });
95+
Assert.AreEqual(expected, output.numpy());
96+
}
97+
}
8898
}

0 commit comments

Comments
 (0)
0