
Commit 84293f0

refactor(KDP): splitting more tests for layers
1 parent c188267 commit 84293f0

5 files changed: +159 −157 lines changed
Lines changed: 57 additions & 0 deletions

@@ -0,0 +1,57 @@
+import tensorflow as tf
+from kdp.layers.gated_linear_unit_layer import GatedLinearUnit
+
+
+class TestGatedLinearUnit(tf.test.TestCase):
+    def setUp(self):
+        super().setUp()
+        self.batch_size = 32
+        self.input_dim = 100
+        self.units = 64
+        self.layer = GatedLinearUnit(units=self.units)
+
+    def test_output_shape(self):
+        inputs = tf.random.normal((self.batch_size, self.input_dim))
+        outputs = self.layer(inputs)
+        self.assertEqual(outputs.shape, (self.batch_size, self.units))
+
+    def test_gating_mechanism(self):
+        # Test that outputs are bounded by the sigmoid gate
+        inputs = tf.random.normal((self.batch_size, self.input_dim))
+        outputs = self.layer(inputs)
+        self.assertAllInRange(
+            outputs, -10.0, 10.0
+        )  # Reasonable range for gated outputs
+
+    def test_serialization_basic(self):
+        config = self.layer.get_config()
+        new_layer = GatedLinearUnit.from_config(config)
+        self.assertEqual(self.layer.units, new_layer.units)
+
+    def test_output_types(self):
+        """Test output types for GatedLinearUnit."""
+        gl = GatedLinearUnit(units=64)
+        inputs = tf.random.normal((32, 100))
+        outputs = gl(inputs)
+
+        # Verify output is a tensor with correct dtype
+        self.assertIsInstance(outputs, tf.Tensor)
+        self.assertEqual(outputs.dtype, tf.float32)
+
+    def test_serialization_and_output_consistency(self):
+        """Test serialization and deserialization of GatedLinearUnit."""
+        dummy_input = tf.random.normal((1, 100))
+
+        gl = GatedLinearUnit(units=64)
+        gl(dummy_input)  # This builds the layer
+
+        config = gl.get_config()
+        gl_new = GatedLinearUnit.from_config(config)
+        gl_new(dummy_input)  # Build the new layer too
+
+        # Set the weights to be the same
+        gl_new.set_weights(gl.get_weights())
+
+        # Test both layers produce the same output
+        inputs = tf.random.normal((32, 100))
+        self.assertAllClose(gl(inputs), gl_new(inputs))
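For context, GatedLinearUnit is tested here as a layer whose output is a linear projection scaled element-wise by a sigmoid gate. The kdp implementation itself is not part of this commit, so the following is only a minimal sketch of such a layer, assuming a standard Keras subclassing style:

import tensorflow as tf

# Hypothetical sketch of the layer under test (the actual kdp implementation
# is not shown in this diff). A GLU computes linear(x) * sigmoid(gate(x)).
class GatedLinearUnit(tf.keras.layers.Layer):
    def __init__(self, units: int, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self.linear = tf.keras.layers.Dense(units)
        self.gate = tf.keras.layers.Dense(units, activation="sigmoid")

    def call(self, inputs):
        # The sigmoid branch squashes into (0, 1) and scales the linear branch,
        # which is why test_gating_mechanism expects outputs in a bounded range.
        return self.linear(inputs) * self.gate(inputs)

    def get_config(self):
        # Recording `units` is what lets from_config() in the serialization
        # tests rebuild an equivalent layer.
        config = super().get_config()
        config.update({"units": self.units})
        return config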
Lines changed: 102 additions & 0 deletions

@@ -0,0 +1,102 @@
+import tensorflow as tf
+from kdp.layers.gated_residual_network_layer import GatedResidualNetwork
+
+
+class TestGatedResidualNetwork(tf.test.TestCase):
+    def setUp(self):
+        super().setUp()
+        self.batch_size = 32
+        self.input_dim = 100
+        self.units = 64
+        self.dropout_rate = 0.2
+        self.layer = GatedResidualNetwork(
+            units=self.units, dropout_rate=self.dropout_rate
+        )
+
+    def test_output_shape(self):
+        inputs = tf.random.normal((self.batch_size, self.input_dim))
+        outputs = self.layer(inputs)
+        self.assertEqual(outputs.shape, (self.batch_size, self.units))
+
+    def test_residual_connection(self):
+        """Test that the layer can handle different input dimensions."""
+        # Test with larger input dimension
+        layer1 = GatedResidualNetwork(units=self.units, dropout_rate=self.dropout_rate)
+        inputs = tf.random.normal((self.batch_size, self.input_dim))
+        outputs = layer1(inputs)
+        self.assertEqual(outputs.shape[-1], self.units)
+
+        # Test with matching dimensions (using a new layer instance)
+        layer2 = GatedResidualNetwork(units=self.units, dropout_rate=self.dropout_rate)
+        inputs = tf.random.normal((self.batch_size, self.units))
+        outputs = layer2(inputs)
+        self.assertEqual(outputs.shape, inputs.shape)
+
+    def test_dropout_behavior(self):
+        inputs = tf.random.normal((self.batch_size, self.input_dim))
+
+        # Test training phase (dropout active)
+        training_outputs = []
+        for _ in range(5):
+            outputs = self.layer(inputs, training=True)
+            training_outputs.append(outputs)
+
+        # Outputs should be different during training due to dropout
+        for i in range(len(training_outputs) - 1):
+            self.assertNotAllClose(training_outputs[i], training_outputs[i + 1])
+
+        # Test inference phase (dropout inactive)
+        inference_outputs = []
+        for _ in range(5):
+            outputs = self.layer(inputs, training=False)
+            inference_outputs.append(outputs)
+
+        # Outputs should be identical during inference
+        for i in range(len(inference_outputs) - 1):
+            self.assertAllClose(inference_outputs[i], inference_outputs[i + 1])
+
+    def test_serialization_basic(self):
+        config = self.layer.get_config()
+        new_layer = GatedResidualNetwork.from_config(config)
+        self.assertEqual(self.layer.units, new_layer.units)
+        self.assertEqual(self.layer.dropout_rate, new_layer.dropout_rate)
+
+    def test_output_types(self):
+        """Test output types for GatedResidualNetwork."""
+        batch_size = 32
+        input_dim = 64
+        dropout_rate = 0.5
+
+        grn = GatedResidualNetwork(units=input_dim, dropout_rate=dropout_rate)
+        inputs = tf.random.normal((batch_size, input_dim))
+
+        outputs = grn(inputs)
+
+        # Verify output is a tensor with correct dtype
+        self.assertIsInstance(outputs, tf.Tensor)
+        self.assertEqual(outputs.dtype, tf.float32)
+
+        # Test with different input types
+        inputs_int = tf.cast(inputs, tf.float32)
+        outputs_from_int = grn(inputs_int)
+        self.assertEqual(
+            outputs_from_int.dtype, tf.float32
+        )  # Should always output float32
+
+    def test_serialization_and_output_consistency(self):
+        """Test serialization and deserialization of GatedResidualNetwork."""
+        grn = GatedResidualNetwork(units=64, dropout_rate=0.3)
+        # Build the layer first
+        dummy_input = tf.random.normal((1, 64))
+        grn(dummy_input)
+
+        config = grn.get_config()
+        grn_new = GatedResidualNetwork.from_config(config)
+        grn_new(dummy_input)
+
+        # Set the weights to be the same
+        grn_new.set_weights(grn.get_weights())
+
+        # Test both layers produce the same output
+        inputs = tf.random.normal((32, 64))
+        self.assertAllClose(grn(inputs), grn_new(inputs))
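Similarly, the gated residual network tested here follows the pattern popularized by the Temporal Fusion Transformer: dense + ELU, a second dense, dropout, a GLU gate, then a residual connection (projected when the input width differs from `units`) and layer normalization. A minimal sketch under those assumptions, reusing the hypothetical GatedLinearUnit above — not the kdp source:

import tensorflow as tf

# Hypothetical TFT-style gated residual network; the real
# kdp.layers.gated_residual_network_layer may differ in detail.
class GatedResidualNetwork(tf.keras.layers.Layer):
    def __init__(self, units: int, dropout_rate: float = 0.2, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self.dropout_rate = dropout_rate
        self.elu_dense = tf.keras.layers.Dense(units, activation="elu")
        self.linear_dense = tf.keras.layers.Dense(units)
        self.dropout = tf.keras.layers.Dropout(dropout_rate)
        self.glu = GatedLinearUnit(units)  # sketch defined earlier
        self.layer_norm = tf.keras.layers.LayerNormalization()
        self.project = tf.keras.layers.Dense(units)  # residual projection

    def call(self, inputs, training=None):
        x = self.elu_dense(inputs)
        x = self.linear_dense(x)
        # Dropout only fires with training=True, which is exactly what
        # test_dropout_behavior checks by comparing repeated calls.
        x = self.dropout(x, training=training)
        # Project the residual branch when the input width != units, so the
        # addition in test_residual_connection is well-defined either way.
        residual = inputs if inputs.shape[-1] == self.units else self.project(inputs)
        return self.layer_norm(residual + self.glu(x))

    def get_config(self):
        config = super().get_config()
        config.update({"units": self.units, "dropout_rate": self.dropout_rate})
        return config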

test/test_feature_selection.py renamed to test/layers/test_variable_selection_layer.py

Lines changed: 0 additions & 157 deletions

@@ -1,5 +1,3 @@
-"""Unit tests for feature selection layers."""
-
 import os
 import tempfile
 
@@ -10,161 +8,6 @@
 from kdp.layers.variable_selection_layer import VariableSelection
 
 
-class TestGatedLinearUnit(tf.test.TestCase):
-    def setUp(self):
-        super().setUp()
-        self.batch_size = 32
-        self.input_dim = 100
-        self.units = 64
-        self.layer = GatedLinearUnit(units=self.units)
-
-    def test_output_shape(self):
-        inputs = tf.random.normal((self.batch_size, self.input_dim))
-        outputs = self.layer(inputs)
-        self.assertEqual(outputs.shape, (self.batch_size, self.units))
-
-    def test_gating_mechanism(self):
-        # Test that outputs are bounded by the sigmoid gate
-        inputs = tf.random.normal((self.batch_size, self.input_dim))
-        outputs = self.layer(inputs)
-        self.assertAllInRange(
-            outputs, -10.0, 10.0
-        )  # Reasonable range for gated outputs
-
-    def test_serialization_basic(self):
-        config = self.layer.get_config()
-        new_layer = GatedLinearUnit.from_config(config)
-        self.assertEqual(self.layer.units, new_layer.units)
-
-    def test_output_types(self):
-        """Test output types for GatedLinearUnit."""
-        gl = GatedLinearUnit(units=64)
-        inputs = tf.random.normal((32, 100))
-        outputs = gl(inputs)
-
-        # Verify output is a tensor with correct dtype
-        self.assertIsInstance(outputs, tf.Tensor)
-        self.assertEqual(outputs.dtype, tf.float32)
-
-    def test_serialization_and_output_consistency(self):
-        """Test serialization and deserialization of GatedLinearUnit."""
-        dummy_input = tf.random.normal((1, 100))
-
-        gl = GatedLinearUnit(units=64)
-        gl(dummy_input)  # This builds the layer
-
-        config = gl.get_config()
-        gl_new = GatedLinearUnit.from_config(config)
-        gl_new(dummy_input)  # Build the new layer too
-
-        # Set the weights to be the same
-        gl_new.set_weights(gl.get_weights())
-
-        # Test both layers produce the same output
-        inputs = tf.random.normal((32, 100))
-        self.assertAllClose(gl(inputs), gl_new(inputs))
-
-
-class TestGatedResidualNetwork(tf.test.TestCase):
-    def setUp(self):
-        super().setUp()
-        self.batch_size = 32
-        self.input_dim = 100
-        self.units = 64
-        self.dropout_rate = 0.2
-        self.layer = GatedResidualNetwork(
-            units=self.units, dropout_rate=self.dropout_rate
-        )
-
-    def test_output_shape(self):
-        inputs = tf.random.normal((self.batch_size, self.input_dim))
-        outputs = self.layer(inputs)
-        self.assertEqual(outputs.shape, (self.batch_size, self.units))
-
-    def test_residual_connection(self):
-        """Test that the layer can handle different input dimensions."""
-        # Test with larger input dimension
-        layer1 = GatedResidualNetwork(units=self.units, dropout_rate=self.dropout_rate)
-        inputs = tf.random.normal((self.batch_size, self.input_dim))
-        outputs = layer1(inputs)
-        self.assertEqual(outputs.shape[-1], self.units)
-
-        # Test with matching dimensions (using a new layer instance)
-        layer2 = GatedResidualNetwork(units=self.units, dropout_rate=self.dropout_rate)
-        inputs = tf.random.normal((self.batch_size, self.units))
-        outputs = layer2(inputs)
-        self.assertEqual(outputs.shape, inputs.shape)
-
-    def test_dropout_behavior(self):
-        inputs = tf.random.normal((self.batch_size, self.input_dim))
-
-        # Test training phase (dropout active)
-        training_outputs = []
-        for _ in range(5):
-            outputs = self.layer(inputs, training=True)
-            training_outputs.append(outputs)
-
-        # Outputs should be different during training due to dropout
-        for i in range(len(training_outputs) - 1):
-            self.assertNotAllClose(training_outputs[i], training_outputs[i + 1])
-
-        # Test inference phase (dropout inactive)
-        inference_outputs = []
-        for _ in range(5):
-            outputs = self.layer(inputs, training=False)
-            inference_outputs.append(outputs)
-
-        # Outputs should be identical during inference
-        for i in range(len(inference_outputs) - 1):
-            self.assertAllClose(inference_outputs[i], inference_outputs[i + 1])
-
-    def test_serialization_basic(self):
-        config = self.layer.get_config()
-        new_layer = GatedResidualNetwork.from_config(config)
-        self.assertEqual(self.layer.units, new_layer.units)
-        self.assertEqual(self.layer.dropout_rate, new_layer.dropout_rate)
-
-    def test_output_types(self):
-        """Test output types for GatedResidualNetwork."""
-        batch_size = 32
-        input_dim = 64
-        dropout_rate = 0.5
-
-        grn = GatedResidualNetwork(units=input_dim, dropout_rate=dropout_rate)
-        inputs = tf.random.normal((batch_size, input_dim))
-
-        outputs = grn(inputs)
-
-        # Verify output is a tensor with correct dtype
-        self.assertIsInstance(outputs, tf.Tensor)
-        self.assertEqual(outputs.dtype, tf.float32)
-
-        # Test with different input types
-        inputs_int = tf.cast(inputs, tf.float32)
-        outputs_from_int = grn(inputs_int)
-        self.assertEqual(
-            outputs_from_int.dtype, tf.float32
-        )  # Should always output float32
-
-    def test_serialization_and_output_consistency(self):
-        """Test serialization and deserialization of GatedResidualNetwork."""
-        grn = GatedResidualNetwork(units=64, dropout_rate=0.3)
-        # Build the layer first
-        dummy_input = tf.random.normal((1, 64))
-        grn(dummy_input)
-
-        config = grn.get_config()
-        grn_new = GatedResidualNetwork.from_config(config)
-        grn_new(dummy_input)
-
-        # Set the weights to be the same
-        grn_new.set_weights(grn.get_weights())
-
-        # Test both layers produce the same output
-        inputs = tf.random.normal((32, 64))
-        self.assertAllClose(grn(inputs), grn_new(inputs))
-
-
 class TestVariableSelection(tf.test.TestCase):
     def setUp(self):
         super().setUp()