@@ -1,5 +1,3 @@
-"""Unit tests for feature selection layers."""
-
 import os
 import tempfile

@@ -10,161 +8,6 @@
 from kdp.layers.variable_selection_layer import VariableSelection


-class TestGatedLinearUnit(tf.test.TestCase):
-    def setUp(self):
-        super().setUp()
-        self.batch_size = 32
-        self.input_dim = 100
-        self.units = 64
-        self.layer = GatedLinearUnit(units=self.units)
-
-    def test_output_shape(self):
-        inputs = tf.random.normal((self.batch_size, self.input_dim))
-        outputs = self.layer(inputs)
-        self.assertEqual(outputs.shape, (self.batch_size, self.units))
-
-    def test_gating_mechanism(self):
-        # The sigmoid gate scales the linear branch by factors in (0, 1),
-        # so output magnitudes should stay moderate
-        inputs = tf.random.normal((self.batch_size, self.input_dim))
-        outputs = self.layer(inputs)
-        self.assertAllInRange(
-            outputs, -10.0, 10.0
-        )  # Loose sanity bound for gated outputs
-
-    def test_serialization_basic(self):
-        config = self.layer.get_config()
-        new_layer = GatedLinearUnit.from_config(config)
-        self.assertEqual(self.layer.units, new_layer.units)
-
-    def test_output_types(self):
-        """Test output types for GatedLinearUnit."""
-        gl = GatedLinearUnit(units=64)
-        inputs = tf.random.normal((32, 100))
-        outputs = gl(inputs)
-
-        # Verify output is a tensor with correct dtype
-        self.assertIsInstance(outputs, tf.Tensor)
-        self.assertEqual(outputs.dtype, tf.float32)
-
-    def test_serialization_and_output_consistency(self):
-        """Test serialization and deserialization of GatedLinearUnit."""
-        dummy_input = tf.random.normal((1, 100))
-
-        gl = GatedLinearUnit(units=64)
-        gl(dummy_input)  # This builds the layer
-
-        config = gl.get_config()
-        gl_new = GatedLinearUnit.from_config(config)
-        gl_new(dummy_input)  # Build the new layer too
-
-        # Set the weights to be the same
-        gl_new.set_weights(gl.get_weights())
-
-        # Test both layers produce the same output
-        inputs = tf.random.normal((32, 100))
-        self.assertAllClose(gl(inputs), gl_new(inputs))
-
-
-class TestGatedResidualNetwork(tf.test.TestCase):
-    def setUp(self):
-        super().setUp()
-        self.batch_size = 32
-        self.input_dim = 100
-        self.units = 64
-        self.dropout_rate = 0.2
-        self.layer = GatedResidualNetwork(
-            units=self.units, dropout_rate=self.dropout_rate
-        )
-
-    def test_output_shape(self):
-        inputs = tf.random.normal((self.batch_size, self.input_dim))
-        outputs = self.layer(inputs)
-        self.assertEqual(outputs.shape, (self.batch_size, self.units))
-
-    def test_residual_connection(self):
-        """Test that the layer can handle different input dimensions."""
-        # Test with larger input dimension
-        layer1 = GatedResidualNetwork(units=self.units, dropout_rate=self.dropout_rate)
-        inputs = tf.random.normal((self.batch_size, self.input_dim))
-        outputs = layer1(inputs)
-        self.assertEqual(outputs.shape[-1], self.units)
-
-        # Test with matching dimensions (using a new layer instance)
-        layer2 = GatedResidualNetwork(units=self.units, dropout_rate=self.dropout_rate)
-        inputs = tf.random.normal((self.batch_size, self.units))
-        outputs = layer2(inputs)
-        self.assertEqual(outputs.shape, inputs.shape)
-
-    def test_dropout_behavior(self):
-        inputs = tf.random.normal((self.batch_size, self.input_dim))
-
-        # Test training phase (dropout active)
-        training_outputs = []
-        for _ in range(5):
-            outputs = self.layer(inputs, training=True)
-            training_outputs.append(outputs)
-
-        # Outputs should be different during training due to dropout
-        for i in range(len(training_outputs) - 1):
-            self.assertNotAllClose(training_outputs[i], training_outputs[i + 1])
-
-        # Test inference phase (dropout inactive)
-        inference_outputs = []
-        for _ in range(5):
-            outputs = self.layer(inputs, training=False)
-            inference_outputs.append(outputs)
-
-        # Outputs should be identical during inference
-        for i in range(len(inference_outputs) - 1):
-            self.assertAllClose(inference_outputs[i], inference_outputs[i + 1])
-
-    def test_serialization_basic(self):
-        config = self.layer.get_config()
-        new_layer = GatedResidualNetwork.from_config(config)
-        self.assertEqual(self.layer.units, new_layer.units)
-        self.assertEqual(self.layer.dropout_rate, new_layer.dropout_rate)
-
-    def test_output_types(self):
-        """Test output types for GatedResidualNetwork."""
-        batch_size = 32
-        input_dim = 64
-        dropout_rate = 0.5
-
-        grn = GatedResidualNetwork(units=input_dim, dropout_rate=dropout_rate)
-        inputs = tf.random.normal((batch_size, input_dim))
-
-        outputs = grn(inputs)
-
-        # Verify output is a tensor with correct dtype
-        self.assertIsInstance(outputs, tf.Tensor)
-        self.assertEqual(outputs.dtype, tf.float32)
-
-        # Test with integer-valued inputs (round-tripped through int32 and
-        # cast back to float32, since the layer expects float inputs)
-        inputs_int = tf.cast(tf.cast(inputs, tf.int32), tf.float32)
-        outputs_from_int = grn(inputs_int)
-        self.assertEqual(
-            outputs_from_int.dtype, tf.float32
-        )  # Should always output float32
-
-    def test_serialization_and_output_consistency(self):
-        """Test serialization and deserialization of GatedResidualNetwork."""
-        grn = GatedResidualNetwork(units=64, dropout_rate=0.3)
-        # Build the layer first
-        dummy_input = tf.random.normal((1, 64))
-        grn(dummy_input)
-
-        config = grn.get_config()
-        grn_new = GatedResidualNetwork.from_config(config)
-        grn_new(dummy_input)
-
-        # Set the weights to be the same
-        grn_new.set_weights(grn.get_weights())
-
-        # Test both layers produce the same output
-        inputs = tf.random.normal((32, 64))
-        self.assertAllClose(grn(inputs), grn_new(inputs))
-
-
 class TestVariableSelection(tf.test.TestCase):
     def setUp(self):
         super().setUp()