Reduce the time of keras unittest. #999

Merged · 3 commits · Mar 5, 2023
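
This PR shortens the Keras unit tests. The visible diff touches two test files and does three things: every MNIST loader setting raises ValidationSize to 58000 (from 50000 or 10000), the tests switch from the static keras/KerasApi and new LossesApi() helpers to the tf.keras.* accessors, and the now-unused using directives are dropped. MNIST ships 60,000 training images, so reserving 58,000 of them for validation leaves only about 2,000 samples for model.fit, which is where the speed-up comes from. The sketch below illustrates that split; the ModelLoadSetting type and the LoadAsync(setting) call shape are assumptions inferred from the fragments visible in the diff, since those lines sit in the collapsed parts of the files.

    // Sketch only - ModelLoadSetting and LoadAsync(setting) are assumed here;
    // the diff shows only the initializer fields and the trailing ").Result;".
    var data_loader = new MnistModelLoader();
    var dataset = data_loader.LoadAsync(new ModelLoadSetting
    {
        TrainDir = "mnist",
        OneHot = false,
        // MNIST has 60,000 training images; keeping 58,000 back as validation
        // data leaves ~2,000 samples for model.fit, versus 10,000-50,000 before.
        ValidationSize = 58000,
    }).Result;
    // dataset.Train.Data and dataset.Train.Labels are what the tests feed to model.fit.

Training on roughly 2,000 samples is presumably still enough to drive the save/load and compile code paths these tests exercise, while cutting fit time roughly in proportion to the smaller split.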
@@ -1,20 +1,10 @@
 using Microsoft.VisualStudio.TestTools.UnitTesting;
-using System;
-using System.Collections.Generic;
-using System.Diagnostics;
-using System.Linq;
-using System.Text;
-using System.Threading.Tasks;
 using Tensorflow.Keras.Engine;
-using Tensorflow.Keras.Saving.SavedModel;
-using Tensorflow.Keras.Losses;
-using Tensorflow.Keras.Metrics;
 using Tensorflow;
 using Tensorflow.Keras.Optimizers;
-using static Tensorflow.KerasApi;
-using Tensorflow.NumPy;
 using Tensorflow.Keras.UnitTest.Helpers;
 using static TensorFlowNET.Keras.UnitTest.SaveModel.SequentialModelSave;
+using Tensorflow.NumPy;
 using static Tensorflow.Binding;

 namespace TensorFlowNET.Keras.UnitTest.SaveModel;

@@ -24,10 +14,10 @@ public class SequentialModelLoad
     [TestMethod]
     public void SimpleModelFromAutoCompile()
     {
-        var model = keras.models.load_model(@"Assets/simple_model_from_auto_compile");
+        var model = tf.keras.models.load_model(@"Assets/simple_model_from_auto_compile");
         model.summary();

-        model.compile(new Adam(0.0001f), new LossesApi().SparseCategoricalCrossentropy(), new string[] { "accuracy" });
+        model.compile(new Adam(0.0001f), tf.keras.losses.SparseCategoricalCrossentropy(), new string[] { "accuracy" });

         // check the weights
         var kernel1 = np.load(@"Assets/simple_model_from_auto_compile/kernel1.npy");
@@ -44,7 +34,7 @@ public void SimpleModelFromAutoCompile()
         {
             TrainDir = "mnist",
             OneHot = false,
-            ValidationSize = 50000,
+            ValidationSize = 58000,
         }).Result;

         model.fit(dataset.Train.Data, dataset.Train.Labels, batch_size, num_epochs);
@@ -54,10 +44,10 @@ public void SimpleModelFromAutoCompile()
     public void AlexnetFromSequential()
     {
         new SequentialModelSave().AlexnetFromSequential();
-        var model = keras.models.load_model(@"./alexnet_from_sequential");
+        var model = tf.keras.models.load_model(@"./alexnet_from_sequential");
         model.summary();

-        model.compile(new Adam(0.001f), new LossesApi().SparseCategoricalCrossentropy(from_logits: true), new string[] { "accuracy" });
+        model.compile(new Adam(0.001f), tf.keras.losses.SparseCategoricalCrossentropy(from_logits: true), new string[] { "accuracy" });

         var num_epochs = 1;
         var batch_size = 8;
@@ -1,10 +1,8 @@
 using Microsoft.VisualStudio.TestTools.UnitTesting;
 using System.Collections.Generic;
-using System.Diagnostics;
 using Tensorflow;
 using Tensorflow.Keras;
 using Tensorflow.Keras.Engine;
-using Tensorflow.Keras.Losses;
 using Tensorflow.Keras.Optimizers;
 using Tensorflow.Keras.UnitTest.Helpers;
 using static Tensorflow.Binding;
@@ -25,8 +23,8 @@ public void SimpleModelFromAutoCompile()
         var outputs = tf.keras.layers.Softmax(axis: 1).Apply(x);
         var model = tf.keras.Model(inputs, outputs);

-        model.compile(new Adam(0.001f), 
-            tf.keras.losses.SparseCategoricalCrossentropy(), 
+        model.compile(new Adam(0.001f),
+            tf.keras.losses.SparseCategoricalCrossentropy(),
             new string[] { "accuracy" });

         var data_loader = new MnistModelLoader();
@@ -37,7 +35,7 @@ public void SimpleModelFromAutoCompile()
         {
             TrainDir = "mnist",
             OneHot = false,
-            ValidationSize = 10000,
+            ValidationSize = 58000,
         }).Result;

         model.fit(dataset.Train.Data, dataset.Train.Labels, batch_size, num_epochs);
@@ -48,18 +46,18 @@ public void SimpleModelFromAutoCompile()
     [TestMethod]
     public void SimpleModelFromSequential()
     {
-        Model model = KerasApi.keras.Sequential(new List<ILayer>()
+        Model model = keras.Sequential(new List<ILayer>()
         {
-            keras.layers.InputLayer((28, 28, 1)),
-            keras.layers.Flatten(),
-            keras.layers.Dense(100, "relu"),
-            keras.layers.Dense(10),
-            keras.layers.Softmax()
+            tf.keras.layers.InputLayer((28, 28, 1)),
+            tf.keras.layers.Flatten(),
+            tf.keras.layers.Dense(100, "relu"),
+            tf.keras.layers.Dense(10),
+            tf.keras.layers.Softmax()
         });

         model.summary();

-        model.compile(new Adam(0.001f), new LossesApi().SparseCategoricalCrossentropy(), new string[] { "accuracy" });
+        model.compile(new Adam(0.001f), tf.keras.losses.SparseCategoricalCrossentropy(), new string[] { "accuracy" });

         var data_loader = new MnistModelLoader();
         var num_epochs = 1;
@@ -69,7 +67,7 @@ public void SimpleModelFromSequential()
         {
             TrainDir = "mnist",
             OneHot = false,
-            ValidationSize = 50000,
+            ValidationSize = 58000,
         }).Result;

         model.fit(dataset.Train.Data, dataset.Train.Labels, batch_size, num_epochs);
@@ -80,39 +78,39 @@ public void SimpleModelFromSequential()
     [TestMethod]
     public void AlexnetFromSequential()
     {
-        Model model = KerasApi.keras.Sequential(new List<ILayer>()
+        Model model = keras.Sequential(new List<ILayer>()
         {
-            keras.layers.InputLayer((227, 227, 3)),
-            keras.layers.Conv2D(96, (11, 11), (4, 4), activation:"relu", padding:"valid"),
-            keras.layers.BatchNormalization(),
-            keras.layers.MaxPooling2D((3, 3), strides:(2, 2)),
+            tf.keras.layers.InputLayer((227, 227, 3)),
+            tf.keras.layers.Conv2D(96, (11, 11), (4, 4), activation:"relu", padding:"valid"),
+            tf.keras.layers.BatchNormalization(),
+            tf.keras.layers.MaxPooling2D((3, 3), strides:(2, 2)),

-            keras.layers.Conv2D(256, (5, 5), (1, 1), "same", activation: "relu"),
-            keras.layers.BatchNormalization(),
-            keras.layers.MaxPooling2D((3, 3), (2, 2)),
+            tf.keras.layers.Conv2D(256, (5, 5), (1, 1), "same", activation: "relu"),
+            tf.keras.layers.BatchNormalization(),
+            tf.keras.layers.MaxPooling2D((3, 3), (2, 2)),

-            keras.layers.Conv2D(384, (3, 3), (1, 1), "same", activation: "relu"),
-            keras.layers.BatchNormalization(),
+            tf.keras.layers.Conv2D(384, (3, 3), (1, 1), "same", activation: "relu"),
+            tf.keras.layers.BatchNormalization(),

-            keras.layers.Conv2D(384, (3, 3), (1, 1), "same", activation: "relu"),
-            keras.layers.BatchNormalization(),
+            tf.keras.layers.Conv2D(384, (3, 3), (1, 1), "same", activation: "relu"),
+            tf.keras.layers.BatchNormalization(),

-            keras.layers.Conv2D(256, (3, 3), (1, 1), "same", activation: "relu"),
-            keras.layers.BatchNormalization(),
-            keras.layers.MaxPooling2D((3, 3), (2, 2)),
+            tf.keras.layers.Conv2D(256, (3, 3), (1, 1), "same", activation: "relu"),
+            tf.keras.layers.BatchNormalization(),
+            tf.keras.layers.MaxPooling2D((3, 3), (2, 2)),

-            keras.layers.Flatten(),
-            keras.layers.Dense(4096, activation: "relu"),
-            keras.layers.Dropout(0.5f),
+            tf.keras.layers.Flatten(),
+            tf.keras.layers.Dense(4096, activation: "relu"),
+            tf.keras.layers.Dropout(0.5f),

-            keras.layers.Dense(4096, activation: "relu"),
-            keras.layers.Dropout(0.5f),
+            tf.keras.layers.Dense(4096, activation: "relu"),
+            tf.keras.layers.Dropout(0.5f),

-            keras.layers.Dense(1000, activation: "linear"),
-            keras.layers.Softmax(1)
+            tf.keras.layers.Dense(1000, activation: "linear"),
+            tf.keras.layers.Softmax(1)
         });

-        model.compile(new Adam(0.001f), new LossesApi().SparseCategoricalCrossentropy(from_logits: true), new string[] { "accuracy" });
+        model.compile(new Adam(0.001f), tf.keras.losses.SparseCategoricalCrossentropy(from_logits: true), new string[] { "accuracy" });

         var num_epochs = 1;
         var batch_size = 8;
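
Read together, the "+" lines above settle on one pattern for the load-and-train tests. Here it is condensed into a single hedged sketch, not a verbatim copy of either file; the setup of dataset, batch_size, and num_epochs lives in collapsed lines and is assumed to match the fragments shown:

    // Condensed from the added lines above; path and learning rate as in the diff.
    var model = tf.keras.models.load_model(@"Assets/simple_model_from_auto_compile");
    model.summary();

    // Loss now comes from tf.keras.losses instead of new LossesApi().
    model.compile(new Adam(0.0001f),
        tf.keras.losses.SparseCategoricalCrossentropy(),
        new string[] { "accuracy" });

    // With ValidationSize = 58000 only ~2,000 MNIST samples remain for training,
    // so one epoch over dataset.Train completes quickly.
    model.fit(dataset.Train.Data, dataset.Train.Labels, batch_size, num_epochs);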