diff --git a/CMakeLists.txt b/CMakeLists.txt
index 868c899c..59f86fff 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -50,6 +50,7 @@ add_library(neural-fortran
   src/nf/nf_loss_submodule.f90
   src/nf/nf_maxpool2d_layer.f90
   src/nf/nf_maxpool2d_layer_submodule.f90
+  src/nf/nf_metrics.f90
   src/nf/nf_network.f90
   src/nf/nf_network_submodule.f90
   src/nf/nf_optimizers.f90
diff --git a/README.md b/README.md
index f8b2174c..054582b2 100644
--- a/README.md
+++ b/README.md
@@ -20,6 +20,7 @@ Read the paper [here](https://arxiv.org/abs/1902.06714).
 * Stochastic gradient descent optimizers: Classic, momentum, Nesterov momentum, RMSProp, Adagrad, Adam, AdamW
 * More than a dozen activation functions and their derivatives
+* Loss functions and metrics: Quadratic, Mean Squared Error, Pearson Correlation, etc.
 * Loading dense and convolutional models from Keras HDF5 (.h5) files
 * Data-based parallelism
diff --git a/example/cnn_mnist.f90 b/example/cnn_mnist.f90
index e8f6f0a0..fecb7deb 100644
--- a/example/cnn_mnist.f90
+++ b/example/cnn_mnist.f90
@@ -11,9 +11,6 @@ program cnn_mnist
   real, allocatable :: training_images(:,:), training_labels(:)
   real, allocatable :: validation_images(:,:), validation_labels(:)
   real, allocatable :: testing_images(:,:), testing_labels(:)
-  real, allocatable :: input_reshaped(:,:,:,:)
-  real :: acc
-  logical :: ok
   integer :: n
 
   integer, parameter :: num_epochs = 10
diff --git a/example/dense_mnist.f90 b/example/dense_mnist.f90
index 8a00c08d..3b0ec07d 100644
--- a/example/dense_mnist.f90
+++ b/example/dense_mnist.f90
@@ -1,6 +1,6 @@
 program dense_mnist
 
-  use nf, only: dense, input, network, sgd, label_digits, load_mnist
+  use nf, only: dense, input, network, sgd, label_digits, load_mnist, corr
 
   implicit none
 
@@ -38,9 +38,17 @@ program dense_mnist
       optimizer=sgd(learning_rate=3.) &
     )
 
-    if (this_image() == 1) &
-      print '(a,i2,a,f5.2,a)', 'Epoch ', n, ' done, Accuracy: ', accuracy( &
-        net, validation_images, label_digits(validation_labels)) * 100, ' %'
+    block
+      real, allocatable :: output_metrics(:,:)
+      real, allocatable :: mean_metrics(:)
+      ! 2 metrics; 1st is default loss function (quadratic), other is Pearson corr.
+      output_metrics = net % evaluate(validation_images, label_digits(validation_labels), metric=corr())
+      mean_metrics = sum(output_metrics, 1) / size(output_metrics, 1)
+      if (this_image() == 1) &
+        print '(a,i2,3(a,f6.3))', 'Epoch ', n, ' done, Accuracy: ', &
+          accuracy(net, validation_images, label_digits(validation_labels)) * 100, &
+          '%, Loss: ', mean_metrics(1), ', Pearson correlation: ', mean_metrics(2)
+    end block
 
   end do epochs
 
diff --git a/example/get_set_network_params.f90 b/example/get_set_network_params.f90
index c4a5f980..bf9613b6 100644
--- a/example/get_set_network_params.f90
+++ b/example/get_set_network_params.f90
@@ -9,8 +9,7 @@ program get_set_network_params
   integer, parameter :: test_size = 30
   real :: xtest(test_size), ytest(test_size)
   real :: ypred1(test_size), ypred2(test_size)
-  integer :: i, n, nparam
-  real, allocatable :: parameters(:)
+  integer :: i, n
 
   print '("Getting and setting network parameters")'
   print '(60("="))'
diff --git a/example/quadratic.f90 b/example/quadratic.f90
index 9bd4cdc3..30303ad8 100644
--- a/example/quadratic.f90
+++ b/example/quadratic.f90
@@ -24,7 +24,7 @@ program quadratic_fit
   real, allocatable :: x(:), y(:) ! training data
   real, allocatable :: xtest(:), ytest(:) ! testing data
-  integer :: i, n
+  integer :: i
 
   print '("Fitting quadratic function")'
   print '(60("="))'
@@ -277,7 +277,7 @@ subroutine rmsprop_optimizer( &
     real, intent(in) :: xtest(:), ytest(:)
     real, intent(in) :: learning_rate, decay_rate
     integer, intent(in) :: num_epochs
-    integer :: i, j, n
+    integer :: i, n
     real, allocatable :: ypred(:)
 
     print '(a)', 'RMSProp optimizer'
@@ -446,4 +446,4 @@ subroutine shuffle(arr)
     end do
 
   end subroutine shuffle
-end program quadratic_fit
\ No newline at end of file
+end program quadratic_fit
diff --git a/fpm.toml b/fpm.toml
index 4fc21b34..b638d215 100644
--- a/fpm.toml
+++ b/fpm.toml
@@ -1,5 +1,5 @@
 name = "neural-fortran"
-version = "0.16.1"
+version = "0.17.0"
 license = "MIT"
 author = "Milan Curcic"
 maintainer = "milancurcic@hey.com"
diff --git a/src/nf.f90 b/src/nf.f90
index ae72e1e2..b97d9e62 100644
--- a/src/nf.f90
+++ b/src/nf.f90
@@ -5,6 +5,7 @@ module nf
   use nf_layer_constructors, only: &
     conv2d, dense, flatten, input, maxpool2d, reshape
   use nf_loss, only: mse, quadratic
+  use nf_metrics, only: corr, maxabs
   use nf_network, only: network
   use nf_optimizers, only: sgd, rmsprop, adam, adagrad
   use nf_activation, only: activation_function, elu, exponential, &
diff --git a/src/nf/nf_conv2d_layer.f90 b/src/nf/nf_conv2d_layer.f90
index 7b72980c..2f286030 100644
--- a/src/nf/nf_conv2d_layer.f90
+++ b/src/nf/nf_conv2d_layer.f90
@@ -89,19 +89,19 @@ pure module function get_num_params(self) result(num_params)
         !! Number of parameters
     end function get_num_params
 
-    pure module function get_params(self) result(params)
+    module function get_params(self) result(params)
       !! Return the parameters (weights and biases) of this layer.
       !! The parameters are ordered as weights first, biases second.
-      class(conv2d_layer), intent(in) :: self
+      class(conv2d_layer), intent(in), target :: self
         !! A `conv2d_layer` instance
       real, allocatable :: params(:)
         !! Parameters to get
     end function get_params
 
-    pure module function get_gradients(self) result(gradients)
+    module function get_gradients(self) result(gradients)
      !! Return the gradients of this layer.
      !! The gradients are ordered as weights first, biases second.
-      class(conv2d_layer), intent(in) :: self
+      class(conv2d_layer), intent(in), target :: self
        !! A `conv2d_layer` instance
      real, allocatable :: gradients(:)
        !! Gradients to get
diff --git a/src/nf/nf_conv2d_layer_submodule.f90 b/src/nf/nf_conv2d_layer_submodule.f90
index b4804243..5f71084b 100644
--- a/src/nf/nf_conv2d_layer_submodule.f90
+++ b/src/nf/nf_conv2d_layer_submodule.f90
@@ -189,24 +189,32 @@ pure module function get_num_params(self) result(num_params)
 
   end function get_num_params
 
-  pure module function get_params(self) result(params)
-    class(conv2d_layer), intent(in) :: self
+  module function get_params(self) result(params)
+    class(conv2d_layer), intent(in), target :: self
     real, allocatable :: params(:)
 
+    real, pointer :: w_(:) => null()
+
+    w_(1:size(self % kernel)) => self % kernel
+
     params = [ &
-      pack(self % kernel, .true.), &
+      w_, &
      self % biases &
    ]
 
   end function get_params
 
-  pure module function get_gradients(self) result(gradients)
-    class(conv2d_layer), intent(in) :: self
+  module function get_gradients(self) result(gradients)
+    class(conv2d_layer), intent(in), target :: self
     real, allocatable :: gradients(:)
 
+    real, pointer :: dw_(:) => null()
+
+    dw_(1:size(self % dw)) => self % dw
+
     gradients = [ &
-      pack(self % dw, .true.), &
+      dw_, &
      self % db &
    ]
 
@@ -219,7 +227,7 @@ module subroutine set_params(self, params)
 
     ! Check that the number of parameters is correct.
     if (size(params) /= self % get_num_params()) then
-      error stop 'conv2d % set_params: Number of parameters does not match'
+      error stop 'conv2d % set_params: Number of parameters does not match'
     end if
 
     ! Reshape the kernel.
@@ -229,10 +237,9 @@ module subroutine set_params(self, params)
     )
 
     ! Reshape the biases.
-    self % biases = reshape( &
-      params(product(shape(self % kernel)) + 1:), &
-      [self % filters] &
-    )
+    associate(n => product(shape(self % kernel)))
+      self % biases = params(n + 1 : n + self % filters)
+    end associate
 
   end subroutine set_params
diff --git a/src/nf/nf_dense_layer.f90 b/src/nf/nf_dense_layer.f90
index ae523ccb..c5735799 100644
--- a/src/nf/nf_dense_layer.f90
+++ b/src/nf/nf_dense_layer.f90
@@ -87,19 +87,19 @@ pure module function get_num_params(self) result(num_params)
         !! Number of parameters in this layer
     end function get_num_params
 
-    pure module function get_params(self) result(params)
+    module function get_params(self) result(params)
       !! Return the parameters (weights and biases) of this layer.
       !! The parameters are ordered as weights first, biases second.
-      class(dense_layer), intent(in) :: self
+      class(dense_layer), intent(in), target :: self
        !! Dense layer instance
      real, allocatable :: params(:)
        !! Parameters of this layer
    end function get_params
 
-    pure module function get_gradients(self) result(gradients)
+    module function get_gradients(self) result(gradients)
      !! Return the gradients of this layer.
      !! The gradients are ordered as weights first, biases second.
-      class(dense_layer), intent(in) :: self
+      class(dense_layer), intent(in), target :: self
        !! Dense layer instance
      real, allocatable :: gradients(:)
        !! Gradients of this layer
@@ -110,7 +110,7 @@ module subroutine set_params(self, params)
       !! The parameters are ordered as weights first, biases second.
       class(dense_layer), intent(in out) :: self
         !! Dense layer instance
-      real, intent(in) :: params(:)
+      real, intent(in), target :: params(:)
         !! Parameters of this layer
     end subroutine set_params
diff --git a/src/nf/nf_dense_layer_submodule.f90 b/src/nf/nf_dense_layer_submodule.f90
index 4be23e33..50d5b10d 100644
--- a/src/nf/nf_dense_layer_submodule.f90
+++ b/src/nf/nf_dense_layer_submodule.f90
@@ -61,24 +61,32 @@ pure module function get_num_params(self) result(num_params)
 
   end function get_num_params
 
-  pure module function get_params(self) result(params)
-    class(dense_layer), intent(in) :: self
+  module function get_params(self) result(params)
+    class(dense_layer), intent(in), target :: self
     real, allocatable :: params(:)
 
+    real, pointer :: w_(:) => null()
+
+    w_(1:size(self % weights)) => self % weights
+
     params = [ &
-      pack(self % weights, .true.), &
+      w_, &
      self % biases &
    ]
 
   end function get_params
 
-  pure module function get_gradients(self) result(gradients)
-    class(dense_layer), intent(in) :: self
+  module function get_gradients(self) result(gradients)
+    class(dense_layer), intent(in), target :: self
     real, allocatable :: gradients(:)
 
+    real, pointer :: dw_(:) => null()
+
+    dw_(1:size(self % dw)) => self % dw
+
     gradients = [ &
-      pack(self % dw, .true.), &
+      dw_, &
      self % db &
    ]
 
@@ -87,24 +95,23 @@ end function get_gradients
 
   module subroutine set_params(self, params)
     class(dense_layer), intent(in out) :: self
-    real, intent(in) :: params(:)
+    real, intent(in), target :: params(:)
+
+    real, pointer :: p_(:,:) => null()
 
     ! check if the number of parameters is correct
     if (size(params) /= self % get_num_params()) then
       error stop 'Error: number of parameters does not match'
     end if
 
-    ! reshape the weights
-    self % weights = reshape( &
-      params(:self % input_size * self % output_size), &
-      [self % input_size, self % output_size] &
-    )
-
-    ! reshape the biases
-    self % biases = reshape( &
-      params(self % input_size * self % output_size + 1:), &
-      [self % output_size] &
-    )
+    associate(n => self % input_size * self % output_size)
+      ! reshape the weights
+      p_(1:self % input_size, 1:self % output_size) => params(1 : n)
+      self % weights = p_
+
+      ! reshape the biases
+      self % biases = params(n + 1 : n + self % output_size)
+    end associate
 
   end subroutine set_params
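
The get_params, get_gradients, and set_params changes above trade pack() and reshape(), which allocate temporary copies, for Fortran 2008 bounds-remapping pointer assignment, which views the same storage under a different rank; this is also why these procedures lose the pure attribute and why self and params gain the target attribute. A minimal standalone sketch of the technique (illustrative only, not part of the patch):

program pointer_remap_demo
  ! Demonstrates rank-remapping pointer assignment: a rank-1 view of
  ! rank-2 storage (as in get_params/get_gradients) and a rank-2 view
  ! of a flat parameter vector (as in set_params), with no copies made.
  implicit none
  real, target :: weights(2,3)
  real, target :: params(6) = [1., 2., 3., 4., 5., 6.]
  real, pointer :: w_flat(:) => null()
  real, pointer :: p2d(:,:) => null()

  weights = reshape(params, [2, 3])

  w_flat(1:size(weights)) => weights   ! flatten without copying
  print *, w_flat                      ! 1. ... 6., in column-major order

  p2d(1:2, 1:3) => params              ! reshape without copying
  print *, p2d(2, 3)                   ! 6.

end program pointer_remap_demo
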
diff --git a/src/nf/nf_layer.f90 b/src/nf/nf_layer.f90
index e9e90da8..ca5e9606 100644
--- a/src/nf/nf_layer.f90
+++ b/src/nf/nf_layer.f90
@@ -129,7 +129,7 @@ elemental module function get_num_params(self) result(num_params)
         !! Number of parameters in this layer
     end function get_num_params
 
-    pure module function get_params(self) result(params)
+    module function get_params(self) result(params)
       !! Returns the parameters of this layer.
       class(layer), intent(in) :: self
         !! Layer instance
@@ -137,7 +137,7 @@ pure module function get_params(self) result(params)
       real, allocatable :: params(:)
         !! Parameters of this layer
     end function get_params
 
-    pure module function get_gradients(self) result(gradients)
+    module function get_gradients(self) result(gradients)
      !! Returns the gradients of this layer.
      class(layer), intent(in) :: self
        !! Layer instance
diff --git a/src/nf/nf_layer_submodule.f90 b/src/nf/nf_layer_submodule.f90
index 07467643..c672581a 100644
--- a/src/nf/nf_layer_submodule.f90
+++ b/src/nf/nf_layer_submodule.f90
@@ -298,7 +298,7 @@ elemental module function get_num_params(self) result(num_params)
 
   end function get_num_params
 
-  pure module function get_params(self) result(params)
+  module function get_params(self) result(params)
     class(layer), intent(in) :: self
     real, allocatable :: params(:)
 
@@ -323,7 +323,7 @@ pure module function get_params(self) result(params)
 
   end function get_params
 
-  pure module function get_gradients(self) result(gradients)
+  module function get_gradients(self) result(gradients)
     class(layer), intent(in) :: self
     real, allocatable :: gradients(:)
 
diff --git a/src/nf/nf_loss.f90 b/src/nf/nf_loss.f90
index ceeb4390..75e47f92 100644
--- a/src/nf/nf_loss.f90
+++ b/src/nf/nf_loss.f90
@@ -7,6 +7,7 @@ module nf_loss
   !! loss type that extends the abstract loss derived type, and that
   !! implements concrete eval and derivative methods that accept vectors.
 
+  use nf_metrics, only: metric_type
   implicit none
 
   private
@@ -14,18 +15,12 @@ module nf_loss
   public :: mse
   public :: quadratic
 
-  type, abstract :: loss_type
+  type, extends(metric_type), abstract :: loss_type
   contains
-    procedure(loss_interface), nopass, deferred :: eval
     procedure(loss_derivative_interface), nopass, deferred :: derivative
   end type loss_type
 
   abstract interface
-    pure function loss_interface(true, predicted) result(res)
-      real, intent(in) :: true(:)
-      real, intent(in) :: predicted(:)
-      real :: res
-    end function loss_interface
     pure function loss_derivative_interface(true, predicted) result(res)
       real, intent(in) :: true(:)
       real, intent(in) :: predicted(:)
diff --git a/src/nf/nf_metrics.f90 b/src/nf/nf_metrics.f90
new file mode 100644
index 00000000..43bc12d7
--- /dev/null
+++ b/src/nf/nf_metrics.f90
@@ -0,0 +1,72 @@
+module nf_metrics
+
+  !! This module provides a collection of metric functions.
+
+  implicit none
+
+  private
+  public :: metric_type
+  public :: corr
+  public :: maxabs
+
+  type, abstract :: metric_type
+  contains
+    procedure(metric_interface), nopass, deferred :: eval
+  end type metric_type
+
+  abstract interface
+    pure function metric_interface(true, predicted) result(res)
+      real, intent(in) :: true(:)
+      real, intent(in) :: predicted(:)
+      real :: res
+    end function metric_interface
+  end interface
+
+  type, extends(metric_type) :: corr
+    !! Pearson correlation
+  contains
+    procedure, nopass :: eval => corr_eval
+  end type corr
+
+  type, extends(metric_type) :: maxabs
+    !! Maximum absolute difference
+  contains
+    procedure, nopass :: eval => maxabs_eval
+  end type maxabs
+
+contains
+
+  pure function corr_eval(true, predicted) result(res)
+    !! Pearson correlation function:
+    !! r = cov(true, predicted) / (std(true) * std(predicted))
+    real, intent(in) :: true(:)
+      !! True values, i.e. labels from training datasets
+    real, intent(in) :: predicted(:)
+      !! Values predicted by the network
+    real :: res
+      !! Resulting correlation value
+    real :: m_true, m_pred
+
+    m_true = sum(true) / size(true)
+    m_pred = sum(predicted) / size(predicted)
+
+    res = dot_product(true - m_true, predicted - m_pred) / &
+      sqrt(sum((true - m_true)**2) * sum((predicted - m_pred)**2))
+
+  end function corr_eval
+
+  pure function maxabs_eval(true, predicted) result(res)
+    !! Maximum absolute difference function:
+    !! maxabs = max(|true - predicted|)
+    real, intent(in) :: true(:)
+      !! True values, i.e. labels from training datasets
+    real, intent(in) :: predicted(:)
+      !! Values predicted by the network
+    real :: res
+      !! Resulting maximum absolute difference value
+
+    res = maxval(abs(true - predicted))
+
+  end function maxabs_eval
+
+end module nf_metrics
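
Since metric_type and its deferred eval binding are public, metrics beyond corr and maxabs can be defined outside the library. A hypothetical sketch (not part of the patch) of a user-defined root-mean-square-error metric that plugs into the same machinery:

module rmse_metric
  ! Hypothetical user-defined metric: extend metric_type and implement
  ! the nopass deferred eval with the same interface as metric_interface.
  use nf_metrics, only: metric_type
  implicit none
  private
  public :: rmse

  type, extends(metric_type) :: rmse
    !! Root mean square error
  contains
    procedure, nopass :: eval => rmse_eval
  end type rmse

contains

  pure function rmse_eval(true, predicted) result(res)
    real, intent(in) :: true(:)
    real, intent(in) :: predicted(:)
    real :: res
    res = sqrt(sum((true - predicted)**2) / size(true))
  end function rmse_eval

end module rmse_metric

An instance would then be passed like the built-in metrics, e.g. net % evaluate(inputs, targets, metric=rmse()).
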
diff --git a/src/nf/nf_network.f90 b/src/nf/nf_network.f90
index 4346f4e3..bcf10ae8 100644
--- a/src/nf/nf_network.f90
+++ b/src/nf/nf_network.f90
@@ -3,6 +3,7 @@ module nf_network
   !! This module provides the network type to create new models.
 
   use nf_layer, only: layer
+  use nf_metrics, only: metric_type
   use nf_loss, only: loss_type
   use nf_optimizers, only: optimizer_base_type
 
@@ -28,6 +29,7 @@ module nf_network
     procedure :: train
     procedure :: update
 
+    procedure, private :: evaluate_batch_1d
     procedure, private :: forward_1d
     procedure, private :: forward_3d
     procedure, private :: predict_1d
@@ -35,6 +37,7 @@ module nf_network
     procedure, private :: predict_batch_1d
     procedure, private :: predict_batch_3d
 
+    generic :: evaluate => evaluate_batch_1d
     generic :: forward => forward_1d, forward_3d
     generic :: predict => predict_1d, predict_3d, predict_batch_1d, predict_batch_3d
 
@@ -62,6 +65,16 @@ end function network_from_keras
 
   end interface network
 
+  interface evaluate
+    module function evaluate_batch_1d(self, input_data, output_data, metric) result(res)
+      class(network), intent(in out) :: self
+      real, intent(in) :: input_data(:,:)
+      real, intent(in) :: output_data(:,:)
+      class(metric_type), intent(in), optional :: metric
+      real, allocatable :: res(:,:)
+    end function evaluate_batch_1d
+  end interface evaluate
+
   interface forward
 
     pure module subroutine forward_1d(self, input)
@@ -159,14 +172,14 @@ pure module integer function get_num_params(self)
         !! Network instance
     end function get_num_params
 
-    pure module function get_params(self) result(params)
+    module function get_params(self) result(params)
       !! Get the network parameters (weights and biases).
      class(network), intent(in) :: self
        !! Network instance
      real, allocatable :: params(:)
        !! Network parameters to get
    end function get_params
 
-    pure module function get_gradients(self) result(gradients)
+    module function get_gradients(self) result(gradients)
      class(network), intent(in) :: self
        !! Network instance
      real, allocatable :: gradients(:)
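
For reference, a minimal self-contained sketch (not part of the patch; data values are arbitrary) of how the new evaluate generic is called. Row i of the result holds metrics for output variable i computed across the batch: column 1 is the network's loss function (quadratic by default), column 2 the optional user-supplied metric:

program evaluate_demo
  use nf, only: dense, input, network, sgd, corr
  implicit none
  type(network) :: net
  real, allocatable :: metrics(:,:)
  real :: x(1,4), y(1,4)  ! (variables, batch), as in train and predict
  integer :: n

  x(1,:) = [0.1, 0.3, 0.5, 0.7]
  y(1,:) = [0.2, 0.4, 0.6, 0.8]

  net = network([input(1), dense(1)])

  ! A few training steps; like test/test_metrics.f90 below, we train
  ! before evaluating so that the default (quadratic) loss is in place.
  do n = 1, 100
    call net % forward(x(:,1))
    call net % backward(y(:,1))
    call net % update(sgd(learning_rate=1.))
  end do

  metrics = net % evaluate(x, y)                 ! shape (1, 1): loss only
  metrics = net % evaluate(x, y, metric=corr())  ! shape (1, 2): loss + Pearson r
  print *, metrics

end program evaluate_demo
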
diff --git a/src/nf/nf_network_submodule.f90 b/src/nf/nf_network_submodule.f90
index 59abcf7d..60fa8579 100644
--- a/src/nf/nf_network_submodule.f90
+++ b/src/nf/nf_network_submodule.f90
@@ -337,6 +337,36 @@ pure module subroutine backward(self, output, loss)
 
   end subroutine backward
 
+  module function evaluate_batch_1d(self, input_data, output_data, metric) result(res)
+    class(network), intent(in out) :: self
+    real, intent(in) :: input_data(:,:)
+    real, intent(in) :: output_data(:,:)
+    class(metric_type), intent(in), optional :: metric
+    real, allocatable :: res(:,:)
+
+    integer :: i, n
+    real, allocatable :: output(:,:)
+
+    output = self % predict(input_data)
+
+    n = 1
+    if (present(metric)) n = n + 1
+
+    allocate(res(size(output, dim=1), n))
+
+    do concurrent (i = 1:size(output, dim=1))
+      res(i,1) = self % loss % eval(output_data(i,:), output(i,:))
+    end do
+
+    if (.not. present(metric)) return
+
+    do concurrent (i = 1:size(output, dim=1))
+      res(i,2) = metric % eval(output_data(i,:), output(i,:))
+    end do
+
+  end function evaluate_batch_1d
+
+
   pure module subroutine forward_1d(self, input)
     class(network), intent(in out) :: self
     real, intent(in) :: input(:)
@@ -496,7 +526,7 @@ pure module function get_num_params(self)
 
   end function get_num_params
 
-  pure module function get_params(self) result(params)
+  module function get_params(self) result(params)
     class(network), intent(in) :: self
     real, allocatable :: params(:)
     integer :: n, nstart, nend
@@ -516,7 +546,7 @@ pure module function get_params(self) result(params)
 
   end function get_params
 
-  pure module function get_gradients(self) result(gradients)
+  module function get_gradients(self) result(gradients)
     class(network), intent(in) :: self
     real, allocatable :: gradients(:)
     integer :: n, nstart, nend
@@ -566,11 +596,10 @@ module subroutine train(self, input_data, output_data, batch_size, &
     integer, intent(in) :: epochs
     class(optimizer_base_type), intent(in), optional :: optimizer
     class(loss_type), intent(in), optional :: loss
-    class(optimizer_base_type), allocatable :: optimizer_
 
     real :: pos
     integer :: dataset_size
-    integer :: batch_start, batch_end
+    integer :: batch_start
     integer :: i, j, n
     integer :: istart, iend, indices(2)
 
@@ -600,11 +629,9 @@ module subroutine train(self, input_data, output_data, batch_size, &
       ! Pull a random mini-batch from the dataset
       call random_number(pos)
       batch_start = int(pos * (dataset_size - batch_size + 1)) + 1
-      batch_end = batch_start + batch_size - 1
 
       ! FIXME shuffle in a way that doesn't require co_broadcast
       call co_broadcast(batch_start, 1)
-      call co_broadcast(batch_end, 1)
 
       ! Distribute the batch in nearly equal pieces to all images
       indices = tile_indices(batch_size)
@@ -628,7 +655,6 @@ module subroutine update(self, optimizer, batch_size)
     class(network), intent(in out) :: self
     class(optimizer_base_type), intent(in), optional :: optimizer
     integer, intent(in), optional :: batch_size
-    class(optimizer_base_type), allocatable :: optimizer_
     integer :: batch_size_
     real, allocatable :: params(:)
     integer :: n
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 6903deb7..1be8bb8d 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -17,6 +17,7 @@ foreach(execid
   conv2d_network
   optimizers
   loss
+  metrics
 )
   add_executable(test_${execid} test_${execid}.f90)
   target_link_libraries(test_${execid} PRIVATE neural-fortran h5fortran::h5fortran jsonfortran::jsonfortran ${LIBS})
diff --git a/test/test_flatten_layer.f90 b/test/test_flatten_layer.f90
index cc780acd..70641c9c 100644
--- a/test/test_flatten_layer.f90
+++ b/test/test_flatten_layer.f90
@@ -9,7 +9,7 @@ program test_flatten_layer
 
   type(layer) :: test_layer, input_layer
   type(network) :: net
-  real, allocatable :: input_data(:,:,:), gradient(:,:,:)
+  real, allocatable :: gradient(:,:,:)
   real, allocatable :: output(:)
   logical :: ok = .true.
 
diff --git a/test/test_get_set_network_params.f90 b/test/test_get_set_network_params.f90
index b9cc24b1..3e285f3c 100644
--- a/test/test_get_set_network_params.f90
+++ b/test/test_get_set_network_params.f90
@@ -3,7 +3,6 @@ program test_get_set_network_params
   use nf, only: conv2d, dense, flatten, input, maxpool2d, network
   implicit none
   type(network) :: net
-  integer :: n
   logical :: ok = .true.
   real :: test_params_dense(8) = [1, 2, 3, 4, 5, 6, 7, 8]
   real :: test_params_conv2d(10) = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
diff --git a/test/test_metrics.f90 b/test/test_metrics.f90
new file mode 100644
index 00000000..a9db74ed
--- /dev/null
+++ b/test/test_metrics.f90
@@ -0,0 +1,70 @@
+program test_metrics
+  use iso_fortran_env, only: stderr => error_unit
+  use nf, only: dense, input, network, sgd, mse
+  implicit none
+  type(network) :: net
+  logical :: ok = .true.
+
+  ! Minimal 2-layer network
+  net = network([ &
+    input(1), &
+    dense(1) &
+  ])
+
+  training: block
+    real :: x(1), y(1)
+    real :: tolerance = 1e-3
+    integer :: n
+    integer, parameter :: num_iterations = 1000
+    real :: quadratic_loss, mse_metric
+    real, allocatable :: metrics(:,:)
+
+    x = [0.1234567]
+    y = [0.7654321]
+
+    do n = 1, num_iterations
+      call net % forward(x)
+      call net % backward(y)
+      call net % update(sgd(learning_rate=1.))
+      if (all(abs(net % predict(x) - y) < tolerance)) exit
+    end do
+
+    ! Returns only one metric, based on the default loss function (quadratic).
+    metrics = net % evaluate(reshape(x, [1, 1]), reshape(y, [1, 1]))
+    quadratic_loss = metrics(1,1)
+
+    if (.not. all(shape(metrics) == [1, 1])) then
+      write(stderr, '(a)') 'metrics array is the correct shape (1, 1).. failed'
+      ok = .false.
+    end if
+
+    ! Returns two metrics, one from the loss function and another specified by the user.
+    metrics = net % evaluate(reshape(x, [1, 1]), reshape(y, [1, 1]), metric=mse())
+
+    if (.not. all(shape(metrics) == [1, 2])) then
+      write(stderr, '(a)') 'metrics array is the correct shape (1, 2).. failed'
+      ok = .false.
+    end if
+
+    mse_metric = metrics(1,2)
+
+    if (.not. all(metrics < 1e-5)) then
+      write(stderr, '(a)') 'value for all metrics is expected.. failed'
+      ok = .false.
+    end if
+
+    if (.not. metrics(1,1) == quadratic_loss) then
+      write(stderr, '(a)') 'first metric should be the same as that of the loss function.. failed'
+      ok = .false.
+    end if
+
+  end block training
+
+  if (ok) then
+    print '(a)', 'test_metrics: All tests passed.'
+  else
+    write(stderr, '(a)') 'test_metrics: One or more tests failed.'
+    stop 1
+  end if
+
+end program test_metrics
diff --git a/test/test_optimizers.f90 b/test/test_optimizers.f90
index dc2cc03a..4ed1d927 100644
--- a/test/test_optimizers.f90
+++ b/test/test_optimizers.f90
@@ -8,7 +8,7 @@ program test_optimizers
 
   real, allocatable :: x(:), y(:)
   real, allocatable :: ypred(:)
   integer, parameter :: num_iterations = 1000
-  integer :: n, i
+  integer :: n
   logical :: ok = .true.
   logical :: converged = .false.
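
Note that maxabs is exported from src/nf.f90 alongside corr but is not exercised by any example or test in this patch. It follows the same calling convention, e.g. (fragment, assuming a trained net as in example/dense_mnist.f90 above):

! Column 2 then holds the maximum absolute difference between targets
! and predictions for each output variable.
output_metrics = net % evaluate(validation_images, label_digits(validation_labels), metric=maxabs())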