! physics/pumas/module_neural_net.F90
module module_neural_net
    use netcdf
    use shr_kind_mod,   only: r8=>shr_kind_r8

    implicit none
    type Dense
        integer :: input_size
        integer :: output_size
        integer :: batch_size
        integer :: activation
        real(kind=r8), allocatable :: weights(:, :)
        real(kind=r8), allocatable :: bias(:)
    end type Dense

    type DenseData
        real(kind=r8), allocatable :: input(:, :)
        real(kind=r8), allocatable :: output(:, :)
    end type DenseData

contains

    subroutine apply_dense(input, layer, output)
        ! Description: Pass a set of input data through a single dense layer and nonlinear activation function
        !
        ! Inputs:
        ! input (input): a 2D array where the rows are different examples and
        !   the columns are different model inputs
        ! layer (input): a single Dense object
        !
        ! Output:
        ! output: output of the dense layer as a 2D array with shape (number of examples, number of neurons)
        real(kind=r8), dimension(:, :), intent(in) :: input
        type(Dense), intent(in) :: layer
        real(kind=r8), dimension(size(input, 1), layer%output_size), intent(out) :: output
        real(kind=r8), dimension(size(input, 1), layer%output_size) :: dense_output
        integer :: i, j, num_examples
        real(kind=r8) :: alpha, beta
        external :: dgemm
        alpha = 1.0_r8
        beta = 1.0_r8
        dense_output = 0.0_r8
        output = 0.0_r8
        num_examples = size(input, 1)
        ! dense_output = alpha * (input x weights) + beta * dense_output, i.e. the
        ! (num_examples, output_size) matrix product of the inputs with the layer weights
        call dgemm('n', 'n', num_examples, layer%output_size, layer%input_size, &
            alpha, input, num_examples, layer%weights, layer%input_size, beta, dense_output, num_examples)
        ! Add the bias term for each neuron, then apply the activation function
        do i=1, num_examples
            do j=1, layer%output_size
                dense_output(i, j) = dense_output(i, j) + layer%bias(j)
            end do
        end do
        call apply_activation(dense_output, layer%activation, output)
    end subroutine apply_dense

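    ! A dense layer computes y = f(x W + b), where x is (num_examples, input_size),
    ! W is (input_size, output_size), b is (output_size), and f is the activation
    ! selected by layer%activation; the dgemm call above performs the x W product.
    ! Minimal usage sketch (hypothetical layer and data, not taken from this module):
    !   type(Dense) :: layer
    !   real(kind=r8) :: x(4, 3), y(4, 2)
    !   layer%input_size = 3; layer%output_size = 2; layer%activation = 1   ! relu
    !   allocate(layer%weights(3, 2), layer%bias(2))
    !   layer%weights = 0.1_r8; layer%bias = 0.0_r8; x = 1.0_r8
    !   call apply_dense(x, layer, y)   ! each y element = max(3 * 0.1 + 0, 0) = 0.3
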
    subroutine apply_activation(input, activation_type, output)
        ! Description: Apply a nonlinear activation function to a given array of input values.
        !
        ! Inputs:
        ! input: A 2D array
        ! activation_type: integer code for the activation being applied. If the code
        !       does not match any of the available options, the linear activation is applied.
        !       Currently supported activations are:
        !           0: linear
        !           1: relu
        !           2: sigmoid
        !           3: elu
        !           4: selu
        !           5: tanh
        !           6: softmax
        ! Output:
        ! output: Array of the same dimensions as input with the nonlinear activation applied.
        real(kind=r8), dimension(:, :), intent(in) :: input
        integer, intent(in) :: activation_type
        real(kind=r8), dimension(size(input, 1), size(input, 2)), intent(out) :: output

        real(kind=r8), dimension(size(input, 1)) :: softmax_sum
        real(kind=r8), parameter :: selu_alpha = 1.6732_r8
        real(kind=r8), parameter :: selu_lambda = 1.0507_r8
        real(kind=r8), parameter :: zero = 0.0_r8
        integer :: i, j
        select case (activation_type)
            case (0)  ! linear
                output = input
            case (1)  ! relu
                do i=1,size(input, 1)
                    do j=1, size(input,2)
                        output(i, j) = max(input(i, j), zero)
                    end do
                end do
            case (2)  ! sigmoid
                output = 1.0_r8 / (1.0_r8 + exp(-input))
            case (3)  ! elu (with alpha = 1)
                do i=1,size(input, 1)
                    do j=1, size(input,2)
                        if (input(i, j) >= zero) then
                            output(i, j) = input(i, j)
                        else
                            output(i, j) = exp(input(i, j)) - 1.0_r8
                        end if
                    end do
                end do
            case (4)  ! selu; the lambda scale applies to both branches in the standard definition
                do i=1,size(input, 1)
                    do j=1, size(input,2)
                        if (input(i, j) >= zero) then
                            output(i, j) = selu_lambda * input(i, j)
                        else
                            output(i, j) = selu_lambda * (selu_alpha * exp(input(i, j)) - selu_alpha)
                        end if
                    end do
                end do
            case (5)  ! tanh
                output = tanh(input)
            case (6)  ! softmax: each output row sums to 1
                softmax_sum = sum(exp(input), dim=2)
                do i=1, size(input, 1)
                    do j=1, size(input, 2)
                        output(i, j) = exp(input(i, j)) / softmax_sum(i)
                    end do
                end do
            case default  ! unknown codes fall back to linear
                output = input
        end select
    end subroutine apply_activation

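    ! For reference, the piecewise activations above compute:
    !   relu(x) = max(x, 0)
    !   elu(x)  = x for x >= 0, exp(x) - 1 otherwise
    !   selu(x) = lambda * x for x >= 0, lambda * alpha * (exp(x) - 1) otherwise
    ! Minimal usage sketch (hypothetical values):
    !   real(kind=r8) :: x(1, 3), y(1, 3)
    !   x = reshape([-1.0_r8, 0.0_r8, 2.0_r8], [1, 3])
    !   call apply_activation(x, 1, y)   ! relu: y = [0.0, 0.0, 2.0]
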
    subroutine init_neural_net(filename, batch_size, neural_net_model, iulog, errstring)
        ! init_neural_net
        ! Description: Loads dense neural network weights from a netCDF file and builds an array of
        ! Dense types from the weights and activations.
        !
        ! Input:
        ! filename: Full path to the netCDF file
        ! batch_size: number of items in a single batch. Used to set intermediate array sizes.
        ! iulog: logical unit for log output
        !
        ! Output:
        ! neural_net_model: array of Dense layers composing a densely connected neural network
        ! errstring: output status (non-blank for error return)
        !
        character(len=*), intent(in) :: filename
        integer, intent(in) :: batch_size
        type(Dense), allocatable, intent(out) :: neural_net_model(:)
        integer,          intent(in)  :: iulog
        character(128),   intent(out) :: errstring  ! output status (non-blank for error return)

        integer :: ncid, num_layers_id, num_layers
        integer :: layer_names_var_id, i, layer_in_dimid, layer_out_dimid
        integer :: layer_in_dim, layer_out_dim
        integer :: layer_weight_var_id
        integer :: layer_bias_var_id

        character (len=8), allocatable :: layer_names(:)
        character (len=10) :: num_layers_dim_name = "num_layers"
        character (len=11) :: layer_name_var = "layer_names"
        character (len=11) :: layer_in_dim_name
        character (len=12) :: layer_out_dim_name
        character (len=10) :: activation_name
        real (kind=r8), allocatable :: temp_weights(:, :)

        errstring = ''

        ! Open netCDF file
        call check(nf90_open(filename, nf90_nowrite, ncid), errstring)
        if (trim(errstring) /= '') return
        ! Get the number of layers in the neural network
        call check(nf90_inq_dimid(ncid, num_layers_dim_name, num_layers_id), errstring)
        if (trim(errstring) /= '') return
        call check(nf90_inquire_dimension(ncid, num_layers_id, &
                                          num_layers_dim_name, num_layers), errstring)
        if (trim(errstring) /= '') return
        call check(nf90_inq_varid(ncid, layer_name_var, layer_names_var_id), errstring)
        if (trim(errstring) /= '') return
        allocate(layer_names(num_layers))
        call check(nf90_get_var(ncid, layer_names_var_id, layer_names), errstring)
        if (trim(errstring) /= '') return
        write(iulog,*) "load neural network " // filename
        allocate(neural_net_model(1:num_layers))
        ! Loop through each layer and load the weights, bias term, and activation function
        do i=1, num_layers
            layer_in_dim_name = trim(layer_names(i)) // "_in"
            layer_out_dim_name = trim(layer_names(i)) // "_out"
            layer_in_dimid = -1
            ! Get layer input and output dimensions
            call check(nf90_inq_dimid(ncid, trim(layer_in_dim_name), layer_in_dimid), errstring)
            if (trim(errstring) /= '') return
            call check(nf90_inquire_dimension(ncid, layer_in_dimid, layer_in_dim_name, layer_in_dim), errstring)
            if (trim(errstring) /= '') return
            call check(nf90_inq_dimid(ncid, trim(layer_out_dim_name), layer_out_dimid), errstring)
            if (trim(errstring) /= '') return
            call check(nf90_inquire_dimension(ncid, layer_out_dimid, layer_out_dim_name, layer_out_dim), errstring)
            if (trim(errstring) /= '') return
            call check(nf90_inq_varid(ncid, trim(layer_names(i)) // "_weights", &
                                      layer_weight_var_id), errstring)
            if (trim(errstring) /= '') return
            call check(nf90_inq_varid(ncid, trim(layer_names(i)) // "_bias", &
                                      layer_bias_var_id), errstring)
            if (trim(errstring) /= '') return
            neural_net_model(i)%input_size = layer_in_dim
            neural_net_model(i)%output_size = layer_out_dim
            neural_net_model(i)%batch_size = batch_size
            ! Fortran stores 2D arrays in the opposite order from Python/C, so first
            ! load the data into a temporary array and then apply the transpose
            ! operation to copy the weights into the Dense layer
            allocate(neural_net_model(i)%weights(layer_in_dim, layer_out_dim))
            allocate(temp_weights(layer_out_dim, layer_in_dim))

            call check(nf90_get_var(ncid, layer_weight_var_id, &
                                    temp_weights), errstring)
            if (trim(errstring) /= '') return
            neural_net_model(i)%weights = transpose(temp_weights)
            deallocate(temp_weights)
            ! Load the bias weights
            allocate(neural_net_model(i)%bias(layer_out_dim))
            call check(nf90_get_var(ncid, layer_bias_var_id, &
                                    neural_net_model(i)%bias), errstring)
            if (trim(errstring) /= '') return
            ! Get the name of the activation function, which is stored as an attribute of the weights variable
            call check(nf90_get_att(ncid, layer_weight_var_id, "activation", &
                                    activation_name), errstring)
            if (trim(errstring) /= '') return
            ! Map the activation name to the integer code used by apply_activation;
            ! unrecognized names get code 7, which apply_activation treats as linear
            select case (trim(activation_name))
                case ("linear")
                    neural_net_model(i)%activation = 0
                case ("relu")
                    neural_net_model(i)%activation = 1
                case ("sigmoid")
                    neural_net_model(i)%activation = 2
                case ("elu")
                    neural_net_model(i)%activation = 3
                case ("selu")
                    neural_net_model(i)%activation = 4
                case ("tanh")
                    neural_net_model(i)%activation = 5
                case ("softmax")
                    neural_net_model(i)%activation = 6
                case default
                    neural_net_model(i)%activation = 7
            end select
        end do
        call check(nf90_close(ncid), errstring)
        if (trim(errstring) /= '') return

    end subroutine init_neural_net

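    ! Minimal usage sketch (hypothetical file name and batch size):
    !   type(Dense), allocatable :: model(:)
    !   character(128) :: errstring
    !   call init_neural_net("/path/to/model_weights.nc", 64, model, iulog, errstring)
    !   if (trim(errstring) /= '') return   ! netCDF error; errstring holds the message
    ! The netCDF file must provide a "num_layers" dimension and "layer_names" variable,
    ! plus, for each layer name, "<name>_in"/"<name>_out" dimensions and
    ! "<name>_weights" (with an "activation" attribute) and "<name>_bias" variables.
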
    subroutine load_quantile_scale_values(filename, scale_values, iulog, errstring)
        ! Description: Load quantile transform reference values from a netCDF file.
        ! On return, scale_values has shape (quantile, column + 1): column 1 holds the
        ! reference quantiles and columns 2 onward hold the quantile values for each variable.
        character(len = *), intent(in) :: filename
        real(kind = r8), allocatable, intent(out) :: scale_values(:, :)
        integer,          intent(in)  :: iulog
        character(128),   intent(out) :: errstring  ! output status (non-blank for error return)

        real(kind = r8), allocatable :: temp_scale_values(:, :)
        character(len=8) :: quantile_dim_name = "quantile"
        character(len=7) :: column_dim_name = "column"
        character(len=9) :: ref_var_name = "reference"
        character(len=9) :: quant_var_name = "quantiles"
        integer :: ncid, quantile_id, column_id, quantile_dim, column_dim, ref_var_id, quant_var_id

        errstring = ''

        call check(nf90_open(filename, nf90_nowrite, ncid), errstring)
        if (trim(errstring) /= '') return
        call check(nf90_inq_dimid(ncid, quantile_dim_name, quantile_id), errstring)
        if (trim(errstring) /= '') return
        call check(nf90_inq_dimid(ncid, column_dim_name, column_id), errstring)
        if (trim(errstring) /= '') return
        call check(nf90_inquire_dimension(ncid, quantile_id, &
                quantile_dim_name, quantile_dim), errstring)
        if (trim(errstring) /= '') return
        call check(nf90_inquire_dimension(ncid, column_id, &
                column_dim_name, column_dim), errstring)
        if (trim(errstring) /= '') return
        allocate(scale_values(quantile_dim, column_dim + 1))
        allocate(temp_scale_values(column_dim + 1, quantile_dim))
        call check(nf90_inq_varid(ncid, ref_var_name, ref_var_id), errstring)
        if (trim(errstring) /= '') return
        write(iulog,*) "load ref var"
        call check(nf90_get_var(ncid, ref_var_id, temp_scale_values(1, :)), errstring)
        if (trim(errstring) /= '') return
        call check(nf90_inq_varid(ncid, quant_var_name, quant_var_id), errstring)
        if (trim(errstring) /= '') return
        write(iulog,*) "load quant var"
        call check(nf90_get_var(ncid, quant_var_id, temp_scale_values(2:column_dim + 1, :)), errstring)
        if (trim(errstring) /= '') return
        scale_values = transpose(temp_scale_values)
        call check(nf90_close(ncid), errstring)
        if (trim(errstring) /= '') return
    end subroutine load_quantile_scale_values

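    ! Minimal usage sketch (hypothetical file name):
    !   real(kind=r8), allocatable :: scale_vals(:, :)
    !   call load_quantile_scale_values("quantile_scaler.nc", scale_vals, iulog, errstring)
    ! The resulting scale_vals array feeds quantile_transform and quantile_inv_transform below.
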
    subroutine linear_interp(x_in, xs, ys, y_in)
        ! Description: Piecewise-linear interpolation of each x_in value onto the curve
        ! defined by the monotonically increasing abscissas xs and ordinates ys.
        ! Values outside [xs(1), xs(size(xs))] are clamped to the end values.
        real(kind = r8), dimension(:), intent(in) :: x_in
        real(kind = r8), dimension(:), intent(in) :: xs
        real(kind = r8), dimension(:), intent(in) :: ys
        real(kind = r8), dimension(size(x_in, 1)), intent(out) :: y_in
        integer :: i, j, x_in_size, xs_size
        x_in_size = size(x_in, 1)
        xs_size = size(xs, 1)
        do i = 1, x_in_size
            if (x_in(i) <= xs(1)) then
                y_in(i) = ys(1)
            else if (x_in(i) >= xs(xs_size)) then
                y_in(i) = ys(xs_size)
            else
                ! Find the first knot at or above x_in(i), then interpolate
                ! between it and the previous knot
                j = 1
                do while (xs(j) < x_in(i))
                    j = j + 1
                end do
                y_in(i) = (ys(j - 1) * (xs(j) - x_in(i)) + ys(j) * (x_in(i) - xs(j - 1))) / (xs(j) - xs(j - 1))
            end if
        end do
    end subroutine linear_interp

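    ! Worked example (hypothetical values): with xs = [0.0, 1.0], ys = [0.0, 10.0],
    ! and x_in = [0.25], the loop finds j = 2 and returns
    !   y_in(1) = (0.0 * (1.0 - 0.25) + 10.0 * (0.25 - 0.0)) / (1.0 - 0.0) = 2.5
    ! Inputs at or beyond the end knots return ys(1) or ys(size(ys)) unchanged.
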
    subroutine quantile_transform(x_inputs, scale_values, x_transformed)
        ! Description: Map each input column onto the reference quantile distribution by
        ! interpolating from that column's quantile values (scale_values(:, j + 1)) to
        ! the reference quantiles (scale_values(:, 1)).
        real(kind = r8), dimension(:, :), intent(in) :: x_inputs
        real(kind = r8), dimension(:, :), intent(in) :: scale_values
        real(kind = r8), dimension(size(x_inputs, 1), size(x_inputs, 2)), intent(out) :: x_transformed
        integer :: j
        do j = 1, size(x_inputs, 2)
            call linear_interp(x_inputs(:, j), scale_values(:, j + 1), &
                    scale_values(:, 1), x_transformed(:, j))
        end do
    end subroutine quantile_transform

    subroutine quantile_inv_transform(x_inputs, scale_values, x_transformed)
        ! Description: Map each column back from the reference quantile distribution by
        ! interpolating from the reference quantiles (scale_values(:, 1)) to that
        ! column's quantile values (scale_values(:, j + 1)).
        real(kind = r8), dimension(:, :), intent(in) :: x_inputs
        real(kind = r8), dimension(:, :), intent(in) :: scale_values
        real(kind = r8), dimension(size(x_inputs, 1), size(x_inputs, 2)), intent(out) :: x_transformed
        integer :: j
        do j = 1, size(x_inputs, 2)
            call linear_interp(x_inputs(:, j), scale_values(:, 1), scale_values(:, j + 1), x_transformed(:, j))
        end do
    end subroutine quantile_inv_transform

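    ! quantile_transform and quantile_inv_transform are inverses of each other up to
    ! interpolation error. A typical round trip (hypothetical names; see
    ! neural_net_predict below):
    !   call quantile_transform(raw_inputs, input_scale_vals, nn_inputs)
    !   call neural_net_predict(nn_inputs, model, nn_outputs)
    !   call quantile_inv_transform(nn_outputs, output_scale_vals, predictions)
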
    subroutine neural_net_predict(input, neural_net_model, prediction)
        ! neural_net_predict
        ! Description: generate a prediction from the neural network model for an arbitrary set of input values
        !
        ! Args:
        ! input (input): 2D array of input values. Each row is a separate instance and each column is a model input.
        ! neural_net_model (input): Array of type(Dense) objects
        ! prediction (output): The prediction of the neural network as a 2D array of dimension (examples, outputs)
        !
        ! Note: the number of input rows is assumed to be at least the batch size the
        ! model was initialized with. If it is not an exact multiple of the batch size,
        ! the final batch overlaps the previous one so that the trailing rows are covered.
        real(kind=r8), intent(in) :: input(:, :)
        type(Dense), intent(in) :: neural_net_model(:)
        real(kind=r8), intent(out) :: prediction(size(input, 1), neural_net_model(size(neural_net_model))%output_size)
        integer :: bi, i, j, num_layers
        integer :: batch_size
        integer :: input_size
        integer :: batch_index_size
        integer, allocatable :: batch_indices(:)
        type(DenseData) :: neural_net_data(size(neural_net_model))
        input_size = size(input, 1)
        num_layers = size(neural_net_model)
        batch_size = neural_net_model(1)%batch_size
        batch_index_size = input_size / batch_size
        allocate(batch_indices(batch_index_size))
        ! Record the last row of each batch: batch_size, 2 * batch_size, ...
        i = 1
        do bi=batch_size, input_size, batch_size
            batch_indices(i) = bi
            i = i + 1
        end do
        do j=1, num_layers
            allocate(neural_net_data(j)%input(batch_size, neural_net_model(j)%input_size))
            allocate(neural_net_data(j)%output(batch_size, neural_net_model(j)%output_size))
        end do
        ! Make the final batch end at the last row, overlapping the previous batch
        ! when input_size is not a multiple of batch_size
        batch_indices(batch_index_size) = input_size
        do bi=1, batch_index_size
            neural_net_data(1)%input = input(batch_indices(bi)-batch_size+1:batch_indices(bi), :)
            ! Pass the batch through the layers, feeding each layer's output to the next
            do i=1, num_layers - 1
                call apply_dense(neural_net_data(i)%input, neural_net_model(i), neural_net_data(i)%output)
                neural_net_data(i + 1)%input = neural_net_data(i)%output
            end do
            call apply_dense(neural_net_data(num_layers)%input, neural_net_model(num_layers), &
                             neural_net_data(num_layers)%output)
            prediction(batch_indices(bi)-batch_size + 1:batch_indices(bi), :) = &
                    neural_net_data(num_layers)%output
        end do
        do j=1, num_layers
            deallocate(neural_net_data(j)%input)
            deallocate(neural_net_data(j)%output)
        end do
        deallocate(batch_indices)
    end subroutine neural_net_predict

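    ! Minimal usage sketch (hypothetical shapes; 128 rows with a model batch size of 64
    ! yields two batches of 64):
    !   real(kind=r8) :: x(128, 10)
    !   real(kind=r8), allocatable :: y(:, :)
    !   allocate(y(128, model(size(model))%output_size))
    !   call neural_net_predict(x, model, y)
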
    subroutine standard_scaler_transform(input_data, scale_values, transformed_data, errstring)
        ! Perform z-score normalization of input_data table. Equivalent to scikit-learn StandardScaler.
        !
        ! Inputs:
        !   input_data: 2D array where rows are examples and columns are variables
        !   scale_values: 2D array where rows are the input variables and columns are mean and standard deviation
        ! Output:
        !   transformed_data: 2D array with the same shape as input_data containing the transformed values.
        real(r8), intent(in) :: input_data(:, :)
        real(r8), intent(in) :: scale_values(:, :)
        real(r8), intent(out) :: transformed_data(size(input_data, 1), size(input_data, 2))
        character(128),   intent(out) :: errstring  ! output status (non-blank for error return)
        integer :: i

        errstring = ''
        if (size(input_data, 2) /= size(scale_values, 1)) then
            write(errstring,*) "Size mismatch between input data and scale values", size(input_data, 2), size(scale_values, 1)
            return
        end if
        do i=1, size(input_data, 2)
            transformed_data(:, i) = (input_data(:, i) - scale_values(i, 1)) / scale_values(i, 2)
        end do
    end subroutine standard_scaler_transform

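    ! Worked example (hypothetical values): with scale_values(1, :) = [2.0, 0.5],
    ! i.e. mean 2.0 and standard deviation 0.5 for the first variable, an input of 3.0
    ! in column 1 becomes (3.0 - 2.0) / 0.5 = 2.0; standard_scaler_inverse_transform
    ! below maps 2.0 back to 2.0 * 0.5 + 2.0 = 3.0.
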
    subroutine load_scale_values(filename, num_inputs, scale_values)
        ! Description: Read the mean and standard deviation for each input variable from
        ! a formatted text file. The first line is a header; each following line holds a
        ! variable name, its mean, and its standard deviation.
        character(len=*), intent(in) :: filename
        integer, intent(in) :: num_inputs
        real(r8), intent(out) :: scale_values(num_inputs, 2)
        character(len=40) :: row_name
        integer :: isu, i
        ! Use a compiler-assigned unit rather than hard-coding unit 2, which may collide
        ! with a unit already in use elsewhere
        open(newunit=isu, file=filename, access="sequential", form="formatted")
        read(isu, "(A)")   ! skip the header line
        do i=1, num_inputs
            read(isu, *) row_name, scale_values(i, 1), scale_values(i, 2)
        end do
        close(isu)
    end subroutine load_scale_values

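    ! Expected file layout (hypothetical contents; one header line, then a
    ! whitespace-delimited name, mean, and standard deviation per input):
    !   name mean std
    !   temperature 270.5 12.3
    !   pressure 850.0 90.1
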
    subroutine standard_scaler_inverse_transform(input_data, scale_values, transformed_data, errstring)
        ! Perform inverse z-score normalization of input_data table. Equivalent to scikit-learn StandardScaler.
        !
        ! Inputs:
        !   input_data: 2D array where rows are examples and columns are variables
        !   scale_values: 2D array where rows are the input variables and columns are mean and standard deviation
        ! Output:
        !   transformed_data: 2D array with the same shape as input_data containing the transformed values.
        real(r8), intent(in) :: input_data(:, :)
        real(r8), intent(in) :: scale_values(:, :)
        real(r8), intent(out) :: transformed_data(size(input_data, 1), size(input_data, 2))
        character(128),   intent(out) :: errstring  ! output status (non-blank for error return)
        integer :: i

        errstring = ''
        if (size(input_data, 2) /= size(scale_values, 1)) then
            write(errstring,*) "Size mismatch between input data and scale values", size(input_data, 2), size(scale_values, 1)
            return
        end if
        do i=1, size(input_data, 2)
            transformed_data(:, i) = input_data(:, i) * scale_values(i, 2) + scale_values(i, 1)
        end do
    end subroutine standard_scaler_inverse_transform

    subroutine minmax_scaler_transform(input_data, scale_values, transformed_data, errstring)
        ! Perform min-max scaling of input_data table. Equivalent to scikit-learn MinMaxScaler.
        !
        ! Inputs:
        !   input_data: 2D array where rows are examples and columns are variables
        !   scale_values: 2D array where rows are the input variables and columns are min and max.
        ! Output:
        !   transformed_data: 2D array with the same shape as input_data containing the transformed values.
        real(r8), intent(in) :: input_data(:, :)
        real(r8), intent(in) :: scale_values(:, :)
        real(r8), intent(out) :: transformed_data(size(input_data, 1), size(input_data, 2))
        character(128),   intent(out) :: errstring  ! output status (non-blank for error return)

        integer :: i

        errstring = ''
        if (size(input_data, 2) /= size(scale_values, 1)) then
            write(errstring,*) "Size mismatch between input data and scale values", size(input_data, 2), size(scale_values, 1)
            return
        end if
        do i=1, size(input_data, 2)
            transformed_data(:, i) = (input_data(:, i) - scale_values(i, 1)) / (scale_values(i, 2) - scale_values(i, 1))
        end do
    end subroutine minmax_scaler_transform

    subroutine minmax_scaler_inverse_transform(input_data, scale_values, transformed_data, errstring)
        ! Perform inverse min-max scaling of input_data table. Equivalent to scikit-learn MinMaxScaler.
        !
        ! Inputs:
        !   input_data: 2D array where rows are examples and columns are variables
        !   scale_values: 2D array where rows are the input variables and columns are min and max.
        ! Output:
        !   transformed_data: 2D array with the same shape as input_data containing the transformed values.
        real(r8), intent(in) :: input_data(:, :)
        real(r8), intent(in) :: scale_values(:, :)
        real(r8), intent(out) :: transformed_data(size(input_data, 1), size(input_data, 2))
        character(128),   intent(out) :: errstring  ! output status (non-blank for error return)

        integer :: i

        errstring = ''
        if (size(input_data, 2) /= size(scale_values, 1)) then
            write(errstring,*) "Size mismatch between input data and scale values", size(input_data, 2), size(scale_values, 1)
            return
        end if
        do i=1, size(input_data, 2)
            transformed_data(:, i) = input_data(:, i) * (scale_values(i, 2) - scale_values(i, 1)) + scale_values(i, 1)
        end do
    end subroutine minmax_scaler_inverse_transform

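    ! Worked example (hypothetical values): with scale_values(1, :) = [0.0, 4.0],
    ! i.e. min 0.0 and max 4.0, minmax_scaler_transform maps 3.0 to
    ! (3.0 - 0.0) / (4.0 - 0.0) = 0.75, and minmax_scaler_inverse_transform maps
    ! 0.75 back to 0.75 * (4.0 - 0.0) + 0.0 = 3.0.
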
    subroutine check(status, errstring)
        ! Check for netCDF errors
        integer, intent(in) :: status
        character(128),   intent(out) :: errstring  ! output status (non-blank for error return)

        errstring = ''
        if (status /= nf90_noerr) then
          errstring = trim(nf90_strerror(status))
        end if
    end subroutine check

end module module_neural_net
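
! The driver below is a minimal sketch of how the module fits together, assuming
! hypothetical file names ("model.nc" with batch size 8, "input_quantiles.nc",
! "output_quantiles.nc"), a model whose input size is 4, and unit 6 as stdout;
! none of these come from the module above, and error handling is reduced to stops.
program example_neural_net_driver
    use shr_kind_mod, only: r8=>shr_kind_r8
    use module_neural_net
    implicit none
    type(Dense), allocatable :: model(:)
    real(kind=r8), allocatable :: in_scale(:, :), out_scale(:, :)
    real(kind=r8) :: x_raw(8, 4), x_scaled(8, 4)
    real(kind=r8), allocatable :: y_scaled(:, :), y(:, :)
    character(128) :: errstring
    integer, parameter :: iulog = 6   ! assume unit 6 is stdout

    ! Load the network and the input/output quantile scalers
    call init_neural_net("model.nc", 8, model, iulog, errstring)
    if (trim(errstring) /= '') stop 1
    call load_quantile_scale_values("input_quantiles.nc", in_scale, iulog, errstring)
    if (trim(errstring) /= '') stop 1
    call load_quantile_scale_values("output_quantiles.nc", out_scale, iulog, errstring)
    if (trim(errstring) /= '') stop 1

    ! Scale stand-in inputs, run the network, and unscale the predictions
    call random_number(x_raw)
    call quantile_transform(x_raw, in_scale, x_scaled)
    allocate(y_scaled(8, model(size(model))%output_size))
    allocate(y(8, model(size(model))%output_size))
    call neural_net_predict(x_scaled, model, y_scaled)
    call quantile_inv_transform(y_scaled, out_scale, y)
    write(iulog, *) "first prediction row:", y(1, :)
end program example_neural_net_driver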
