I need to convert a training routine that uses stochastic gradient descent to mini-batch gradient descent. Below is a simple example of a neural network with only 4 training samples, so we can, for instance, use a batch size of 2 just to understand how to change the training part.

This is a simple example of a net that has to learn the XOR operation.

This part is the network definition:

#include <stdio.h>
#include <stdlib.h>

typedef double NNType;
// number of inputs
#define IN 2
// number of neurons in the hidden layer
#define HID 8
// number of outputs
#define OUT 1
// learning rate
#define EPS 0.1

NNType input[IN]; // input
NNType hidden[HID]; // hidden layer
NNType output[OUT]; // output
NNType weightH[HID][IN]; // hidden layer weights
NNType biasesH[HID]; // hidden layer biases
NNType weightO[OUT][HID]; // output layer weights
NNType biasesO[OUT]; // output layer biases

// ReLU activation
static inline NNType Activation(NNType x)
{
   return x>0?x:0;
}

// ReLU derivative (taken as 0 at x==0)
static inline NNType Derivative(NNType x)
{
   return x>0?1:0;
}

This function is the forward pass of the network:

NNType NetworkResult(NNType inp1,NNType inp2)
{
   // load the inputs
   input[0]=inp1;
   input[1]=inp2;
   // compute hidden layer
   for (int i=0;i<HID;i++)
   {
      hidden[i]=biasesH[i];
      for (int j=0;j<IN;j++)
         hidden[i] += input[j]*weightH[i][j];
      hidden[i]=Activation(hidden[i]);
   }
   // compute output
   for (int i=0;i<OUT;i++)
   {
      output[i]=biasesO[i];
      for (int j=0;j<HID;j++)
         output[i] += hidden[j]*weightO[i][j];
      output[i]=Activation(output[i]);
   }
   return output[0];
}

This is the training part that I need to change to mini-batch gradient descent:

void TrainNet(NNType inp1,NNType inp2,NNType result,NNType *error)
{
   NetworkResult(inp1,inp2);
   NNType DeltaO[OUT];
   NNType DeltaH[HID];
   // layer output
   NNType err= result-output[0];
   *error+=err*err*0.5;
   DeltaO[0]=err*Derivative(output[0]);
   // layer hidden
   for (int i=0;i<HID;i++)
   {
      NNType err=0;
      for (int j=0;j<OUT;j++)
         err+= DeltaO[j]*weightO[j][i];
      DeltaH[i]=err*Derivative(hidden[i]);
   }
   // change weights
   // layer output
   for (int i=0;i<OUT;i++)
   {
      for (int j=0;j<HID;j++)
         weightO[i][j]+=EPS*DeltaO[i]*hidden[j];
      biasesO[i]+=EPS*DeltaO[i];
   }
   // layer hidden
   for (int i=0;i<HID;i++)
   {
      for (int j=0;j<IN;j++)
         weightH[i][j]+=EPS*DeltaH[i]*input[j];
      biasesH[i]+=EPS*DeltaH[i];
   }
}

The main program:

// constant for weights initializations
#define CONSTINIT 0.1

int main(int argc, char *argv[])
{
   srand(1);
   // initialize weights and biases
   for (int i=0;i<HID;i++)
   {
      for (int j=0;j<IN;j++)
         weightH[i][j]= 2.0 * ( (rand()/((NNType)RAND_MAX)) - 0.5 ) * CONSTINIT;
      biasesH[i]=0.1;
   }
   for (int i=0;i<OUT;i++)
   {
      for (int j=0;j<HID;j++)
         weightO[i][j]= 2.0 * ( (rand()/((NNType)RAND_MAX)) - 0.5 ) * CONSTINIT;
      biasesO[i]=0.1;
   }
   // calculate the results with the random weights
   printf("0 0 = %f\n",NetworkResult(0,0));
   printf("0 1 = %f\n",NetworkResult(0,1));
   printf("1 0 = %f\n",NetworkResult(1,0));
   printf("1 1 = %f\n",NetworkResult(1,1));
   printf("\n");
   // train the net to learn the XOR operation
   int i;
   for (i=0;i<10000;i++)
   {
      NNType error=0;
      TrainNet(0,0,0,&error); // input 0 0 result 0
      TrainNet(0,1,1,&error); // input 0 1 result 1
      TrainNet(1,0,1,&error); // input 1 0 result 1
      TrainNet(1,1,0,&error); // input 1 1 result 0
      if (error<0.0001) break; // stop training once the error is low enough
   }
   // calculate the results after training
   printf("After %d iterations\n",i);
   printf("0 0 = %f\n",NetworkResult(0,0));
   printf("0 1 = %f\n",NetworkResult(0,1));
   printf("1 0 = %f\n",NetworkResult(1,0));
   printf("1 1 = %f\n",NetworkResult(1,1));
   printf("\n");
   return 0;
}

1 Answer

Check What are the differences between 'epoch', 'batch', and 'minibatch'?

In your case the order of the inputs is arbitrary, so the split is easy: divide your training data into 2 mini-batches and run your training for loop once per batch, with an error array holding one accumulator per batch. With 4 samples and a batch size of 2, one epoch then consists of 2 mini-batches. Something like this:

#define BATCHES 2

// add a batch dimension to the parameters
NNType weightH[BATCHES][HID][IN]; // hidden layer weights
NNType biasesH[BATCHES][HID]; // hidden layer biases
NNType weightO[BATCHES][OUT][HID]; // output layer weights
NNType biasesO[BATCHES][OUT]; // output layer biases

// updated prototype to train on a given batch
void TrainNet(NNType inp1,NNType inp2,NNType result,NNType *error,int batch);

Then in your main:

int i,j;
NNType error[BATCHES] = {0};

// initialize your weights and biases with random values
// as before, once per batch index
init();

// train
for (j=0;j<BATCHES;j++)
{
   for (i=0;i<10000/BATCHES;i++)
   {
      error[j]=0; // reset the accumulated error each epoch, as in the original loop
      TrainNet(0,0,0,&error[j],j); // input 0 0 result 0
      TrainNet(0,1,1,&error[j],j); // input 0 1 result 1
      TrainNet(1,0,1,&error[j],j); // input 1 0 result 1
      TrainNet(1,1,0,&error[j],j); // input 1 1 result 0
      if (error[j]<0.0001) break; // stop once the error is low enough
   }
}
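
The code above keeps one full set of weights per batch index. For classic mini-batch gradient descent, where a single shared set of weights gets one update per mini-batch using gradients accumulated over that batch, you can instead split TrainNet into an "accumulate" step and an "apply" step. Here is a minimal sketch of that idea built on the original globals from the question; the names AccumulateGradients, ApplyGradients and the grad* accumulator arrays are mine, for illustration only:

// gradient accumulators, one per parameter (illustrative names)
NNType gradWH[HID][IN], gradBH[HID];
NNType gradWO[OUT][HID], gradBO[OUT];

// forward + backward pass for one sample:
// accumulate gradients, but do not touch the weights yet
void AccumulateGradients(NNType inp1,NNType inp2,NNType result,NNType *error)
{
   NetworkResult(inp1,inp2);
   NNType DeltaO[OUT];
   // output layer delta
   NNType err=result-output[0];
   *error+=err*err*0.5;
   DeltaO[0]=err*Derivative(output[0]);
   // hidden layer deltas and gradients
   for (int i=0;i<HID;i++)
   {
      NNType sum=0;
      for (int j=0;j<OUT;j++)
         sum+=DeltaO[j]*weightO[j][i];
      NNType DeltaH=sum*Derivative(hidden[i]);
      for (int j=0;j<IN;j++)
         gradWH[i][j]+=DeltaH*input[j];
      gradBH[i]+=DeltaH;
   }
   // output layer gradients
   for (int i=0;i<OUT;i++)
   {
      for (int j=0;j<HID;j++)
         gradWO[i][j]+=DeltaO[i]*hidden[j];
      gradBO[i]+=DeltaO[i];
   }
}

// apply the averaged gradients once per mini-batch and reset the accumulators
void ApplyGradients(int batchSize)
{
   for (int i=0;i<HID;i++)
   {
      for (int j=0;j<IN;j++)
      {
         weightH[i][j]+=EPS*gradWH[i][j]/batchSize;
         gradWH[i][j]=0;
      }
      biasesH[i]+=EPS*gradBH[i]/batchSize;
      gradBH[i]=0;
   }
   for (int i=0;i<OUT;i++)
   {
      for (int j=0;j<HID;j++)
      {
         weightO[i][j]+=EPS*gradWO[i][j]/batchSize;
         gradWO[i][j]=0;
      }
      biasesO[i]+=EPS*gradBO[i]/batchSize;
      gradBO[i]=0;
   }
}

Usage inside the epoch loop, replacing the four TrainNet calls (4 samples, batch size 2, so two weight updates per epoch):

      NNType error=0;
      // first mini-batch: two samples, then one weight update
      AccumulateGradients(0,0,0,&error);
      AccumulateGradients(0,1,1,&error);
      ApplyGradients(2);
      // second mini-batch
      AccumulateGradients(1,0,1,&error);
      AccumulateGradients(1,1,0,&error);
      ApplyGradients(2);

Dividing by batchSize averages the gradient over the mini-batch; summing instead (dropping the division) would only rescale the effective learning rate.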