
I am trying to implement a simple gradient descent algorithm for linear regression. I am using the Armadillo C++ linear algebra library, and I am also new to Armadillo. This is what I am trying to do:

void linRegression(mat &features, mat &targets, double alpha, double error){
    mat theta = ones(features.n_cols+1);
    mat temp = zeros(features.n_cols+1);
    // prepend a column of ones for the bias term
    mat features_new = join_horiz(ones(features.n_rows),features);
    mat predictions;
    double con = alpha*(1.0/features.n_rows);  // alpha / m
    int j = 0;
    while(j<1000){
        mat step_error = (features_new*theta - targets);
        // per-parameter gradient update
        for(unsigned int i=0;i<theta.n_rows;i++){
            temp(i) = con*sum(step_error%features_new.col(i));
        }
        theta = theta-temp;
        mat pred = predict(theta,features_new);  // predict() is defined elsewhere
        cout<<theta<<endl;
        j++;
    }
}

but the values of theta keep increasing and end up at infinity. I am not sure what I am doing wrong.

Did you try smaller learning rates? Did you scale your features? What does the fit look like as a function of the iteration number? – Anton

Yes, I tried smaller learning rates. I didn't scale my data, but the data was small and linear. I used squared error for my cost function, and it increases during each iteration. – noshusan

1 Answer


I think the calculations in the while loop are not correct. In any case, you can do it more elegantly without the for-loop, using the vectorized update theta = theta - (alpha/m) * X' * (X*theta - y). Here is a short example for a one-feature problem:

#include <iostream>
#include <armadillo>

using namespace std;
using namespace arma;

int main(int argc, char** argv)
{
    mat features(10, 1);

    features << 6.110100 << endr
         << 5.527700 << endr
         << 8.518600 << endr
         << 7.003200 << endr
         << 5.859800 << endr
         << 8.382900 << endr
         << 7.476400 << endr
         << 8.578100 << endr
         << 6.486200 << endr
         << 5.054600 << endr;

    mat targets(10, 1);

    targets << 17.59200 << endr
        << 9.130200 << endr
        << 13.66200 << endr
        << 11.85400 << endr
        << 6.823300 << endr
        << 11.88600 << endr
        << 4.348300 << endr
        << 12.00000 << endr
        << 6.598700 << endr
        << 3.816600 << endr;

    mat theta = ones(features.n_cols + 1);

    // prepend a column of ones for the bias term
    mat features_new = join_horiz(ones(features.n_rows), features);

    double alpha = 0.01;
    double con = alpha*(1.0 / features.n_rows);  // alpha / m

    int j = 0;

    while (j < 20000){
        mat step_error = (features_new*theta - targets);
        // vectorized gradient step: theta -= (alpha/m) * X' * error
        theta = theta - con * (features_new.t() * step_error);
        j++;
    }

    theta.print("theta:");

    system("pause");

    return 0;
}

The program returns:

theta:
   0.5083
   1.3425

For comparison, the result obtained through the normal equation method is:

theta:
   0.5071
   1.3427
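
For reference, the normal equation solution takes only a few lines of Armadillo; here is a minimal sketch (the helper name normalEquation is just for illustration, and X is assumed to already contain the bias column):

#include <armadillo>

using namespace arma;

// normal equation: theta = (X'X)^{-1} X'y
// solve() is used instead of an explicit inverse for numerical stability
mat normalEquation(const mat& X, const mat& y)
{
    return solve(X.t() * X, X.t() * y);
}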

EDIT

Your code is indeed correct! The problem is likely the missing feature normalization. I expanded my example to a two-feature regression and added normalization. Without normalization it does not work for me either.

#include <iostream>
#include <armadillo>

using namespace std;
using namespace arma;

int main(int argc, char** argv)
{

    mat features(10, 2);

    features << 2104 << 3 << endr
         << 1600 << 3 << endr
         << 2400 << 3 << endr
         << 1416 << 2 << endr
         << 3000 << 4 << endr
         << 1985 << 4 << endr
         << 1534 << 3 << endr
         << 1427 << 3 << endr
         << 1380 << 3 << endr
         << 1494 << 3 << endr;

    // per-column mean and standard deviation
    mat m = mean(features, 0);
    mat s = stddev(features, 0, 0);

    unsigned int i, j;

    // z-score normalization: (x - mean) / stddev
    for (i = 0; i < features.n_rows; i++)
    {
        for (j = 0; j < features.n_cols; j++)
        {
            features(i, j) = (features(i, j) - m(j))/s(j);
        }
    }

    mat targets(10, 1);

    targets << 399900 << endr
        << 329900 << endr
        << 369000 << endr
        << 232000 << endr
        << 539900 << endr
        << 299900 << endr
        << 314900 << endr
        << 198999 << endr
        << 212000 << endr
        << 242500 << endr;


    mat theta = ones(features.n_cols + 1);

    mat features_new = join_horiz(ones(features.n_rows), features);

    double alpha = 0.01;
    double con = alpha*(1.0 / features.n_rows);

    j = 0;  // reset the counter (it was last used in the normalization loop)

    while (j < 20000){
        mat step_error = (features_new*theta - targets);
        theta = theta - con * (features_new.t() * step_error);
        j++;
    }

    theta.print("theta:");

    system("pause");

    return 0;
}

The result:

theta:
  3.1390e+005
  9.9704e+004
 -5.6835e+003
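
As a side note, the normalization loops can also be written more compactly with Armadillo's each_row(); a minimal drop-in sketch that replaces the declarations of m and s and the two loops above (same math, just vectorized):

// z-score normalization without explicit loops
rowvec m = mean(features, 0);
rowvec s = stddev(features, 0, 0);
features.each_row() -= m;   // subtract per-column means
features.each_row() /= s;   // divide by per-column standard deviations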