I am trying to implement the perceptron algorithm, but I am getting inconsistent results from run to run; I have noticed that the random initialization of the weights has a big impact on the final predictions. Is there anything I am blatantly doing wrong? Thanks!
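To make runs comparable while debugging, I have been seeding NumPy's global RNG before calling train; a minimal sketch of what I mean (the seed value 0 is arbitrary):

import numpy as np

# Fix the seed so each run starts from the same random weights and bias.
# With a fixed seed a run is repeatable, but different seeds still give
# me different final predictions, which is the inconsistency I mean.
np.random.seed(0)

My full code is below: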
import numpy as np

def train(x, y):
    lenWeights = len(x[1, :])
    weights = np.random.uniform(-1, 1, size=lenWeights)
    bias = np.random.uniform(-1, 1)
    learningRate = 0.01
    t = 1
    converged = False
    # Perceptron Algorithm
    while not converged and t < 100000:
        targets = []
        for i in range(len(x)):
            # Calculate output of the network
            output = np.dot(x[i, :], weights) + bias
            # Perceptron threshold decision
            if output > 0:
                target = 1
            else:
                target = 0
            # Calculate error and update weights
            error = target - y[i]
            weights = weights + (x[i, :] * (learningRate * error))
            bias = bias + (learningRate * error)
            targets.append(target)
            t = t + 1
        # Converged once a full pass reproduces every label
        if list(y) == list(targets):
            converged = True
    return weights, bias

def test(weights, bias, x):
    predictions = []
    for i in range(len(x)):
        # Calculate w'x + b
        output = np.dot(x[i, :], weights) + bias
        # Get decision from hardlim function
        if output > 0:
            target = 1
        else:
            target = 0
        predictions.append(target)
    return predictions

if __name__ == '__main__':
    # Simple Test
    x = np.array([[0, 1], [1, 1]])
    y = np.array([0, 1])
    weights, bias = train(x, y)
    predictions = test(weights, bias, x)
    print(predictions)
    print(y)
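For reference, here is my understanding of the textbook perceptron learning rule, written as a minimal self-contained sketch (the function name perceptron_reference and its parameters are my own, not from any library); I am comparing my train function against this line by line to see where the two diverge:

import numpy as np

def perceptron_reference(x, y, learning_rate=0.01, max_epochs=1000):
    # Textbook rule: error = desired output minus predicted output,
    # and each weight moves by learning_rate * error * input.
    weights = np.zeros(x.shape[1])  # deterministic start, no random init
    bias = 0.0
    for _ in range(max_epochs):
        mistakes = 0
        for xi, yi in zip(x, y):
            prediction = 1 if np.dot(xi, weights) + bias > 0 else 0
            error = yi - prediction  # desired minus predicted
            weights += learning_rate * error * xi
            bias += learning_rate * error
            mistakes += int(error != 0)
        if mistakes == 0:  # converged: every sample classified correctly
            break
    return weights, bias

Starting from zero weights makes this sketch deterministic, which also takes the initialization question out of the picture while I compare the two versions.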