Halo,
I'm new in machine learning and Python and I want to predict the Kaggle House Sales in King County dataset with my gradient descent.
I'm splitting 70% (15k rows) training and 30% (6k rows) testing and I choose 5 features from 19, but there is a performance issue, the algorithm took so much time (more than 11 hours), 100% memory and failed to execute.
This is my Gradient Descent class:
class GradientDescent:
X_train = []
Y_train = []
X_test = []
Y_test = []
lr = 0
max_iter = 0
theta = 0
def __init__(self, X_train,Y_train,X_test,Y_test, lr=0.01, max_iter=100):
self.X_train = X_train
self.Y_train = Y_train
self.X_test = X_test
self.Y_test = Y_test
self.lr = lr
self.max_iter = max_iter
self.theta = np.random.randn(X_train.shape[1], 1)
print(self.theta)
def costFunction(self,theta,X,y):
"1/2m * E(h0-y)**2"
m = len(y)
y_pred = X.dot(theta)
cost = (1/2*m) * np.sum(np.square(y_pred-y))
return cost
def estimate(self):
m = len(self.Y_train)
mse_hist = np.zeros(self.max_iter)
#theta_hist = np.zeros(max_iter)
i = 0
while i < self.max_iter or mse_hist[i] > 0.01:
y_pred = np.dot(self.X_train,self.theta)
error = y_pred-self.Y_train
self.theta = self.theta - (1/m)*self.lr*(self.X_train.T.dot((error)))
mse_hist[i] = self.costFunction(self.theta,self.X_train, self.Y_train)
#print(mse_hist[i])
i+=1
return (self.theta, mse_hist)
def test(self):
res = pd.DataFrame()
for i,row in self.X_test.iterrows():
price_pred = np.dot(row.values,self.theta)
res = row
res['price_actual'] = self.Y_test[i]
res['price_predict'] = price_pred
res['r2_score'] = r2_score(res['price_actual'].values, res['price_predict'])
res.to_csv('output.csv')
Any advice to make it better?