Logistic Regression Classifier with Gradient Descent #136
Closed · firdevsuykun started this conversation in General · Replies: 1 comment
Maybe there was a small indentation difference when you retyped the code. Try this version; it should work:

```python
import numpy as np


class LogisticRegressionGD:
    """Gradient descent-based logistic regression classifier.

    Parameters
    ------------
    eta : float
        Learning rate (between 0.0 and 1.0)
    n_iter : int
        Passes over the training dataset.
    random_state : int
        Random number generator seed for random weight
        initialization.

    Attributes
    -----------
    w_ : 1d-array
        Weights after training.
    b_ : Scalar
        Bias unit after fitting.
    losses_ : list
        Log loss function values in each epoch.
    """
    def __init__(self, eta=0.01, n_iter=50, random_state=1):
        self.eta = eta
        self.n_iter = n_iter
        self.random_state = random_state

    def fit(self, X, y):
        """Fit training data.

        Parameters
        ----------
        X : {array-like}, shape = [n_examples, n_features]
            Training vectors, where n_examples is the number of
            examples and n_features is the number of features.
        y : array-like, shape = [n_examples]
            Target values.

        Returns
        -------
        self : Instance of LogisticRegressionGD
        """
        rgen = np.random.RandomState(self.random_state)
        self.w_ = rgen.normal(loc=0.0, scale=0.01, size=X.shape[1])
        # np.float_ was removed in NumPy 2.0; np.float64 is the portable spelling.
        self.b_ = np.float64(0.)
        self.losses_ = []
        for i in range(self.n_iter):
            net_input = self.net_input(X)
            output = self.activation(net_input)
            errors = (y - output)
            self.w_ += self.eta * X.T.dot(errors) / X.shape[0]
            self.b_ += self.eta * errors.mean()
            # Mean log loss over all examples; the division by X.shape[0]
            # must apply to both terms, not just the second one.
            loss = (-y.dot(np.log(output))
                    - (1 - y).dot(np.log(1 - output))) / X.shape[0]
            self.losses_.append(loss)
        return self

    def net_input(self, X):
        """Calculate net input"""
        return np.dot(X, self.w_) + self.b_

    def activation(self, z):
        """Compute logistic sigmoid activation"""
        return 1. / (1. + np.exp(-np.clip(z, -250, 250)))

    def predict(self, X):
        """Return class label after unit step"""
        return np.where(self.activation(self.net_input(X)) >= 0.5, 1, 0)
```
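The loss computed each epoch is the mean log loss, $L = -\frac{1}{n}\sum_i \left[ y_i \log \sigma(z_i) + (1 - y_i) \log(1 - \sigma(z_i)) \right]$, which is why the division by `X.shape[0]` has to wrap both terms. For a quick sanity check, here is a minimal usage sketch; the toy data below is illustrative (not from the original thread) and assumes only NumPy:

```python
import numpy as np

# Illustrative toy data: two Gaussian blobs, one per class.
rng = np.random.RandomState(0)
X = np.vstack([rng.normal(loc=-1.0, size=(50, 2)),
               rng.normal(loc=1.0, size=(50, 2))])
y = np.hstack([np.zeros(50), np.ones(50)])

lrgd = LogisticRegressionGD(eta=0.3, n_iter=1000, random_state=1)
lrgd.fit(X, y)

print(lrgd.predict(X[:5]))   # class labels for the first five examples
print(lrgd.losses_[-1])      # final mean log loss; should shrink over epochs
```

If the values in `lrgd.losses_` are not decreasing, the learning rate `eta` is usually the first thing to tune.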

---
```python
# class LR with gradient descent
class LogisticRegressionGD:
    def __init__(self, eta=0.01, n_iter=50, random_state=1):
        self.eta = eta
        self.n_iter = n_iter
        self.random_state = random_state

    def fit(self, X, y):
        rgen = np.random.RandomState(self.random_state)
        self.w_ = rgen.normal(loc=0.0, scale=0.01, size=X.shape[1])
        self.b_ = np.float_(0.)
        self.losses_ = []
        for i in range(self.n_iter):
            net_input = self.net_input(X)
            output = self.activation(net_input)
            errors = (y - output)
            self.w_ += self.eta * 2.0 * X.T.dot(errors) / X.shape[0]
            self.b_ += self.eta * 2.0 * errors.mean()
            loss = (-y.dot(np.log(output)) - ((1 - y).dot(np.log(1 - output))) / X.shape[0])
            self.losses_.append(loss)
        return self

    def net_input(self, X):
        return np.dot(X, self.w_) + self.b_

    def activation(self, z):
        return 1. / (1. + np.exp(-np.clip(z, -250, 250)))

    def predict(self, X): """Return class label after unit step"""
        return np.where(self.activation(self.net_input(X)) >= 0.5, 1, 0)
```
I wrote the same code, but I got an error. Defining n_iter seems right, but Python gives me:

```
    return np.where(self.activation(self.net_input(X)) >= 0.5, 1, 0)
    ^
IndentationError: unexpected indent
```

I typed the code exactly as written. Did I get the indentation wrong somewhere? Can anyone help?
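For reference, the likely cause is the `predict` definition above: putting the docstring on the same line as the `def` makes it a complete one-line function body, so the indented `return` on the next line is "unexpected". Moving the docstring onto its own line fixes it; a minimal sketch of the corrected method (it belongs inside the class):

```python
def predict(self, X):
    """Return class label after unit step"""
    # With the docstring on its own line, the indented return below
    # is parsed as part of the function body, as intended.
    return np.where(self.activation(self.net_input(X)) >= 0.5, 1, 0)
```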