Loss functions
In this chapter you will discover the conceptual framework behind logistic regression and SVMs, which will let you delve deeper into the inner workings of these models. This is a summary of the lecture "Linear Classifiers in Python", via DataCamp.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
Linear classifiers - the coefficients
- Dot Products
  - `x@y` is called the dot product of `x` and `y`, and is written $x \cdot y$
- Linear Classifier predictions
  - raw model output = coefficients $\cdot$ features + intercept
  - Linear classifier prediction: compute raw model output, check the sign (see the sketch below)
    - if positive, predict one class
    - if negative, predict the other class
  - This is the same for logistic regression and linear SVM
    - `fit` is different, but `predict` is the same
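A minimal sketch of this prediction rule, with made-up coefficients, intercept, and feature values purely for illustration:
# made-up coefficients, intercept, and features, purely for illustration
coefficients = np.array([1.0, -2.0])
intercept = -1.0
features = np.array([3.0, 0.5])

# raw model output = coefficients . features + intercept
raw_model_output = coefficients @ features + intercept

# linear classifier prediction: check the sign of the raw model output
prediction = 1 if raw_model_output > 0 else -1
print(raw_model_output, prediction)
Once `coef_` and `intercept_` have been learned, both `LogisticRegression` and `LinearSVC` make predictions in exactly this way.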
Changing the model coefficients
When you call `fit` with scikit-learn, the logistic regression coefficients are automatically learned from your dataset. In this exercise you will explore how the decision boundary is represented by the coefficients. To do so, you will change the coefficients manually (instead of with `fit`), and visualize the resulting classifiers.
def make_meshgrid(x, y, h=.02, lims=None):
"""Create a mesh of points to plot in
Parameters
----------
x: data to base x-axis meshgrid on
y: data to base y-axis meshgrid on
h: stepsize for meshgrid, optional
Returns
-------
xx, yy : ndarray
"""
if lims is None:
x_min, x_max = x.min() - 1, x.max() + 1
y_min, y_max = y.min() - 1, y.max() + 1
else:
x_min, x_max, y_min, y_max = lims
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
return xx, yy
def plot_contours(ax, clf, xx, yy, proba=False, **params):
"""Plot the decision boundaries for a classifier.
Parameters
----------
ax: matplotlib axes object
clf: a classifier
xx: meshgrid ndarray
yy: meshgrid ndarray
params: dictionary of params to pass to contourf, optional
"""
if proba:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:,-1]
Z = Z.reshape(xx.shape)
out = ax.imshow(Z,extent=(np.min(xx), np.max(xx), np.min(yy), np.max(yy)),
origin='lower', vmin=0, vmax=1, **params)
ax.contour(xx, yy, Z, levels=[0.5])
else:
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
out = ax.contourf(xx, yy, Z, **params)
return out
def plot_classifier(X, y, clf, ax=None, ticks=False, proba=False, lims=None):
# assumes classifier "clf" is already fit
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1, lims=lims)
if ax is None:
plt.figure()
ax = plt.gca()
show = True
else:
show = False
# can abstract some of this into a higher-level function for learners to call
cs = plot_contours(ax, clf, xx, yy, cmap=plt.cm.coolwarm, alpha=0.8, proba=proba)
if proba:
cbar = plt.colorbar(cs)
        cbar.ax.set_ylabel(r'probability of red $\Delta$ class', fontsize=20, rotation=270, labelpad=30)
cbar.ax.tick_params(labelsize=14)
    #ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=30, edgecolors='k', linewidth=1)
labels = np.unique(y)
if len(labels) == 2:
ax.scatter(X0[y==labels[0]], X1[y==labels[0]], cmap=plt.cm.coolwarm,
s=60, c='b', marker='o', edgecolors='k')
ax.scatter(X0[y==labels[1]], X1[y==labels[1]], cmap=plt.cm.coolwarm,
s=60, c='r', marker='^', edgecolors='k')
else:
ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=50, edgecolors='k', linewidth=1)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
# ax.set_xlabel(data.feature_names[0])
# ax.set_ylabel(data.feature_names[1])
if ticks:
ax.set_xticks(())
ax.set_yticks(())
# ax.set_title(title)
if show:
plt.show()
else:
return ax
from sklearn.linear_model import LogisticRegression
# X and y are assumed to hold a two-feature binary classification dataset loaded earlier
model = LogisticRegression()
model.fit(X, y)
model.coef_ = np.array([[0,1]])
model.intercept_ = np.array([0])
# Plot the data and decision boundary
plot_classifier(X,y,model)
# Print the number of errors
num_err = np.sum(y != model.predict(X))
print("Number of errors:", num_err)
What is a loss function?
- Least squares: the squared loss
  - scikit-learn's `LinearRegression` minimizes the loss $$\sum_{i=1}^{n}(\text{true } i\text{th target value} - \text{predicted } i\text{th target value})^2$$
  - Minimization is with respect to the coefficients (parameters) of the model.
- Classification errors: the 0-1 loss
  - Squared loss is not appropriate for classification problems
  - A natural loss for classification problems is the number of errors
  - This is the 0-1 loss: it's 0 for a correct prediction and 1 for an incorrect prediction (see the sketch below)
  - But this loss is hard to minimize directly
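A minimal sketch of the 0-1 loss (the array names here are just placeholders): it simply counts the misclassified examples, exactly like the error count in the earlier exercise.
def zero_one_loss(y_true, y_pred):
    # 1 for every incorrect prediction, 0 for every correct one, summed over examples
    return np.sum(y_true != y_pred)

# e.g. two mistakes out of five predictions
print(zero_one_loss(np.array([1, -1, 1, 1, -1]), np.array([1, 1, 1, -1, -1])))
Because this count is piecewise constant in the model coefficients, its gradient is zero almost everywhere, which is why it is hard to minimize directly.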
Minimizing a loss function
In this exercise you'll implement linear regression "from scratch" using scipy.optimize.minimize, and compare the result with scikit-learn's LinearRegression.
X = pd.read_csv('./dataset/boston_X.csv').to_numpy()
y = pd.read_csv('./dataset/boston_y.csv').to_numpy()
from scipy.optimize import minimize
from sklearn.linear_model import LinearRegression
# The squared error, summed over the training examples
def my_loss(w):
s = 0
for i in range(y.size):
# Get the true and predicted target values for example 'i'
y_i_true = y[i]
y_i_pred = w@X[i]
s = s + (y_i_true - y_i_pred) ** 2
return s
# Returns the w that makes my_loss(w) smallest
w_fit = minimize(my_loss, X[0]).x
print(w_fit)
# Compare with scikit-learn's LinearRegression coefficients
lr = LinearRegression(fit_intercept=False).fit(X, y)
print(lr.coef_)
Comparing the logistic and hinge losses
Here we plot the logistic and hinge losses using their mathematical expressions, as functions of the raw model output (times the true label).
def log_loss(raw_model_output):
return np.log(1 + np.exp(-raw_model_output))
def hinge_loss(raw_model_output):
return np.maximum(0, 1 - raw_model_output)
# Create a grid of values and plot
grid = np.linspace(-2,2,1000)
plt.plot(grid, log_loss(grid), label='logistic');
plt.plot(grid, hinge_loss(grid), label='hinge');
plt.axvline(x=0, linestyle='dashed', color='k')
plt.legend();
Implementing logistic regression
This is very similar to the earlier exercise where you implemented linear regression "from scratch" using `scipy.optimize.minimize`. However, this time we'll minimize the logistic loss and compare with scikit-learn's `LogisticRegression`.
The `log_loss()` function from the previous exercise is already defined in your environment, and the sklearn breast cancer prediction dataset (first 10 features, standardized) is loaded into the variables `X` and `y`.
X = pd.read_csv('./dataset/breast_X.csv').to_numpy()
y = pd.read_csv('./dataset/breast_y.csv').to_numpy()
# The logistic loss, summed over the training examples
def my_loss(w):
s = 0
for i in range(y.size):
raw_model_output = w@X[i]
s = s + log_loss(raw_model_output * y[i])
return s
# Returns the w that makes my_loss(w) smallest
w_fit = minimize(my_loss, X[0]).x
print(w_fit)
# Compare with scikit-learn's LogisticRegression
lr = LogisticRegression(fit_intercept=False, C=1000000).fit(X, y)
print(lr.coef_)
As you can see, logistic regression is just minimizing the loss function we've been looking at. (The very large C essentially turns off scikit-learn's regularization, so its fit is comparable to our unregularized loss.)