Case Study
Description: we will build a logistic regression model to predict whether a student gets admitted to a university. Suppose you are the administrator of a university department and want to determine each applicant's chance of admission based on their results on two exams. You have historical data from previous applicants that can serve as a training set for logistic regression. For each training example, you have the applicant's scores on the two exams and the admission decision. To do this, we will build a classification model that estimates the probability of admission from the exam scores.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import os  # standard-library module, used for os.sep below
path = 'data' + os.sep + 'LogiReg_data.txt'  # os.sep is the OS-specific path separator
pdData = pd.read_csv(path, header=None, names=['Exam1', 'Exam2', 'Admitted'])
# read the file: no header row, so name the columns 'Exam1', 'Exam2', 'Admitted'
pdData.head()
pdData.shape
positive = pdData[pdData['Admitted'] == 1]
# select every row of pdData whose 'Admitted' column == 1 (admitted applicants)
negative = pdData[pdData['Admitted'] == 0]
fig, ax = plt.subplots(figsize=(10, 5))
# create a (10, 5) figure with a single axes object named ax
# fig, ax = plt.subplots() is equivalent to:
# fig = plt.figure()
# ax = fig.add_subplot(1, 1, 1)
ax.scatter(positive['Exam1'], positive['Exam2'], s=30, c='b', marker='o', label='Admitted')
# scatter plot of the positive examples: point size 30, blue, 'o' markers, labeled Admitted
ax.scatter(negative['Exam1'], negative['Exam2'], s=30, c='r', marker='x', label='Not Admitted')
ax.legend()  # show the legend
ax.set_xlabel('Exam 1 Score')
ax.set_ylabel('Exam 2 Score')
Logistic Regression
Goal: build a classifier (i.e., solve for the three parameters θ0, θ1, θ2),
then set a threshold and use it to decide the admission result.
Modules to implement (sigmoid and model are summarized in the formulas right after this list):
- sigmoid: the function that maps values to probabilities
- model: returns the predicted values
- cost: computes the loss for the given parameters
- gradient: computes the gradient direction for each parameter
- descent: performs the parameter updates
- accuracy: computes the accuracy
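For reference, the sigmoid and model modules below implement the standard logistic-regression formulas:

$$g(z) = \frac{1}{1 + e^{-z}}, \qquad h_\theta(x) = g(\theta^T x) = \frac{1}{1 + e^{-\theta^T x}}$$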
def sigmoid(z):
    return 1 / (1 + np.exp(-z))
nums = np.arange(-10, 10, step = 1)
fig, ax = plt.subplots(figsize = (12, 4))
ax.plot(nums, sigmoid(nums), 'r')
def model(X, theta):
    return sigmoid(np.dot(X, theta.T))
# np.dot() performs matrix multiplication
pdData.insert(0, 'Ones', 1)  # prepend a column of ones so theta_0 acts as the intercept
orig_data = pdData.values  # convert the DataFrame to a NumPy array
cols = orig_data.shape[1]
X = orig_data[:, 0:cols-1]
y = orig_data[:, cols-1:cols]
theta = np.zeros([1, 3])
X[:5]
y[:5]
theta
X.shape, y.shape, theta.shape
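With this dataset these come out as (100, 3), (100, 1) and (1, 3), assuming the file holds the 100 examples that the n = 100 below implies.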
Cost function
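The cost we implement is the average negative log-likelihood:

$$J(\theta) = -\frac{1}{n}\sum_{i=1}^{n}\left[y^{(i)}\log h_\theta(x^{(i)}) + \left(1 - y^{(i)}\right)\log\left(1 - h_\theta(x^{(i)})\right)\right]$$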
def cost(X, y, theta):
    left = np.multiply(-y, np.log(model(X, theta)))
    right = np.multiply(1 - y, np.log(1 - model(X, theta)))
    return np.sum(left - right) / len(X)
cost(X, y, theta)
def gradient(X, y, theta):
    grad = np.zeros(theta.shape)
    error = (model(X, theta) - y).ravel()
    # .ravel() flattens a multi-dimensional array to 1-D; flatten() has the same effect,
    # but numpy's flatten() returns a copy, so changes to it do not affect the original array,
    # whereas ravel() returns a view, so changes to it do affect the original array
    for j in range(len(theta.ravel())):
        term = np.multiply(error, X[:, j])
        grad[0, j] = np.sum(term) / len(X)
    return grad
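Each entry of grad matches the usual partial derivative of the cost above:

$$\frac{\partial J}{\partial \theta_j} = \frac{1}{n}\sum_{i=1}^{n}\left(h_\theta(x^{(i)}) - y^{(i)}\right)x_j^{(i)}$$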
Three gradient descent methods
STOP_ITER = 0  # stop after a fixed number of iterations
STOP_COST = 1  # stop when the cost barely changes
STOP_GRAD = 2  # stop when the gradient norm is small
def stopCriterion(type, value, threshold):
    # the three different stopping strategies
    if type == STOP_ITER:
        return value > threshold
    elif type == STOP_COST:
        return abs(value[-1] - value[-2]) < threshold
    elif type == STOP_GRAD:
        return np.linalg.norm(value) < threshold  # np.linalg.norm() computes the L2 norm
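A quick sanity check of the three strategies; the values below are purely illustrative:
stopCriterion(STOP_ITER, 5001, 5000)              # True: the iteration count exceeded the threshold
stopCriterion(STOP_COST, [0.65, 0.63], 1e-6)      # False: the last two costs still differ by 0.02
stopCriterion(STOP_GRAD, np.zeros((1, 3)), 0.05)  # True: the gradient norm 0 is below 0.05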
import numpy.random
# shuffle the data
def shuffleData(data):
    np.random.shuffle(data)
    cols = data.shape[1]
    X = data[:, 0:cols - 1]
    y = data[:, cols - 1:]
    return X, y
import time

def descent(data, theta, batchSize, stopType, thresh, alpha):
    # solve with gradient descent
    init_time = time.time()
    i = 0  # iteration counter
    k = 0  # batch index
    X, y = shuffleData(data)
    grad = np.zeros(theta.shape)  # the computed gradient
    costs = [cost(X, y, theta)]  # recorded loss values
    while True:
        grad = gradient(X[k:k + batchSize], y[k:k + batchSize], theta)
        k += batchSize  # take the next batchSize examples
        if k >= n:  # n is the total number of examples, set globally below
            k = 0
            X, y = shuffleData(data)  # reshuffle
        theta = theta - alpha * grad  # parameter update
        costs.append(cost(X, y, theta))  # compute the new loss
        i += 1
        if stopType == STOP_ITER: value = i
        elif stopType == STOP_COST: value = costs
        elif stopType == STOP_GRAD: value = grad
        if stopCriterion(stopType, value, thresh): break
    return theta, i - 1, costs, grad, time.time() - init_time
def runExpe(data, theta, batchSize, stopType, thresh, alpha):
    theta, iter, costs, grad, dur = descent(data, theta, batchSize, stopType, thresh, alpha)
    # label the run: raw exam scores exceed 2, scaled ones cluster near 0
    name = "Original" if (data[:, 1] > 2).sum() > 1 else "Scaled"
    name += " data - learning rate: {} - ".format(alpha)
    if batchSize == n: strDescType = "Gradient"
    elif batchSize == 1: strDescType = "Stochastic"
    else: strDescType = "Mini-batch ({})".format(batchSize)
    name += strDescType + " descent - Stop: "
    if stopType == STOP_ITER: strStop = "{} iterations".format(thresh)
    elif stopType == STOP_COST: strStop = "costs change < {}".format(thresh)
    else: strStop = "gradient norm < {}".format(thresh)
    name += strStop
    print("***{}\nTheta: {} - Iter: {} - Last cost: {:03.2f} - Duration: {:03.2f}s".format(
        name, theta, iter, costs[-1], dur))
    fig, ax = plt.subplots(figsize=(12, 4))
    ax.plot(np.arange(len(costs)), costs, 'r')
    ax.set_xlabel('Iterations')
    ax.set_ylabel('Cost')
    ax.set_title(name.upper() + ' - Error vs. Iteration')
    return theta
Different stopping strategies
Stop after a fixed number of iterations
# the gradient descent method chosen here is based on all the samples
n = 100  # total number of training examples
runExpe(orig_data, theta, n, STOP_ITER, thresh=5000, alpha=0.000001)
Stop based on the cost value
With a threshold of 1e-6, roughly 110,000 iterations are needed
runExpe(orig_data, theta, n, STOP_COST, thresh=0.000001, alpha=0.001)
Stop based on the change in the gradient
With a threshold of 0.05, roughly 40,000 iterations are needed
runExpe(orig_data, theta, n, STOP_GRAD, thresh=0.05, alpha=0.001)
Comparing different gradient descent methods
Stochastic gradient descent
runExpe(orig_data, theta, 1, STOP_ITER, thresh=5000, alpha=0.001)
The loss fluctuates wildly and is very unstable; let's try a smaller learning rate
runExpe(orig_data, theta, 1, STOP_ITER, thresh=15000, alpha=0.000002)
Fast, but unstable; it needs a very small learning rate
Mini-batch descent
runExpe(orig_data, theta, 16, STOP_ITER, thresh=15000, alpha=0.001)
The fluctuation is still fairly large, so let's try standardizing the data: for each column (attribute), subtract the column mean and divide by the column standard deviation. The result is that, for every attribute, the data are centered around 0 with variance 1.
from sklearn import preprocessing as pp
scaled_data = orig_data.copy()
scaled_data[:, 1:3] = pp.scale(orig_data[:, 1:3])
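For reference, pp.scale on these two columns is equivalent to the column-wise NumPy transform sketched below (cols12 and scaled_manual are names introduced just for this illustration; NumPy's std defaults to ddof=0, matching sklearn's):
cols12 = orig_data[:, 1:3]
scaled_manual = (cols12 - cols12.mean(axis=0)) / cols12.std(axis=0)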
runExpe(scaled_data, theta, n, STOP_ITER, thresh=5000, alpha=0.001)
Much better! On the original data the cost only got down to 0.61, whereas here we reach 0.38! Preprocessing the data really matters.
runExpe(scaled_data, theta, n, STOP_GRAD, thresh=0.02, alpha=0.001)
More iterations drive the loss down even further!
theta = runExpe(scaled_data, theta, 1, STOP_GRAD, thresh=0.002/5, alpha=0.001)
Stochastic gradient descent is faster, but it also needs many more iterations, so mini-batches are the better compromise!!!
runExpe(scaled_data, theta, 16, STOP_GRAD, thresh=0.002*2, alpha=0.001)
Accuracy
# set the decision threshold
def predict(X, theta):
    return [1 if x >= 0.5 else 0 for x in model(X, theta)]
scaled_X = scaled_data[:, :3]
y = scaled_data[:, 3]
predictions = predict(scaled_X, theta)
correct = [1 if ((a == 1 and b == 1) or (a == 0 and b == 0)) else 0 for (a, b) in zip(predictions, y)]
accuracy = sum(map(int, correct)) * 100 // len(correct)  # integer percentage of correct predictions
print('accuracy = {0}%'.format(accuracy))
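A vectorized NumPy equivalent of the threshold-then-compare above, as a minimal sketch (acc is just an illustrative name):
acc = np.mean((model(scaled_X, theta) >= 0.5).ravel() == (y == 1)) * 100
print('accuracy = {:.0f}%'.format(acc))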