Preface
This article walks through the derivation and implementation of logistic regression (LR).
LR Derivation
The derivation breaks down into a few steps:
1. Give the mapping (hypothesis) function.
2. Write the likelihood of a single sample and of all samples, then take the negative log-likelihood.
3. Solve with gradient ascent to obtain the parameter update rule.
In the final update rule, i indexes the current sample and j indexes the feature within that sample: the new parameter equals the current parameter plus the learning rate times (current label minus current fitted value) times the j-th feature of sample i. The corresponding formulas are sketched below.
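For reference (the original derivation figure is not reproduced here), a minimal sketch of the formulas these steps yield, writing θ for the parameters, α for the learning rate, and m for the number of samples:

1. Mapping function, the sigmoid of a linear combination:

$$h_\theta(x) = \frac{1}{1 + e^{-\theta^T x}}$$

2. Single-sample likelihood, full-sample likelihood, and the negative log-likelihood:

$$P(y \mid x; \theta) = h_\theta(x)^{y}\,\big(1 - h_\theta(x)\big)^{1-y}$$

$$L(\theta) = \prod_{i=1}^{m} h_\theta(x^{(i)})^{y^{(i)}}\,\big(1 - h_\theta(x^{(i)})\big)^{1-y^{(i)}}$$

$$-\log L(\theta) = -\sum_{i=1}^{m}\Big[y^{(i)}\log h_\theta(x^{(i)}) + \big(1-y^{(i)}\big)\log\big(1-h_\theta(x^{(i)})\big)\Big]$$

3. Gradient ascent update for parameter j on sample i:

$$\theta_j := \theta_j + \alpha\,\big(y^{(i)} - h_\theta(x^{(i)})\big)\,x_j^{(i)}$$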
LR Implementation
from numpy import *
#—————————————————— data preprocessing ——————————————————#
def loadDataSet():
    dataMat = []; labelMat = []
    fr = open('./Ch05/testSet.txt')
    for line in fr.readlines():
        lineArr = line.strip().split()
        dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])  # prepend 1.0 as the bias term x0
        labelMat.append(int(lineArr[2]))
    return dataMat, labelMat
#—————————————————— sigmoid definition ——————————————————#
def sigmoid(inX):
    return 1.0/(1+exp(-inX))
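One caveat: for large negative inputs, exp(-inX) overflows and NumPy emits a RuntimeWarning. A numerically safer variant, offered as a sketch (stableSigmoid is not part of the original code):

def stableSigmoid(inX):
    # clip the exponent so exp() stays inside float range; results are unchanged in practice
    return 1.0/(1 + exp(-clip(inX, -500, 500)))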
#—————————————————— batch gradient ascent ——————————————————#
def gradAscent(dataMatIn, classLabels):
    dataMatrix = mat(dataMatIn)                  # convert to NumPy matrix, shape (m, n)
    labelMat = mat(classLabels).transpose()      # column vector of labels, shape (m, 1)
    m, n = shape(dataMatrix)
    alpha = 0.001                                # learning rate (step size)
    maxCycles = 500                              # number of iterations
    weights = ones((n, 1))
    for k in range(maxCycles):                   # heavy on matrix operations
        h = sigmoid(dataMatrix*weights)          # fitted values for all samples, shape (m, 1)
        error = (labelMat - h)                   # y - h, shape (m, 1)
        weights = weights + alpha * dataMatrix.transpose() * error  # theta := theta + alpha * X^T (y - h)
    return weights
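The update line is the batch form of the per-sample rule from the derivation: dataMatrix.transpose() * error computes X^T(y - h) over all m samples at once. A quick call, assuming testSet.txt sits at the path hard-coded in loadDataSet:

dataArr, labelMat = loadDataSet()
weights = gradAscent(dataArr, labelMat)
print(weights)   # 3x1 weight matrix: bias, X1, X2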
#—————————————————— plot the dataset and the decision boundary ——————————————————#
def plotBestFit(weights):
    import matplotlib.pyplot as plt
    weights = asarray(weights).flatten()   # accept either a matrix (gradAscent) or an array (stocGradAscent*)
    dataMat, labelMat = loadDataSet()
    dataArr = array(dataMat)
    n = shape(dataArr)[0]
    xcord1 = []; ycord1 = []
    xcord2 = []; ycord2 = []
    for i in range(n):
        if int(labelMat[i]) == 1:
            xcord1.append(dataArr[i,1]); ycord1.append(dataArr[i,2])
        else:
            xcord2.append(dataArr[i,1]); ycord2.append(dataArr[i,2])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
    ax.scatter(xcord2, ycord2, s=30, c='green')
    x = arange(-3.0, 3.0, 0.1)
    y = (-weights[0]-weights[1]*x)/weights[2]   # at z = 0 the sigmoid equals 0.5, i.e. the decision boundary
    ax.plot(x, y)
    plt.xlabel('X1'); plt.ylabel('X2')
    plt.show()
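With the weights from the previous snippet, the fit can be checked visually:

plotBestFit(weights)   # draws the two classes and the line where sigmoid = 0.5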
#—————————————————— stochastic gradient ascent ——————————————————#
def stocGradAscent0(dataMatrix, classLabels):
    m, n = shape(dataMatrix)
    alpha = 0.01
    weights = ones(n)                            # initialize to all ones
    for i in range(m):                           # one pass over the data, one sample per update
        h = sigmoid(sum(dataMatrix[i]*weights))
        error = classLabels[i] - h
        weights = weights + alpha * error * dataMatrix[i]
    return weights
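Unlike gradAscent, this function expects a NumPy array rather than a list, since dataMatrix[i]*weights is an element-wise product. A usage sketch:

dataArr, labelMat = loadDataSet()
weights0 = stocGradAscent0(array(dataArr), labelMat)   # convert the list to an array first
plotBestFit(weights0)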
#—————————————————— improved stochastic gradient ascent ——————————————————#
def stocGradAscent1(dataMatrix, classLabels, numIter=150):   # default is 150 iterations
    m, n = shape(dataMatrix)
    weights = ones(n)                            # initialize to all ones
    for j in range(numIter):
        dataIndex = list(range(m))               # indices of samples not yet used in this pass
        for i in range(m):
            alpha = 4/(1.0+j+i)+0.0001           # alpha shrinks as the iterations proceed
            randIndex = int(random.uniform(0, len(dataIndex)))  # pick a random sample to reduce periodic oscillation
            sampleIndex = dataIndex[randIndex]
            h = sigmoid(sum(dataMatrix[sampleIndex]*weights))
            error = classLabels[sampleIndex] - h
            weights = weights + alpha * error * dataMatrix[sampleIndex]
            del(dataIndex[randIndex])            # remove the index so each sample is used once per pass
    return weights
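The calling convention is the same, with an optional iteration count. The schedule 4/(1.0+j+i)+0.0001 starts near 4 and decays toward the 0.0001 floor, so early updates move fast and later ones settle. Continuing with the same data:

weights1 = stocGradAscent1(array(dataArr), labelMat, numIter=150)
plotBestFit(weights1)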
#—————————————————— classifier output ——————————————————#
def classifyVector(inX, weights):
    prob = sigmoid(sum(inX*weights))
    if prob > 0.8: return 1.0    # note: cutoff raised from the usual 0.5 to 0.8
    else: return 0.0
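With the 0.8 cutoff, a sample is labeled 1 only when θᵀx > ln(0.8/0.2) ≈ 1.386, a stricter criterion than the usual 0.5 cutoff (θᵀx > 0). A toy call with hypothetical feature values:

print(classifyVector(array([1.0, 0.5, -1.2]), weights1))   # weights1 from the snippet above; the sample values are made up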
#—————————————————— test routine ——————————————————#
def colicTest():
    frTrain = open(r'E:\Data1\train.txt')
    frTest = open(r'E:\Data1\test.txt')
    trainingSet = []; trainingLabels = []
    for line in frTrain.readlines():
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(13):
            lineArr.append(int(currLine[i+3]))   # the 13 features sit in columns 3..15
        trainingSet.append(lineArr)
        trainingLabels.append(float(currLine[16]))
    trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 10)  # trainingSet must be converted from a list to an array
    errorCount = 0; numTestVec = 0.0
    for line in frTest.readlines():
        numTestVec += 1.0                        # count test records
        currLine = line.strip().split('\t')
        user_id = int(currLine[0]); item_id = int(currLine[2])
        lineArr = []
        for i in range(13):
            lineArr.append(int(currLine[i+3]))
        a = int(classifyVector(array(lineArr), trainWeights))
        print(user_id, item_id, a)
        # to score instead of just printing predictions, restore (requires a label in currLine[16]):
        #if int(classifyVector(array(lineArr), trainWeights)) != int(currLine[16]):
        #    errorCount += 1
    #errorRate = float(errorCount)/numTestVec
    #print("the error rate of this test is: %f" % errorRate)
    #return errorRate                             # multiTest below expects this return value
#—————————————————— call colicTest 10 times and average the results ——————————————————#
def multiTest():
    numTests = 10; errorSum = 0.0
    for k in range(numTests):
        errorSum += colicTest()                  # requires colicTest to return an error rate
    print("after %d iterations the average error rate is: %f" % (numTests, errorSum/float(numTests)))
The full code is available on git.