Logistic Regression via Gradient Descent in Python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os

path = 'data' + os.sep + 'LogiReg_data.txt'
# header=None tells read_csv the file has no header row; names supplies the column labels
pdData = pd.read_csv(path, header=None, names=['Exam 1', 'Exam 2', 'Admitted'])
pdData.head()
positive = pdData[pdData['Admitted'] == 1]
negative = pdData[pdData['Admitted'] == 0]

fig, ax = plt.subplots(figsize=(10, 5))
ax.scatter(positive['Exam 1'], positive['Exam 2'], s=30, c='b', marker='o', label='Admitted')
ax.scatter(negative['Exam 1'], negative['Exam 2'], s=30, c='r', marker='x', label='Not Admitted')
ax.legend()
ax.set_xlabel('Exam 1 Score')
ax.set_ylabel('Exam 2 Score')
fig
def sigmoid(z):
    return 1 / (1 + np.exp(-z))
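As a quick sanity check, sigmoid(0) should be exactly 0.5 and large inputs should saturate toward 0 and 1 (a minimal check, not part of the training pipeline):

print(sigmoid(0))                             # 0.5
print(sigmoid(np.array([-10.0, 0.0, 10.0])))  # [~4.54e-05, 0.5, ~0.99995]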
def model(X, theta):
    # prediction h(x) = sigmoid(X . theta^T)
    return sigmoid(np.dot(X, theta.T))
pdData.insert(0, 'ones', 1)  # add a bias column of 1s

original_data = pdData.values  # .as_matrix() was removed in pandas 1.0; .values returns the same ndarray
cols = original_data.shape[1]  # total number of columns
X = original_data[:, 0:cols-1]
Y = original_data[:, cols-1:cols]
n = original_data.shape[0]  # number of samples; used below as the full-batch size
theta = np.zeros([1, 3])  # theta = array([[0., 0., 0.]]), a row vector of zeros
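A quick shape check helps catch broadcasting mistakes before training; model(X, theta) should come out with the same shape as Y (illustrative, assuming the 100-row LogiReg_data.txt):

print(X.shape, Y.shape, theta.shape)  # (100, 3) (100, 1) (1, 3)
print(model(X, theta).shape)          # (100, 1), matching Y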
def cost(X, Y, theta):
    # cross-entropy loss: J(theta) = -(1/m) * sum( y*log(h) + (1-y)*log(1-h) )
    left = np.multiply(-Y, np.log(model(X, theta)))
    right = np.multiply(1 - Y, np.log(1 - model(X, theta)))
    return np.sum(left - right) / len(X)
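With theta initialized to zeros, every prediction is sigmoid(0) = 0.5, so the initial loss must equal -ln(0.5) = ln 2 ≈ 0.6931 regardless of the data, which makes a cheap correctness check:

print(cost(X, Y, theta))  # ~0.6931471805599453, i.e. ln(2)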
def gradient(X, Y, theta):
    grad = np.zeros(theta.shape)
    error = (model(X, theta) - Y).ravel()
    for j in range(len(theta.ravel())):
        term = np.multiply(error, X[:, j])  # (h(x_i) - y_i) * x_ij
        grad[0, j] = np.sum(term) / len(X)  # sum(term) / m
    return grad
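The analytic gradient can be verified against central finite differences of the cost function; this is a standard debugging step (a minimal sketch, with numeric_gradient as a throwaway helper):

def numeric_gradient(X, Y, theta, eps=1e-6):
    # approximate dJ/dtheta_j by perturbing each parameter in both directions
    num_grad = np.zeros(theta.shape)
    for j in range(theta.shape[1]):
        plus, minus = theta.copy(), theta.copy()
        plus[0, j] += eps
        minus[0, j] -= eps
        num_grad[0, j] = (cost(X, Y, plus) - cost(X, Y, minus)) / (2 * eps)
    return num_grad

print(gradient(X, Y, theta) - numeric_gradient(X, Y, theta))  # should be ~0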
STOP_ITER = 0
STOP_COST = 1
STOP_GRAD = 2

def stop_fun(stop_type, value, threshold):
    if stop_type == STOP_ITER:
        return value > threshold                       # stop after a fixed number of iterations
    elif stop_type == STOP_COST:
        return abs(value[-1] - value[-2]) < threshold  # stop when the cost change falls below a threshold
    elif stop_type == STOP_GRAD:
        return np.linalg.norm(value) < threshold       # stop when the gradient norm falls below a threshold
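Each strategy inspects a different quantity: an iteration count, the history of costs, or the latest gradient (illustrative values):

print(stop_fun(STOP_ITER, 5001, 5000))                      # True: iteration budget exceeded
print(stop_fun(STOP_COST, [0.700000, 0.699999], 1e-5))      # True: cost barely changed
print(stop_fun(STOP_GRAD, np.array([[1e-4, 0, 0]]), 0.02))  # True: gradient norm is tiny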
def shuffleData(data):
    np.random.shuffle(data)  # shuffle the rows in place
    cols = data.shape[1]
    X = data[:, 0:cols-1]
    Y = data[:, cols-1:]
    return X, Y
import time

def descent(data, theta, batchSize, stopType, thresh, alpha):
    init_time = time.time()
    i = 0  # iteration counter
    k = 0  # batch pointer
    X, Y = shuffleData(data)
    grad = np.zeros(theta.shape)  # initialize the gradient
    costs = [cost(X, Y, theta)]   # track the loss over time
    while True:
        grad = gradient(X[k:k+batchSize], Y[k:k+batchSize], theta)
        k += batchSize
        if k >= n:  # one pass over the data: reset the pointer and reshuffle
            k = 0
            X, Y = shuffleData(data)
        theta = theta - alpha * grad  # parameter update
        costs.append(cost(X, Y, theta))
        i += 1
        if stopType == STOP_ITER:
            value = i
        elif stopType == STOP_COST:
            value = costs
        elif stopType == STOP_GRAD:
            value = grad
        if stop_fun(stopType, value, thresh):
            break
    return theta, i - 1, costs, grad, time.time() - init_time
def run_example(data, theta, batchSize, stopType, thresh, alpha):
    theta, n_iter, costs, grad, times = descent(data, theta, batchSize, stopType, thresh, alpha)
    name = 'Original' if (data[:, 1] > 2).sum() > 1 else 'Scaled'
    name += ' data - learning rate: {} - '.format(alpha)
    # describe the descent strategy
    if batchSize == n:
        strDescType = 'Gradient'
    elif batchSize == 1:
        strDescType = 'Stochastic'
    else:
        strDescType = 'Mini-batch ({})'.format(batchSize)
    name += strDescType + ' descent - Stop: '
    # describe the stopping strategy
    if stopType == STOP_ITER:
        strStop = '{} iterations'.format(thresh)
    elif stopType == STOP_COST:
        strStop = 'costs change < {}'.format(thresh)
    else:
        strStop = 'gradient norm < {}'.format(thresh)
    name += strStop
    print('***{}\nTheta: {} - Iter: {} - Last cost: {:03.2f} - Duration: {:03.2f}s'.format(
        name, theta, n_iter, costs[-1], times))
    # plot the loss curve
    fig, ax = plt.subplots(figsize=(12, 4))
    ax.plot(np.arange(len(costs)), costs, 'r')
    ax.set_xlabel('Iterations')
    ax.set_ylabel('Cost')
    ax.set_title(name.upper() + ' - Error vs. Iteration')
    plt.show()
    return theta
from sklearn import preprocessing as pp

# standardize the two exam-score columns; the bias column stays as 1
scaled_data = original_data.copy()
scaled_data[:, 1:3] = pp.scale(original_data[:, 1:3])

run_example(scaled_data, theta, n, STOP_ITER, thresh=5000, alpha=0.001)
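The same helper also runs mini-batch descent when the batch size is between 1 and n; the hyperparameters below are illustrative, not tuned values from this walkthrough:

# illustrative: mini-batch descent with batch size 16 on the scaled data
run_example(scaled_data, np.zeros([1, 3]), 16, STOP_ITER, thresh=15000, alpha=0.001)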
theta = run_example(scaled_data, theta, 1, STOP_GRAD, thresh=0.002/5, alpha=0.001)
def predict(X, theta):
    # classify as 1 when the predicted probability is at least 0.5
    return [1 if x >= 0.5 else 0 for x in model(X, theta)]
scaled_X = scaled_data[:, :3]
scaled_Y = scaled_data[:, 3]

predictions = predict(scaled_X, theta)
correct = [1 if ((a == 1 and b == 1) or (a == 0 and b == 0)) else 0
           for (a, b) in zip(predictions, scaled_Y)]
accuracy = sum(correct) / len(correct) * 100  # fraction of correct predictions, as a percentage
print('accuracy = {0}%'.format(accuracy))
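The same number falls out of a one-line NumPy comparison, which is easier to read:

accuracy_np = np.mean(np.array(predictions) == scaled_Y) * 100
print('accuracy = {0}%'.format(accuracy_np))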
sklearn.cross_validation was removed in scikit-learn 0.20; the imports below use sklearn.model_selection, and the KFold constructor takes n_splits instead of the sample count.

from sklearn.model_selection import KFold, cross_val_score
from sklearn.linear_model import LogisticRegression

kf = KFold(n_splits=10, shuffle=True, random_state=8)
lr = LogisticRegression()
accuracies = cross_val_score(lr, scaled_X, scaled_Y, scoring='roc_auc', cv=kf)
average_accuracy = sum(accuracies) / len(accuracies)
print(accuracies)
print(average_accuracy)

The ROC curve itself is plotted with the plot_roc helper defined next.
from sklearn.metrics import roc_curve, auc

def plot_roc(labels, predict_prob):
    # labels: ground-truth labels
    # predict_prob: predicted scores for the positive class
    false_positive_rate, true_positive_rate, thresholds = roc_curve(labels, predict_prob)
    roc_auc = auc(false_positive_rate, true_positive_rate)
    plt.title('ROC')
    plt.plot(false_positive_rate, true_positive_rate, 'b', label='AUC = %0.4f' % roc_auc)
    plt.legend(loc='lower right')
    plt.plot([0, 1], [0, 1], 'r--')
    plt.ylabel('TPR')
    plt.xlabel('FPR')
    plt.show()

plot_roc(scaled_Y, predictions)
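Passing the hard 0/1 predictions gives roc_curve only a single operating point, so the curve degenerates into two line segments. Feeding the model's predicted probabilities instead lets roc_curve sweep over all thresholds and trace the full curve (a small sketch using the theta trained above):

# use predicted probabilities so roc_curve can vary the decision threshold
probabilities = model(scaled_X, theta).ravel()
plot_roc(scaled_Y, probabilities)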