Python Machine Learning, Summary 4: Stochastic Gradient Descent (python / sklearn gradient descent) - Li Jun's ScienceNet blog
===============================================================
Summary:
1. Stochastic gradient descent can be viewed as an approximation of batch gradient descent, but it usually converges faster.
2. Stochastic gradient descent updates the weights from a single training sample (batch gradient descent updates them from all samples), so it escapes shallow local minima more easily; its error curve, however, is not as smooth as that of batch gradient descent.
3. Because samples must be drawn at random, the training set is usually reshuffled at every epoch to avoid getting stuck in cycles.
4. In stochastic gradient descent, a time-decaying adaptive learning rate is often used instead of a fixed learning rate eta (see the sketch after this list).
5. Stochastic gradient descent is not guaranteed to reach the global optimum, but it approaches it; with an adaptive learning rate it comes closer still.
6. Stochastic gradient descent lends itself to online learning, especially with very large data sets.
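As an illustration of point 4, the learning rate can be made to decay with the number of samples seen. The small sketch below is my own hedged example; the schedule c1 / (t + c2) and the constants are assumptions, not taken from the original post, and it only shows the shape of such a schedule.

# A minimal sketch of a time-decaying learning rate (illustrative constants, not from the post)
c1, c2 = 1.0, 50.0
def adaptive_eta(t):
    # t = number of samples processed so far; the step size shrinks over time
    return c1 / (t + c2)
# inside a weight update, the fixed eta could then be replaced by adaptive_eta(t)
print([round(adaptive_eta(t), 4) for t in (0, 100, 1000)])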
===============================================================
The simulation results are shown below.
The code is as follows:
##############################################################################
from sklearn import datasets
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from numpy.random import seed
from matplotlib.colors import ListedColormap
##############################################################################
# Load the Iris data set
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header = None)
#print(df.tail())
# Plot all samples of the first two classes (sepal length vs. petal length)
y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)
X = df.iloc[0:100, [0,2]].values
plt.scatter(X[:50, 0], X[:50, 1], color='red', marker='o', label='setosa')
plt.scatter(X[50:100, 0], X[50:100, 1], color='blue', marker='x', label='versicolor')
plt.xlabel('sepal length')
plt.ylabel('petal length')
plt.legend(loc='upper left')
plt.show()
##############################################################################
class AdalineSGD(object):
    def __init__(self, eta=0.01, n_iter=10, shuffle=True, random_state=None):
        self.eta = eta
        self.n_iter = n_iter
        self.w_initialized = False
        self.shuffle = shuffle
        if random_state:
            seed(random_state)

    def fit(self, X, y):
        self._initialize_weights(X.shape[1])
        self.cost_ = []
        for i in range(self.n_iter):
            if self.shuffle:
                X, y = self._shuffle(X, y)
            cost = []
            for xi, target in zip(X, y):
                cost.append(self._update_weights(xi, target))
            avg_cost = sum(cost) / len(y)
            self.cost_.append(avg_cost)
        return self

    '''
    def partial_fit(self, X, y):
        if not self.w_initialized:
            self._initialize_weights(X.shape[1])
        if y.ravel().shape[0] > 1:
            for xi, target in zip(X, y):
                self._update_weights(xi, target)
        else:
            self._update_weights(X, y)
        return self
    '''

    def _shuffle(self, X, y):
        r = np.random.permutation(len(y))
        return X[r], y[r]

    def _initialize_weights(self, m):
        self.w_ = np.zeros(1 + m)
        self.w_initialized = True

    def _update_weights(self, xi, target):
        output = self.net_input(xi)
        error = (target - output)
        self.w_[1:] += self.eta * xi.dot(error)
        self.w_[0] += self.eta * error
        cost = 0.5 * error ** 2
        return cost

    def net_input(self, X):
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def activation(self, X):
        return self.net_input(X)

    def predict(self, X):
        return np.where(self.activation(X) >= 0.0, 1, -1)
##############################################################################
# Feature scaling (standardization) helps convergence; it does not, however,
# guarantee that a model that previously failed to converge will converge after scaling.
X_std = np.copy(X)
X_std[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
X_std[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()
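# (Not in the original post) scikit-learn's StandardScaler performs the same
# per-feature standardization; shown here only as a hedged comparison sketch.
from sklearn.preprocessing import StandardScaler
X_std_sk = StandardScaler().fit_transform(X)
print(np.allclose(X_std, X_std_sk))   # should print True (both use the population std)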
ada = AdalineSGD(n_iter=15, eta=0.01, random_state=1)
ada.fit(X_std, y)
plt.plot(range(1, len(ada.cost_) + 1), ada.cost_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Average Cost')
plt.show()
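##############################################################################
# (Not in the original post) a hedged comparison sketch: scikit-learn's own
# SGD-based linear classifier trained on the same standardized data.
# The parameter choices below are illustrative assumptions, not taken from the book.
from sklearn.linear_model import SGDClassifier
sgd = SGDClassifier(loss='perceptron', eta0=0.01, learning_rate='constant', random_state=1)
sgd.fit(X_std, y)
print('SGDClassifier training accuracy: %.2f' % sgd.score(X_std, y))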
##############################################################################
def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
    # setup marker generator and color map
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])
    # plot the decision surface
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())
    # plot all samples
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
                    alpha=0.8, color=cmap(idx), marker=markers[idx], label=cl)
    # highlight test samples
    if test_idx:
        X_test, y_test = X[test_idx, :], y[test_idx]
        plt.scatter(X_test[:, 0], X_test[:, 1], facecolors='none', edgecolors='black',
                    alpha=1.0, linewidths=1, marker='o', s=55, label='test set')
##############################################################################
plot_decision_regions(X_std, y, classifier=ada)
plt.title('Adaline - Stochastic Gradient Descent')
plt.xlabel('sepal length [standardized]')
plt.ylabel('petal length [standardized]')
plt.legend(loc='upper left')
plt.show()
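Point 6 above mentions online learning on streaming or very large data. The sketch below is a hedged illustration, not part of the original post: it feeds the same standardized Iris data to scikit-learn's SGDClassifier in mini-batches via partial_fit, the usual pattern when the data cannot be held in memory at once. The batch size and parameter values are assumptions.

# Hedged sketch of online (incremental) learning with partial_fit; not from the original post.
from sklearn.linear_model import SGDClassifier
online_clf = SGDClassifier(loss='perceptron', eta0=0.01, learning_rate='constant', random_state=1)
classes = np.unique(y)                     # all class labels must be declared on the first call
rng = np.random.RandomState(1)
order = rng.permutation(len(y))            # shuffle first, as point 3 recommends
Xs, ys = X_std[order], y[order]
batch_size = 10                            # illustrative mini-batch size
for start in range(0, Xs.shape[0], batch_size):
    xb = Xs[start:start + batch_size]
    yb = ys[start:start + batch_size]
    online_clf.partial_fit(xb, yb, classes=classes)   # one weight-update pass per mini-batch
print('online model training accuracy: %.2f' % online_clf.score(X_std, y))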
Supplement:
# Adapted from Python Machine Learning by Sebastian Raschka (Chinese edition published by China Machine Press).
Source: Li Jun's ScienceNet blog, http://blog.sciencenet.cn/blog-3377553-1130434.html