【Python-ML】Adaptive Linear Neuron (Adaline)
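Adaline differs from the perceptron in that the weights are updated from a continuous linear activation rather than the thresholded class label, which makes the cost function differentiable. The first listing below implements the batch-gradient-descent version; for reference, the sum-of-squared-errors cost it minimizes and the weight update that its fit method implements are

$$J(\mathbf{w}) = \frac{1}{2}\sum_i \bigl(y^{(i)} - \mathbf{w}^\top\mathbf{x}^{(i)}\bigr)^2, \qquad \Delta\mathbf{w} = -\eta\,\nabla J(\mathbf{w}) = \eta\sum_i \bigl(y^{(i)} - \mathbf{w}^\top\mathbf{x}^{(i)}\bigr)\,\mathbf{x}^{(i)},$$

where $\eta$ is the learning rate. The second listing replaces the batch update with a per-sample (stochastic gradient descent) update and adds online learning via partial_fit.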
# -*- coding: utf-8 -*-
'''
Created on 2017-12-21
@author: Jason.F
@summary: Adaline (adaptive linear neuron) learning algorithm, batch gradient descent version
'''
import numpy as np
import time
import matplotlib.pyplot as plt
import pandas as pd

class AdalineGD(object):
    '''Adaptive Linear Neuron classifier (batch gradient descent).
    Hyper-parameters
        eta : float = Learning rate (between 0.0 and 1.0).
        n_iter : int = Passes over the training dataset.
    Attributes
        w_ : 1d-array = Weights after fitting.
        costs_ : list = Sum-of-squared-errors cost in every epoch.
    '''
    def __init__(self, eta=0.01, n_iter=50):
        self.eta = eta
        self.n_iter = n_iter

    def fit(self, X, y):
        '''Fit training data.
        Parameters
            X : {array-like}, shape = [n_samples, n_features] = Training vectors,
                where n_samples is the number of samples and n_features is the
                number of features.
            y : array-like, shape = [n_samples] = Target values.
        Returns
            self : object
        '''
        self.w_ = np.zeros(1 + X.shape[1])
        self.costs_ = []
        for i in range(self.n_iter):
            # One weight update per pass over the whole training set
            output = self.net_input(X)
            errors = (y - output)
            self.w_[1:] += self.eta * X.T.dot(errors)
            self.w_[0] += self.eta * errors.sum()
            cost = (errors ** 2).sum() / 2.0
            self.costs_.append(cost)
        return self

    def net_input(self, X):
        # Calculate net input
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def activation(self, X):
        # Compute linear activation (the identity for Adaline)
        return self.net_input(X)

    def predict(self, X):
        # Return class label after unit step
        return np.where(self.activation(X) >= 0.0, 1, -1)

if __name__ == "__main__":
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
    # Training data
    train = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None)
    X_train = train.drop([4], axis=1).values  # DataFrame converted to array
    y_train = train[4].values
    # Standardize the feature values (feature scaling) so each feature has the
    # properties of a standard normal distribution: mean 0, standard deviation 1.
    X_std = np.copy(X_train)
    X_std[:, 0] = (X_train[:, 0] - X_train[:, 0].mean()) / X_train[:, 0].std()
    X_std[:, 1] = (X_train[:, 1] - X_train[:, 1].mean()) / X_train[:, 1].std()
    #X_std[:, 2] = (X_train[:, 2] - X_train[:, 2].mean()) / X_train[:, 2].std()
    #X_std[:, 3] = (X_train[:, 3] - X_train[:, 3].mean()) / X_train[:, 3].std()
    y = np.where(y_train == 'Iris-setosa', -1, 1)  # one-vs-rest (OvR)
    # Observe the effect of the two hyper-parameters: learning rate and iterations
    fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8, 4))
    # eta=0.01, n_iter=20
    agd1 = AdalineGD(eta=0.01, n_iter=20).fit(X_std, y)
    print(agd1.predict([6.9, 3.0, 5.1, 1.8]))  # prediction (raw sample; see the sketch below)
    ax[0].plot(range(1, len(agd1.costs_) + 1), np.log10(agd1.costs_), marker='o')
    ax[0].set_xlabel('Epochs')
    ax[0].set_ylabel('log(Sum-Squared-error)')
    ax[0].set_title('Adaline-learning rate 0.01')
    # eta=0.0001, n_iter=20
    agd2 = AdalineGD(eta=0.0001, n_iter=20).fit(X_std, y)
    print(agd2.predict([6.9, 3.0, 5.1, 1.8]))  # prediction
    ax[1].plot(range(1, len(agd2.costs_) + 1), np.log10(agd2.costs_), marker='x')
    ax[1].set_xlabel('Epochs')
    ax[1].set_ylabel('log(Sum-Squared-error)')
    ax[1].set_title('Adaline-learning rate 0.0001')
    plt.show()
    end = time.perf_counter()
    print('finish all in %s' % str(end - start))
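One caveat in the listing above: only the first two features are standardized, and predict is then called on a raw, unscaled sample. A minimal sketch of the cleaner approach, assuming the AdalineGD class and the X_train and y arrays from the listing are in scope (the sample values are the ones used above):

    # Standardize all four features at once, vectorized over columns.
    mu, sigma = X_train.mean(axis=0), X_train.std(axis=0)
    X_all_std = (X_train - mu) / sigma
    agd = AdalineGD(eta=0.01, n_iter=20).fit(X_all_std, y)
    # A new sample must be scaled with the training mean/std before predicting.
    sample = np.array([6.9, 3.0, 5.1, 1.8])
    print(agd.predict((sample - mu) / sigma))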
# -*- coding: utf-8 -*-
'''
Created on 2017-12-21
@author: Jason.F
@summary: Adaline (adaptive linear neuron) learning algorithm, stochastic gradient descent version
'''
import numpy as np
import time
import matplotlib.pyplot as plt
import pandas as pd
from numpy.random import seed

class AdalineSGD(object):
    '''Adaptive Linear Neuron classifier (stochastic gradient descent).
    Hyper-parameters
        eta : float = Learning rate (between 0.0 and 1.0).
        n_iter : int = Passes over the training dataset.
        shuffle : bool (default: True) = Shuffles training data every epoch
            if True, to prevent cycles.
        random_state : int (default: None) = Random state for shuffling and
            initializing the weights.
    Attributes
        w_ : 1d-array = Weights after fitting.
        cost_ : list = Average per-sample cost in every epoch.
    '''
    def __init__(self, eta=0.01, n_iter=20, shuffle=True, random_state=None):
        self.eta = eta
        self.n_iter = n_iter
        self.w_initialized = False
        self.shuffle = shuffle
        if random_state:
            seed(random_state)

    def fit(self, X, y):
        '''Fit training data.
        Parameters
            X : {array-like}, shape = [n_samples, n_features] = Training vectors,
                where n_samples is the number of samples and n_features is the
                number of features.
            y : array-like, shape = [n_samples] = Target values.
        Returns
            self : object
        '''
        self._initialize_weights(X.shape[1])
        self.cost_ = []
        for i in range(self.n_iter):
            if self.shuffle:
                X, y = self._shuffle(X, y)
            cost = []
            for xi, target in zip(X, y):
                # One weight update per training sample
                cost.append(self._update_weights(xi, target))
            avg_cost = sum(cost) / len(y)
            self.cost_.append(avg_cost)
        return self

    def partial_fit(self, X, y):
        # Fit training data without reinitializing the weights (online learning)
        if not self.w_initialized:
            self._initialize_weights(X.shape[1])
        if y.ravel().shape[0] > 1:
            for xi, target in zip(X, y):
                self._update_weights(xi, target)
        else:
            self._update_weights(X, y)
        return self

    def _shuffle(self, X, y):
        # Shuffle training data
        r = np.random.permutation(len(y))
        return X[r], y[r]

    def _initialize_weights(self, m):
        # Initialize weights to zeros
        self.w_ = np.zeros(1 + m)
        self.w_initialized = True

    def _update_weights(self, xi, target):
        # Apply the Adaline learning rule to update the weights for one sample
        output = self.net_input(xi)
        error = (target - output)
        self.w_[1:] += self.eta * xi.dot(error)
        self.w_[0] += self.eta * error
        cost = 0.5 * error ** 2
        return cost

    def net_input(self, X):
        # Calculate net input
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def activation(self, X):
        # Compute linear activation (the identity for Adaline)
        return self.net_input(X)

    def predict(self, X):
        # Return class label after unit step
        return np.where(self.activation(X) >= 0.0, 1, -1)

if __name__ == "__main__":
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
    # Training data
    train = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None)
    X_train = train.drop([4], axis=1).values  # DataFrame converted to array
    y_train = train[4].values
    # Standardize the feature values: mean 0 and standard deviation 1 per feature
    X_std = np.copy(X_train)
    X_std[:, 0] = (X_train[:, 0] - X_train[:, 0].mean()) / X_train[:, 0].std()
    X_std[:, 1] = (X_train[:, 1] - X_train[:, 1].mean()) / X_train[:, 1].std()
    #X_std[:, 2] = (X_train[:, 2] - X_train[:, 2].mean()) / X_train[:, 2].std()
    #X_std[:, 3] = (X_train[:, 3] - X_train[:, 3].mean()) / X_train[:, 3].std()
    y = np.where(y_train == 'Iris-setosa', -1, 1)  # one-vs-rest (OvR)
    # Observe the effect of the two hyper-parameters: learning rate and iterations
    fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8, 4))
    # eta=0.01, n_iter=20
    agd1 = AdalineSGD(eta=0.01, n_iter=20, random_state=1).fit(X_std, y)
    print(agd1.predict([6.9, 3.0, 5.1, 1.8]))  # prediction
    ax[0].plot(range(1, len(agd1.cost_) + 1), agd1.cost_, marker='o')
    ax[0].set_xlabel('Epochs')
    ax[0].set_ylabel('Average Cost')
    ax[0].set_title('Adaline-learning rate 0.01')
    # eta=0.0001, n_iter=20
    agd2 = AdalineSGD(eta=0.0001, n_iter=20, random_state=1).fit(X_std, y)
    print(agd2.predict([6.9, 3.0, 5.1, 1.8]))  # prediction
    ax[1].plot(range(1, len(agd2.cost_) + 1), agd2.cost_, marker='x')
    ax[1].set_xlabel('Epochs')
    ax[1].set_ylabel('Average Cost')
    ax[1].set_title('Adaline-learning rate 0.0001')
    plt.show()
    # Test online updating with a single additional sample
    print(agd2.w_)  # weights before the update
    agd2.partial_fit(X_std[0, :], y[0])
    print(agd2.w_)  # weights after the update
    end = time.perf_counter()
    print('finish all in %s' % str(end - start))
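Because partial_fit keeps the learned weights, the SGD variant also supports incremental (online) learning on a stream of samples. A minimal sketch, assuming AdalineSGD plus the X_std and y arrays from the listing above are in scope; the 100/50 split is arbitrary and purely illustrative:

    # Fit once on the data available so far, then fold in new
    # observations one at a time as they arrive.
    online = AdalineSGD(eta=0.01, n_iter=15, random_state=1).fit(X_std[:100], y[:100])
    for xi, target in zip(X_std[100:], y[100:]):
        online.partial_fit(xi, target)  # updates weights without reinitializing
    print(online.predict(X_std[100:]))  # labels for the newly seen samples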
The original post also included a plot of the same experiment without feature standardization, for comparison. (Figure not reproduced here.)
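To reproduce that comparison, train AdalineGD directly on the raw features. A minimal sketch, again assuming the class and the X_train/y arrays from the first listing are in scope; without scaling, eta=0.01 typically overshoots and the cost grows from epoch to epoch, which is exactly why the plots above use a log scale:

    # Train on the raw (unstandardized) features for contrast.
    raw = AdalineGD(eta=0.01, n_iter=10).fit(X_train, y)
    plt.plot(range(1, len(raw.costs_) + 1), np.log10(raw.costs_), marker='o')
    plt.xlabel('Epochs')
    plt.ylabel('log(Sum-Squared-error)')
    plt.title('Adaline without standardization (eta=0.01)')
    plt.show()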
Summary
The two listings compare batch and stochastic gradient descent for Adaline at learning rates 0.01 and 0.0001 on standardized Iris features; the SGD variant updates per sample, shuffles the data each epoch to avoid cycles, and supports online updates through partial_fit.