2 - Origins of Neural Networks - demo3 - Bike Sharing: Mini-Batch Training with Multiple Hidden Layers (Answer)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Keep pandas from truncating columns when printing
pd.set_option('display.max_columns', 1000)
pd.set_option('display.width', 1000)
pd.set_option('display.max_colwidth', 1000)

# Load the data
data_path = './Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)


def f1():
    print(rides.head())
    print(rides.describe())
    rides.info()


def f2(rides):
    """
    1. Season, weather, month, hour and weekday are categorical variables
       and need to be converted to dummy (one-hot) variables.
    """
    dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
    for each in dummy_fields:
        dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
        rides = pd.concat([rides, dummies], axis=1)
    """
    2. Besides the original categorical columns, the following columns are dropped
       as well. Think about why:
       1. instant    - record index
       2. dteday     - the calendar date
       3. atemp      - "feels like" temperature, redundant with temp
       4. workingday - redundant with weekday
    """
    fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
                      'weekday', 'atemp', 'mnth', 'workingday', 'hr']
    data = rides.drop(fields_to_drop, axis=1)
    print(data.head())
    # rides[:24 * 10].plot(x='dteday', y='cnt')
    # plt.show()
    return data


def f3(data):
    """
    Standardize the continuous variables.
    Note: cnt is the target.
    """
    quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
    # Save the scaling factors so predictions can be converted back to real counts later.
    scaled_features = {}
    for each in quant_features:
        mean, std = data[each].mean(), data[each].std()
        scaled_features[each] = [mean, std]
        data.loc[:, each] = (data[each] - mean) / std
    return data, scaled_features


def f4(data):
    """
    Split the data set and separate the features from the targets.
    """
    # Keep the last 21 days as the test set.
    test_data = data[-21 * 24:]
    # Remove the last 21 days; the rest is the training set.
    data = data[:-21 * 24]
    # Separate the features from the targets.
    target_fields = ['cnt', 'casual', 'registered']
    features, targets = data.drop(target_fields, axis=1), data[target_fields]
    test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
    return features, targets, test_features, test_targets


def f5(features, targets):
    """
    Use the last 60 days of the training data as the validation set,
    which is used to check the model during training.
    """
    train_features, train_targets = features[:-60 * 24], targets[:-60 * 24]
    val_features, val_targets = features[-60 * 24:], targets[-60 * 24:]
    return train_features, train_targets, val_features, val_targets


class NeuralNetworkMultiHidden(object):
    def __init__(self, input_nodes, hidden_nodes1, hidden_nodes2, output_nodes, learning_rate,
                 keep_prob=0.75, batch_size=128):
        """
        :param input_nodes: number of input nodes (number of features)
        :param hidden_nodes1: number of nodes in the first hidden layer
        :param hidden_nodes2: number of nodes in the second hidden layer
        :param output_nodes: number of output nodes
        :param learning_rate: learning rate
        """
        # Set number of nodes in input, hidden and output layers.
        self.input_nodes = input_nodes
        self.hidden_nodes1 = hidden_nodes1
        self.hidden_nodes2 = hidden_nodes2
        self.output_nodes = output_nodes
        self.batch_size = batch_size
        self.lr = learning_rate
        self.keep_prob = keep_prob

        # Initialize the weights with std = 1 / sqrt(fan_in).
        # (The original code used 1 / fan_in ** -0.5, i.e. sqrt(fan_in), which blows the scale up.)
        self.weights_input_to_h1 = np.random.normal(
            0.0, self.input_nodes ** -0.5,
            size=(self.input_nodes, self.hidden_nodes1))
        self.weights_h1_to_h2 = np.random.normal(
            0.0, self.hidden_nodes1 ** -0.5,
            size=(self.hidden_nodes1, self.hidden_nodes2))
        self.weights_h2_to_output = np.random.normal(
            0.0, self.hidden_nodes2 ** -0.5,
            size=(self.hidden_nodes2, self.output_nodes))

        # Sigmoid activation for the hidden layers.
        self.activation_function = lambda x: 1 / (1 + np.exp(-x))

    def get_batches(self, features, targets):
        assert len(features) == len(targets)
        for i in range(0, len(features), self.batch_size):
            yield features[i: i + self.batch_size], targets[i: i + self.batch_size]

    def drop_out(self, x):
        """
        Inverted dropout: build a mask that keeps each unit with probability
        keep_prob and scales the kept units by 1 / keep_prob, so the expected
        activation is unchanged and no rescaling is needed at prediction time.
        """
        mask = (np.random.rand(*x.shape) < self.keep_prob) / self.keep_prob
        return mask

    def train_batch(self, features, targets):
        """
        Mini-batch gradient descent (MBGD).
        """
        for batch_x, batch_y in self.get_batches(features, targets):
            # 1. Forward pass
            # Hidden layers (sigmoid + dropout)
            h1_inputs = np.matmul(batch_x, self.weights_input_to_h1)
            h1_outputs = self.activation_function(h1_inputs)
            h1_outputs = h1_outputs * self.drop_out(h1_outputs)
            h2_inputs = np.matmul(h1_outputs, self.weights_h1_to_h2)
            h2_outputs = self.activation_function(h2_inputs)
            h2_outputs = h2_outputs * self.drop_out(h2_outputs)
            # Output layer: identity activation (regression)
            final_inputs = np.matmul(h2_outputs, self.weights_h2_to_output)
            y_hat = final_inputs

            # 2. Backward pass
            # 2.1 Output error
            error = y_hat - batch_y.reshape([-1, 1])  # [batch, 1]
            # 2.2 Output error term (derivative of the identity activation is 1)
            output_error_term = error * 1
            # 2.3 Gradient of the h2 -> output weights
            delta_h2_to_output = np.matmul(np.transpose(h2_outputs), output_error_term) / self.batch_size
            # 2.4 Error term of hidden layer h2 (sigmoid derivative: h2 * (1 - h2))
            h2_error_term = np.matmul(output_error_term, self.weights_h2_to_output.transpose()) \
                            * h2_outputs * (1 - h2_outputs)
            # 2.5 Gradient of the h1 -> h2 weights
            delta_h1_to_h2 = np.matmul(np.transpose(h1_outputs), h2_error_term) / self.batch_size
            # 2.6 Error term of hidden layer h1
            h1_error_term = np.matmul(h2_error_term, self.weights_h1_to_h2.transpose()) \
                            * h1_outputs * (1 - h1_outputs)
            # 2.7 Gradient of the input -> h1 weights
            delta_input_to_h1 = np.matmul(np.transpose(batch_x), h1_error_term) / self.batch_size
            # 2.8 Gradient descent step
            self.weights_input_to_h1 -= delta_input_to_h1 * self.lr
            self.weights_h1_to_h2 -= delta_h1_to_h2 * self.lr
            self.weights_h2_to_output -= delta_h2_to_output * self.lr

    def run(self, features):
        """
        Prediction: run a single forward pass on the input features
        (no dropout at prediction time).
        features: DataFrame of feature values
        """
        # Hidden layers
        h1_inputs = np.matmul(features.values, self.weights_input_to_h1)
        h1_outputs = self.activation_function(h1_inputs)
        h2_inputs = np.matmul(h1_outputs, self.weights_h1_to_h2)
        h2_outputs = self.activation_function(h2_inputs)
        # Output layer
        final_inputs = np.matmul(h2_outputs, self.weights_h2_to_output)
        y_hat = final_inputs
        return y_hat


def MSE(y, Y):
    return np.mean((y - Y) ** 2)
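Before plugging the class into the full pipeline, it can help to smoke-test it on random data. The snippet below is a minimal sketch that is not part of the original script: the helper name _sanity_check, the 56-feature width and the 512-sample size are illustrative assumptions. It runs a few mini-batch updates and checks that the weight matrices keep the expected shapes:

def _sanity_check():
    rng = np.random.RandomState(0)
    X = pd.DataFrame(rng.rand(512, 56))   # 512 fake samples, 56 features
    y = rng.rand(512)                     # fake standardized targets
    # keep_prob=1.0 effectively disables dropout, making the check easier to compare across runs.
    net = NeuralNetworkMultiHidden(56, 8, 7, 1, learning_rate=0.1,
                                   keep_prob=1.0, batch_size=128)
    loss_before = MSE(net.run(X).T, y)
    for _ in range(50):
        net.train_batch(X.values, y)
    loss_after = MSE(net.run(X).T, y)
    # The weight matrices keep their shapes after training.
    assert net.weights_input_to_h1.shape == (56, 8)
    assert net.weights_h1_to_h2.shape == (8, 7)
    assert net.weights_h2_to_output.shape == (7, 1)
    print('MSE before: {:.4f}, after: {:.4f}'.format(loss_before, loss_after))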
# Plot the training and validation loss collected during training
def show(losses):
    plt.plot(losses['train'], label='Training loss')
    plt.plot(losses['validation'], label='Validation loss')
    plt.legend()
    _ = plt.ylim()
    plt.show()


def test(network, scaled_features, test_features, test_targets, rides):
    """Plot predictions against the actual counts for the 21-day test period."""
    fig, ax = plt.subplots(figsize=(8, 4))
    mean, std = scaled_features['cnt']
    predictions = network.run(test_features).T * std + mean
    ax.plot(predictions[0], label='Prediction')
    ax.plot((test_targets['cnt'] * std + mean).values, label='Data')
    # predictions has shape (1, n_samples) after the transpose, so use predictions[0] here.
    ax.set_xlim(right=len(predictions[0]))
    ax.legend()
    dates = pd.to_datetime(rides.iloc[test_features.index]['dteday'])
    dates = dates.apply(lambda d: d.strftime('%b %d'))
    ax.set_xticks(np.arange(len(dates))[12::24])
    _ = ax.set_xticklabels(dates[12::24], rotation=45)
    plt.show()


if __name__ == '__main__':
    # f1()
    data = f2(rides)
    data, scaled_features = f3(data)
    features, targets, test_features, test_targets = f4(data)
    train_features, train_targets, val_features, val_targets = f5(features, targets)

    # Hyper-parameters
    epochs = 3000          # number of training iterations
    learning_rate = 0.1    # learning rate
    hidden_nodes1 = 8      # hidden-layer sizes determine the model's capacity
    hidden_nodes2 = 7
    output_nodes = 1       # number of output nodes
    batch_size = 256
    keep_prob = 0.8
    input_nodes = train_features.shape[1]

    network = NeuralNetworkMultiHidden(input_nodes, hidden_nodes1, hidden_nodes2, output_nodes,
                                       learning_rate, keep_prob=keep_prob, batch_size=batch_size)
    losses = {'train': [], 'validation': []}
    for epoch in range(epochs):
        network.train_batch(train_features.values, train_targets['cnt'].values)
        # Track the training progress
        train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
        val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
        if epoch % 20 == 0:
            print('Epoch: {}, train loss: {}, validation loss: {}'.format(epoch, train_loss, val_loss))
        losses['train'].append(train_loss)
        losses['validation'].append(val_loss)

    show(losses)
    test(network, scaled_features, test_features, test_targets, rides)
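Because cnt is standardized in f3, the network's predictions come out in standardized units and have to be mapped back to real hourly counts before they mean anything, which is what test() does with the factors stored in scaled_features. A minimal sketch of that inverse transform, assuming the variables from the __main__ block above are in scope:

mean, std = scaled_features['cnt']                      # scaling factors saved by f3
preds_scaled = network.run(test_features)               # shape [n_samples, 1], standardized units
preds_counts = preds_scaled * std + mean                # back to actual hourly ride counts
actual_counts = (test_targets['cnt'] * std + mean).values
print(preds_counts[:5].ravel())
print(actual_counts[:5])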
Console output from running the script in the PyCharm console (Python 3.6.5, Anaconda, 64-bit, win32; IPython 6.4.0):
runfile('D:/AI20/HJZ/04-深度學習/1-深度學習入門/深度學習項目/AI20_單車__小批量多隱藏層_答案.py', wdir='D:/AI20/HJZ/04-深度學習/1-深度學習入門/深度學習項目')

yr holiday temp hum windspeed casual registered cnt season_1 season_2 season_3 season_4 weathersit_1 weathersit_2 weathersit_3 weathersit_4 mnth_1 mnth_2 mnth_3 mnth_4 mnth_5 mnth_6 mnth_7 mnth_8 mnth_9 mnth_10 mnth_11 mnth_12 hr_0 hr_1 hr_2 hr_3 hr_4 hr_5 hr_6 hr_7 hr_8 hr_9 hr_10 hr_11 hr_12 hr_13 hr_14 hr_15 hr_16 hr_17 hr_18 hr_19 hr_20 hr_21 hr_22 hr_23 weekday_0 weekday_1 weekday_2 weekday_3 weekday_4 weekday_5 weekday_6
0 0 0 0.24 0.81 0.0 3 13 16 1 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
1 0 0 0.22 0.80 0.0 8 32 40 1 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
2 0 0 0.22 0.80 0.0 5 27 32 1 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
3 0 0 0.24 0.75 0.0 3 10 13 1 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
4 0 0 0.24 0.75 0.0 0 1 1 1 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
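The season_*, weathersit_*, mnth_*, hr_* and weekday_* columns in the frame above are the dummy variables created by the pd.get_dummies call in f2. As a quick standalone illustration on toy data (not the real dataset; newer pandas versions may show the indicator columns as booleans instead of 0/1):

toy = pd.DataFrame({'season': [1, 2, 2, 4]})
print(pd.get_dummies(toy['season'], prefix='season'))
# Expected layout: one indicator column per observed value
#    season_1  season_2  season_4
# 0         1         0         0
# 1         0         1         0
# 2         0         1         0
# 3         0         0         1

The training log from the same run follows.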
Epoch: 0, train loss: 1.4049598343209766, validation loss: 1.4134166663142764
Epoch: 20, train loss: 1.191033061166735, validation loss: 1.2931418136345434
Epoch: 40, train loss: 1.1984009623831025, validation loss: 1.2920684294502167
Epoch: 60, train loss: 1.2128614744725161, validation loss: 1.2890902536743876
Epoch: 80, train loss: 1.1920010882383159, validation loss: 1.2781289512063165
Epoch: 100, train loss: 1.2006819864036042, validation loss: 1.2669214154589938
Epoch: 120, train loss: 1.201754896935542, validation loss: 1.2543690660827465
Epoch: 140, train loss: 1.191243393043961, validation loss: 1.2386061002917468
Epoch: 160, train loss: 1.1663420353206133, validation loss: 1.2133403530787898
Epoch: 180, train loss: 1.1854967589152083, validation loss: 1.2038260345349165
Epoch: 200, train loss: 1.1795672537011166, validation loss: 1.1823249575128574
Epoch: 220, train loss: 1.1682780833044055, validation loss: 1.1699543809357753
Epoch: 240, train loss: 1.1636495190832759, validation loss: 1.153493583812518
Epoch: 260, train loss: 1.1683817941847439, validation loss: 1.1530397929143232
Epoch: 280, train loss: 1.1704024692290609, validation loss: 1.1463094551941384
Epoch: 300, train loss: 1.167836729294609, validation loss: 1.1380138358655458
Epoch: 320, train loss: 1.152206893299751, validation loss: 1.1268447929175738
Epoch: 340, train loss: 1.1398153088544771, validation loss: 1.1141464353215857
Epoch: 360, train loss: 1.1399555109674233, validation loss: 1.1173589548792964
Epoch: 380, train loss: 1.1339803999528923, validation loss: 1.1058483596575561
Epoch: 400, train loss: 1.127499637809135, validation loss: 1.0943699867988634
Epoch: 420, train loss: 1.130508289149735, validation loss: 1.094848660480854
Epoch: 440, train loss: 1.1226987785233933, validation loss: 1.0868869679815947
Epoch: 460, train loss: 1.1124556323539798, validation loss: 1.0880393700260946
Epoch: 480, train loss: 1.0947664417341665, validation loss: 1.0822969246723875
Epoch: 500, train loss: 1.102019574807323, validation loss: 1.0785993278226347
Epoch: 520, train loss: 1.0756653061870864, validation loss: 1.0739512911587645
Epoch: 540, train loss: 1.0848392163051497, validation loss: 1.071863377072026
Epoch: 560, train loss: 1.0726029326883686, validation loss: 1.0662775420905442
Epoch: 580, train loss: 1.0573534852202195, validation loss: 1.0714235515825197
Epoch: 600, train loss: 1.0383465103374563, validation loss: 1.061587838801731
Epoch: 620, train loss: 1.025923333984107, validation loss: 1.0653288968309416
Epoch: 640, train loss: 1.0369456811847073, validation loss: 1.0713373240243294
Epoch: 660, train loss: 1.0364001667139042, validation loss: 1.0640935566218894
Epoch: 680, train loss: 1.0400004910395533, validation loss: 1.065681599324164
Epoch: 700, train loss: 1.050995284262165, validation loss: 1.0647670884860212
Epoch: 720, train loss: 1.018940240035332, validation loss: 1.059287874059955
Epoch: 740, train loss: 1.0363819632576774, validation loss: 1.066276341690848
Epoch: 760, train loss: 1.028426813397832, validation loss: 1.0623961167773956
Epoch: 780, train loss: 1.0189745034921762, validation loss: 1.0624564548702664
Epoch: 800, train loss: 1.0535661337931692, validation loss: 1.0668193865406113
Epoch: 820, train loss: 1.0266509285859535, validation loss: 1.068206513591057
Epoch: 840, train loss: 1.0331621655804455, validation loss: 1.0726722273363603
Epoch: 860, train loss: 1.0251174045688107, validation loss: 1.0654138978355179
Epoch: 880, train loss: 1.0304148623045695, validation loss: 1.0656984457855712
Epoch: 900, train loss: 1.024423315282759, validation loss: 1.0700745167114185
Epoch: 920, train loss: 1.032328343800837, validation loss: 1.0624130680251542
Epoch: 940, train loss: 1.0283687484682102, validation loss: 1.0773623767903766
Epoch: 960, train loss: 1.0134267116061493, validation loss: 1.074257335227338
Epoch: 980, train loss: 1.0110943715100067, validation loss: 1.0785358764065802
Epoch: 1000, train loss: 1.0137399554429194, validation loss: 1.0891244094584875
Epoch: 1020, train loss: 0.9976091453098368, validation loss: 1.1036601448902938
Epoch: 1040, train loss: 1.0025455852046654, validation loss: 1.1078044097173811
Epoch: 1060, train loss: 0.9971201783057869, validation loss: 1.1097782177478874
Epoch: 1080, train loss: 1.0077135024045787, validation loss: 1.1171342596149176
Epoch: 1100, train loss: 0.989435521338311, validation loss: 1.1163947695415941
Epoch: 1120, train loss: 1.0046778115903863, validation loss: 1.116652012791937
Epoch: 1140, train loss: 0.9890504798273926, validation loss: 1.116016089697368
Epoch: 1160, train loss: 0.9843516216949189, validation loss: 1.1052671666140421
Epoch: 1180, train loss: 1.0064311979803187, validation loss: 1.1049940415771407
Epoch: 1200, train loss: 0.998841749230865, validation loss: 1.0985993017086981
Epoch: 1220, train loss: 0.9862143891130666, validation loss: 1.0903739899913933
Epoch: 1240, train loss: 0.9810610274347947, validation loss: 1.0885378134649732
Epoch: 1260, train loss: 0.9792300562416918, validation loss: 1.077161582076293
Epoch: 1280, train loss: 0.9760627779270655, validation loss: 1.0730553186056613
Epoch: 1300, train loss: 0.967982277710496, validation loss: 1.0630035815717724
Epoch: 1320, train loss: 0.9717968522519344, validation loss: 1.053997539353016
Epoch: 1340, train loss: 0.9743654642216101, validation loss: 1.0558319503303484
Epoch: 1360, train loss: 0.9680533949739022, validation loss: 1.0506383794413203
Epoch: 1380, train loss: 0.9709026169373867, validation loss: 1.0532973030862018
Epoch: 1400, train loss: 0.974128474316653, validation loss: 1.0498922921766818
Epoch: 1420, train loss: 0.9785149046759605, validation loss: 1.0408913252615668
Epoch: 1440, train loss: 0.9537413724379682, validation loss: 1.0381164051752785
Epoch: 1460, train loss: 0.965637541159986, validation loss: 1.035939389690247
Epoch: 1480, train loss: 0.9795617178366891, validation loss: 1.0330948779622118
Epoch: 1500, train loss: 0.9830199380134964, validation loss: 1.023296422546772
Epoch: 1520, train loss: 0.9718277653587923, validation loss: 1.02374833235857
Epoch: 1540, train loss: 0.9871744709311929, validation loss: 1.0181642903151176
Epoch: 1560, train loss: 0.9645553513132961, validation loss: 1.014631074593417
Epoch: 1580, train loss: 0.95745665627693, validation loss: 1.011575426466816
Epoch: 1600, train loss: 0.9891756647749831, validation loss: 1.0059939313593158
Epoch: 1620, train loss: 0.9736988068828757, validation loss: 1.0091536385132531
Epoch: 1640, train loss: 0.9960204223543437, validation loss: 1.0033607875054935
Epoch: 1660, train loss: 0.9854556298410954, validation loss: 1.0002568588738163
Epoch: 1680, train loss: 0.9830104575963702, validation loss: 1.0005752641230383
Epoch: 1700, train loss: 0.9937353633299877, validation loss: 0.9981942073966672
Epoch: 1720, train loss: 0.9930644015954275, validation loss: 0.994249323972036
Epoch: 1740, train loss: 0.9921303783625928, validation loss: 0.9930527726071848
Epoch: 1760, train loss: 1.0012668782395846, validation loss: 0.9936678586296049
Epoch: 1780, train loss: 1.0046846570351382, validation loss: 0.9904557116907274
Epoch: 1800, train loss: 0.9753986592440668, validation loss: 0.9858319854643595
Epoch: 1820, train loss: 0.9862383235682017, validation loss: 0.9807620309667786
Epoch: 1840, train loss: 0.997360058261536, validation loss: 0.9883861735630757
Epoch: 1860, train loss: 0.993343092045557, validation loss: 0.9859186058806008
Epoch: 1880, train loss: 0.9996648339110443, validation loss: 0.9836449063128594
Epoch: 1900, train loss: 0.9890779499154683, validation loss: 0.988435019455184
Epoch: 1920, train loss: 0.9936278333526469, validation loss: 0.9842544956102532
Epoch: 1940, train loss: 0.983567206487514, validation loss: 0.9860559344032238
Epoch: 1960, train loss: 0.9886528359516243, validation loss: 0.9860672916553115
Epoch: 1980, train loss: 1.0003637487457928, validation loss: 0.987138283627506
Epoch: 2000, train loss: 1.0123710735183136, validation loss: 0.9902226985469503
Epoch: 2020, train loss: 0.9904608835359283, validation loss: 0.9854193298272445
Epoch: 2040, train loss: 1.0054704041102138, validation loss: 0.9874762958896658
Epoch: 2060, train loss: 1.027581260124156, validation loss: 0.991672992135386
Epoch: 2080, train loss: 0.989565406914043, validation loss: 0.984916443161018
Epoch: 2100, train loss: 1.0084468607171728, validation loss: 0.9886431169322287
Epoch: 2120, train loss: 0.9942951112223293, validation loss: 0.9838803991380857
Epoch: 2140, train loss: 1.005621205659498, validation loss: 0.9826214487037916
Epoch: 2160, train loss: 0.9970232280450297, validation loss: 0.9811441899587124
Epoch: 2180, train loss: 1.0109055419085602, validation loss: 0.9817208963062901
Epoch: 2200, train loss: 0.9998017953503151, validation loss: 0.9802820253130723
Epoch: 2220, train loss: 0.9983370483596552, validation loss: 0.979627235574009
Epoch: 2240, train loss: 1.0094413724929017, validation loss: 0.9841338397252996
Epoch: 2260, train loss: 1.005974330859626, validation loss: 0.9818251741135302
Epoch: 2280, train loss: 1.0142675598363247, validation loss: 0.9837754496291757
Epoch: 2300, train loss: 1.0127720127438498, validation loss: 0.9828429365857131
Epoch: 2320, train loss: 1.0104569904204563, validation loss: 0.9800861853389657
Epoch: 2340, train loss: 1.0093547783740506, validation loss: 0.977398843006523
Epoch: 2360, train loss: 1.0110058263991355, validation loss: 0.9815249536367491
Epoch: 2380, train loss: 1.0157069324371437, validation loss: 0.9824352086252541
Epoch: 2400, train loss: 1.0069441529695864, validation loss: 0.9802546909219414
Epoch: 2420, train loss: 0.998629288979768, validation loss: 0.9757994016194351
Epoch: 2440, train loss: 1.0264442542787664, validation loss: 0.9789124302218697
Epoch: 2460, train loss: 0.9858533087585518, validation loss: 0.9753708653306028
Epoch: 2480, train loss: 1.0149051626536327, validation loss: 0.9792163908030725
Epoch: 2500, train loss: 1.0257094713748163, validation loss: 0.9790813165266272
Epoch: 2520, train loss: 1.0064263925393622, validation loss: 0.9716495650211158
Epoch: 2540, train loss: 1.0074456081753316, validation loss: 0.9713535169777512
Epoch: 2560, train loss: 1.0045156753470388, validation loss: 0.9722775864599293
Epoch: 2580, train loss: 1.0116777234135117, validation loss: 0.9734898859818268
Epoch: 2600, train loss: 1.008474365142438, validation loss: 0.9676925039978643
Epoch: 2620, train loss: 1.0155960298865223, validation loss: 0.9713532357071608
Epoch: 2640, train loss: 1.042803244913576, validation loss: 0.9750837335994059
Epoch: 2660, train loss: 1.027546771694612, validation loss: 0.9724091714158932
Epoch: 2680, train loss: 1.0336935638889095, validation loss: 0.9719282835539361
Epoch: 2700, train loss: 1.0092632145747278, validation loss: 0.9717977290574962
Epoch: 2720, train loss: 1.0271627014773743, validation loss: 0.9742688881038355
Epoch: 2740, train loss: 1.0137922202722167, validation loss: 0.9750051808328831
Epoch: 2760, train loss: 1.0057584026865223, validation loss: 0.9726964652883073
Epoch: 2780, train loss: 1.0033088589710482, validation loss: 0.9700467223899546
Epoch: 2800, train loss: 1.006040099471596, validation loss: 0.9738564921842755
Epoch: 2820, train loss: 1.011735558239328, validation loss: 0.9783865020181645
Epoch: 2840, train loss: 0.9833877463044207, validation loss: 0.9769258604374474
Epoch: 2860, train loss: 1.027951700601824, validation loss: 0.9849864397762739
Epoch: 2880, train loss: 1.0417753524731448, validation loss: 0.9869484745302265
Epoch: 2900, train loss: 1.0116816678915919, validation loss: 0.9849301034760884
Epoch: 2920, train loss: 1.0111170503145037, validation loss: 0.9858138725738876
Epoch: 2940, train loss: 1.0329653851686955, validation loss: 0.9948427598844941
Epoch: 2960, train loss: 1.0149997393003969, validation loss: 0.9955813486192334
Epoch: 2980, train loss: 1.0111855326181511, validation loss: 0.9933412663413201
If you get an error mentioning test when running the file: PyCharm may be executing it with its pytest runner instead of plain Python. Because the script defines a function named test, PyCharm's default run configuration for the file can be created as a pytest run rather than an ordinary Python run. To fix the pytest issue, switch the run configuration back to a plain Python run.
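Another workaround (my suggestion, not from the original post) is to rename the plotting function so its name no longer starts with test; pytest collects module-level functions with that prefix, so after the rename PyCharm has no reason to run the file as a test:

# Hypothetical rename of the test() function defined above (shown here in shortened form);
# the new name avoids the "test" prefix that triggers test collection.
def show_test_predictions(network, scaled_features, test_features, test_targets, rides):
    fig, ax = plt.subplots(figsize=(8, 4))
    mean, std = scaled_features['cnt']
    predictions = network.run(test_features).T * std + mean
    ax.plot(predictions[0], label='Prediction')
    ax.plot((test_targets['cnt'] * std + mean).values, label='Data')
    ax.legend()
    plt.show()

# and in the __main__ block, call it instead of test():
# show_test_predictions(network, scaled_features, test_features, test_targets, rides)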