生活随笔
收集整理的這篇文章主要介紹了
【Kaggle-MNIST之路】CNN结构再改进+交叉熵损失函数(六)
小編覺得挺不錯的,現在分享給大家,幫大家做個參考.
簡述
- 這里再添加了一個卷積層。用一個kernel=4的卷積層做一個卷積之后,再做映射。
- 基于之前的一個版本
- 【Kaggle-MNIST之路】CNN結構改進+改進過的損失函數(五)
- 成績:0.9898
- 排名:1000+
代碼
- 注意,下一個版本會重新安排架構。
- 這是本次架構的最后一個版本了。
import pandas
as pd
import torch
.utils
.data
as data
import torch
import torch
.nn
as nn
# Path to the Kaggle MNIST training CSV and the Adam learning rate.
file = './all/train.csv'
LR = 0.01


class MNISTCSVDataset(data.Dataset):
    """Streams the Kaggle MNIST train.csv in 100-row chunks.

    NOTE(review): ``__getitem__`` ignores ``idx`` and simply reads the next
    100 rows from the CSV iterator, so each "item" is really a 100-sample
    batch and DataLoader shuffling does not shuffle rows — confirm this
    sequential-chunk behaviour is intended.
    """

    def __init__(self, csv_file, Train=True):
        # iterator=True yields a TextFileReader: rows are read lazily via
        # get_chunk() instead of loading the whole file into memory.
        self.dataframe = pd.read_csv(csv_file, iterator=True)
        self.Train = Train

    def __len__(self):
        # Kaggle's MNIST split sizes: 42000 labelled train rows, 28000 test rows.
        if self.Train:
            return 42000
        else:
            return 28000

    def __getitem__(self, idx):
        # Read the next 100 rows regardless of idx (sequential streaming).
        # Renamed from `data` to avoid shadowing the torch.utils.data alias.
        chunk = self.dataframe.get_chunk(100)
        # .as_matrix() and .ix were removed from pandas; .to_numpy() and
        # .iloc produce the same values with the supported API.
        ylabel = chunk['label'].to_numpy().astype('float')
        xdata = chunk.iloc[:, 1:].to_numpy().astype('float')
        return ylabel, xdata
class CNN(nn.Module):
    """Convolutional classifier: three conv stages then a linear head.

    Takes (N, 1, 28, 28) float images and returns (N, 10) unnormalised
    class scores (logits).
    """

    def __init__(self):
        super(CNN, self).__init__()

        def conv_stage(cin, cout):
            # Two 3x3 convs followed by a stride-2 5x5 conv that halves
            # the spatial size; each conv gets ReLU + BatchNorm, and the
            # stage ends with Dropout(0.4).
            return nn.Sequential(
                nn.Conv2d(in_channels=cin, out_channels=cout, kernel_size=3, stride=1),
                nn.ReLU(),
                nn.BatchNorm2d(cout),
                nn.Conv2d(in_channels=cout, out_channels=cout, kernel_size=3, stride=1),
                nn.ReLU(),
                nn.BatchNorm2d(cout),
                nn.Conv2d(in_channels=cout, out_channels=cout, kernel_size=5, stride=2, padding=2),
                nn.ReLU(),
                nn.BatchNorm2d(cout),
                nn.Dropout(0.4),
            )

        self.layer1 = conv_stage(1, 32)   # 28x28 -> 12x12
        self.layer2 = conv_stage(32, 64)  # 12x12 -> 4x4
        # A 4x4 kernel collapses the remaining 4x4 map to 1x1, 128 channels.
        self.layer3 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=4, stride=1),
            nn.ReLU(),
            nn.BatchNorm2d(128),
        )
        self.layer4 = nn.Linear(128, 10)

    def forward(self, x):
        """Apply the three conv stages, flatten, and produce 10 scores."""
        for stage in (self.layer1, self.layer2, self.layer3):
            x = stage(x)
        flat = x.view(x.size(0), -1)
        return self.layer4(flat)


net = CNN()
# --- Training loop: cross-entropy + Adam over the chunked CSV dataset ---
loss_function = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=LR)
EPOCH = 10

for epoch in range(EPOCH):
    # Re-create the dataset each epoch: the pandas CSV iterator is consumed
    # as chunks are read, so it cannot be rewound and re-used.
    mydataset = MNISTCSVDataset(file)
    train_loader = torch.utils.data.DataLoader(mydataset, batch_size=1, shuffle=True)
    print('epoch %d' % epoch)
    for step, (yl, xd) in enumerate(train_loader):
        # Each dataset "item" is a 100-row chunk; -1 infers the chunk size
        # instead of hard-coding 100, so a short final chunk still works.
        xd = xd.reshape(-1, 1, 28, 28).float()
        output = net(xd)
        # CrossEntropyLoss requires integer class indices.
        yl = yl.long()
        loss = loss_function(output, yl.squeeze())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if step % 40 == 0:
            # .item() prints the scalar loss instead of the tensor repr.
            print('step %d' % step, loss.item())

# NOTE(review): saves the whole pickled module, not just state_dict —
# loading requires the identical class definition on the import path.
torch.save(net, 'divided-net.pkl')
總結
以上是生活随笔為你收集整理的【Kaggle-MNIST之路】CNN结构再改进+交叉熵损失函数(六)的全部內容,希望文章能夠幫你解決所遇到的問題。
如果覺得生活随笔網站內容還不錯,歡迎將生活随笔推薦給好友。