【Kaggle-MNIST之路】CNN+改进过的损失函数(三)
生活随笔
收集整理的這篇文章主要介紹了
【Kaggle-MNIST之路】CNN+改进过的损失函数(三)
小編覺得挺不錯的,現在分享給大家,幫大家做個參考。
簡述
在上一個版本的CNN框架的基礎上進行改進。
- 上一個版本
- 卷積神經網絡CNN入門【pytorch學習】
調用了上面的框架。
目前:
- 分數:0.9160
- 排名:2400+
框架代碼
import pandas as pd import torch.utils.data as data import torch import torch.nn as nnfile = './all/train.csv' LR = 0.01class MNISTCSVDataset(data.Dataset):def __init__(self, csv_file, Train=True):self.dataframe = pd.read_csv(csv_file, iterator=True)self.Train = Traindef __len__(self):if self.Train:return 42000else:return 28000def __getitem__(self, idx):data = self.dataframe.get_chunk(100)ylabel = data['label'].as_matrix().astype('float')xdata = data.ix[:, 1:].as_matrix().astype('float')return ylabel, xdatamydataset = MNISTCSVDataset(file)train_loader = torch.utils.data.DataLoader(mydataset, batch_size=1, shuffle=True)class CNN(nn.Module):def __init__(self):super(CNN, self).__init__()self.layer1 = nn.Sequential(# (1, 28, 28)nn.Conv2d(in_channels=1,out_channels=16,kernel_size=5, # 卷積filter, 移動塊長stride=1, # filter的每次移動步長padding=2,groups=1),# nn.BatchNorm2d(16),# (16, 28, 38)nn.ReLU(),nn.MaxPool2d(kernel_size=2)# (16, 14, 14))self.layer2 = nn.Sequential(nn.Conv2d(in_channels=16,out_channels=32,kernel_size=5,stride=1,padding=2),# nn.BatchNorm2d(32),nn.ReLU(),nn.MaxPool2d(kernel_size=2))self.layer3 = nn.Linear(32 * 7 * 7, 10)def forward(self, x):# print(x.shape)x = self.layer1(x)# print(x.shape)x = self.layer2(x)# print(x.shape)x = x.view(x.size(0), -1)# print(x.shape)x = self.layer3(x)# print(x.shape)return xnet = CNN()loss_function = nn.MultiMarginLoss() optimizer = torch.optim.Adam(net.parameters(), lr=LR) for step, (yl, xd) in enumerate(train_loader):xd = xd.reshape(100, 1, 28, 28).float()output = net(xd)yl = yl.long()loss = loss_function(output, yl.squeeze())optimizer.zero_grad()loss.backward()optimizer.step()if step % 20 == 0:print('step %d' % step, loss)torch.save(net, 'divided-net.pkl')生成數據的代碼也需要改改
import torch import torch.utils.data as data import pandas as pd import csv import torch.nn as nnfile = './all/test.csv'class MNISTCSVDataset(data.Dataset):def __init__(self, csv_file, Train=False):self.dataframe = pd.read_csv(csv_file, iterator=True)self.Train = Traindef __len__(self):if self.Train:return 42000else:return 28000def __getitem__(self, idx):data = self.dataframe.get_chunk(100)xdata = data.as_matrix().astype('float')return xdataclass CNN(nn.Module):def __init__(self):super(CNN, self).__init__()self.layer1 = nn.Sequential(# (1, 28, 28)nn.Conv2d(in_channels=1,out_channels=16,kernel_size=5, # 卷積filter, 移動塊長stride=1, # filter的每次移動步長padding=2,groups=1),# nn.BatchNorm2d(16),# (16, 28, 38)nn.ReLU(),nn.MaxPool2d(kernel_size=2)# (16, 14, 14))self.layer2 = nn.Sequential(nn.Conv2d(in_channels=16,out_channels=32,kernel_size=5,stride=1,padding=2),# nn.BatchNorm2d(32),nn.ReLU(),nn.MaxPool2d(kernel_size=2))self.layer3 = nn.Linear(32 * 7 * 7, 10)def forward(self, x):# print(x.shape)x = self.layer1(x)# print(x.shape)x = self.layer2(x)# print(x.shape)x = x.view(x.size(0), -1)# print(x.shape)x = self.layer3(x)# print(x.shape)return xnet = torch.load('divided-net.pkl')myMnist = MNISTCSVDataset(file) test_loader = torch.utils.data.DataLoader(myMnist, batch_size=1, shuffle=False)values = [] for _, xd in enumerate(test_loader):xd = xd.reshape(100, 1, 28, 28).float()output = net(xd)values = values + output.argmax(dim=1).numpy().tolist()with open('./all/sample_submission.csv', 'r') as fp_in, open('newfile.csv', 'w', newline='') as fp_out:reader = csv.reader(fp_in)writer = csv.writer(fp_out)header = 0for i, row in enumerate(reader):if i == 0:writer.writerow(row)else:row[-1] = str(values[i - 1])writer.writerow(row)總結
以上是生活随笔為你收集整理的【Kaggle-MNIST之路】CNN+改进过的损失函数(三)的全部內容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: 【Kaggle-MNIST之路】两层的神
- 下一篇: 【Kaggle-MNIST之路】CNN+