Implementing a Three-Layer Neural Network from Scratch
Goal of this article
Implement a three-layer neural network (two hidden layers plus an output layer) for Fashion-MNIST image classification twice: first from scratch with MXNet's NDArray, then again with the high-level Gluon API.
The general framework of machine learning
Both implementations below follow the same skeleton, flagged by the comments in the code: data, model, strategy (the loss function), algorithm (the optimizer), training, and prediction.
From-scratch version
# -*- coding: utf-8 -*-
import d2lzh as d2l
from mxnet import nd
from mxnet import autograd

# data
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

# model
num_inputs, num_hiddens1, num_hiddens2, num_outputs = 784, 256, 256, 10
W1 = nd.random.normal(scale=0.01, shape=(num_inputs, num_hiddens1))
b1 = nd.zeros(num_hiddens1)
W2 = nd.random.normal(scale=0.01, shape=(num_hiddens1, num_hiddens2))
b2 = nd.zeros(num_hiddens2)
W3 = nd.random.normal(scale=0.01, shape=(num_hiddens2, num_outputs))
b3 = nd.zeros(num_outputs)
params = [W1, b1, W2, b2, W3, b3]
for param in params:
    param.attach_grad()

def relu(X):
    return nd.maximum(X, 0)

def softmax(X):
    X_exp = X.exp()
    partition = X_exp.sum(axis=1, keepdims=True)
    return X_exp / partition

def net(X):
    X = X.reshape((-1, num_inputs))
    H1 = relu(nd.dot(X, W1) + b1)
    H2 = relu(nd.dot(H1, W2) + b2)
    # output layer: the original returned softmax(H2) and never used W3/b3,
    # which silently dropped the third layer
    return softmax(nd.dot(H2, W3) + b3)

# strategy
def cross_entropy(y_hat, y):
    return -nd.pick(y_hat, y).log()

loss = cross_entropy

# algorithm
def sgd(params, lr, batch_size):
    for param in params:
        param[:] = param - lr * param.grad / batch_size

# training
def evaluate_accuracy(data_iter, net):
    acc_sum, n = 0.0, 0
    for X, y in data_iter:
        y = y.astype('float32')
        acc_sum += (net(X).argmax(axis=1) == y).sum().asscalar()
        n += y.size
    return acc_sum / n

def train(net, train_iter, test_iter, loss, num_epochs, batch_size, params, lr):
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            with autograd.record():
                y_hat = net(X)
                l = loss(y_hat, y).sum()
            l.backward()
            sgd(params, lr, batch_size)
            y = y.astype('float32')
            train_l_sum += l.asscalar()
            train_acc_sum += (y_hat.argmax(axis=1) == y).sum().asscalar()
            n += y.size
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch: %d, loss %.4f, train_acc %.3f, test_acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))

num_epochs, lr = 10, 0.3
train(net, train_iter, test_iter, loss, num_epochs, batch_size, params, lr)
# predict

if __name__ == '__main__':
    print('------ok-------')

Note: the code still uses d2l.load_data_fashion_mnist to load the image data; when time permits, that should be replaced too with an NDArray-based implementation.
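On that note, d2l.load_data_fashion_mnist is only a thin wrapper around Gluon's data utilities. Here is a minimal sketch of an equivalent loader built directly on gluon.data (it removes the d2lzh dependency, though it is still framework code rather than pure NDArray; the function name load_fashion_mnist is my own):

from mxnet import gluon
from mxnet.gluon.data.vision import transforms

def load_fashion_mnist(batch_size):
    # ToTensor converts HWC uint8 images to CHW float32 in [0, 1]
    transformer = transforms.ToTensor()
    mnist_train = gluon.data.vision.FashionMNIST(train=True).transform_first(transformer)
    mnist_test = gluon.data.vision.FashionMNIST(train=False).transform_first(transformer)
    train_iter = gluon.data.DataLoader(mnist_train, batch_size, shuffle=True)
    test_iter = gluon.data.DataLoader(mnist_test, batch_size, shuffle=False)
    return train_iter, test_iter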
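One caveat about the softmax defined above: it exponentiates raw scores directly, so large logits can overflow exp() to inf and produce NaN probabilities. A standard remedy, not part of the original post, is to subtract each row's maximum before exponentiating; a sketch:

def stable_softmax(X):
    # subtracting the row-wise max leaves the result mathematically
    # unchanged but keeps exp() from overflowing
    X_shifted = X - X.max(axis=1, keepdims=True)
    X_exp = X_shifted.exp()
    return X_exp / X_exp.sum(axis=1, keepdims=True)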
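Also, cross_entropy leans on nd.pick, which selects one entry per row of y_hat at the column given by the true label y (along the last axis by default). A tiny worked example:

from mxnet import nd

y_hat = nd.array([[0.1, 0.3, 0.6],
                  [0.3, 0.2, 0.5]])
y = nd.array([0, 2])
print(nd.pick(y_hat, y))         # [0.1 0.5]: probability of the true class per row
print(-nd.pick(y_hat, y).log())  # per-example cross-entropy loss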
 
MXNet framework version
# -*- coding: utf-8 -*-
import d2lzh as d2l
from mxnet import gluon, init
from mxnet.gluon import loss as gloss, nn

# data
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

# model
net = nn.Sequential()
net.add(nn.Dense(256, activation='relu'),
        nn.Dense(256, activation='relu'),
        nn.Dense(10))
net.initialize(init.Normal(sigma=0.01))

# strategy
loss = gloss.SoftmaxCrossEntropyLoss()

# algorithm
lr = 0.3
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})

# training
num_epochs = 10
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
              None, None, trainer)
# predict

if __name__ == '__main__':
    print('-----ok------')
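Both listings leave the # predict step empty. As a sketch of what it could look like: take one test batch, run the trained net, and compare predicted classes against the true ones (this assumes d2lzh's get_fashion_mnist_labels helper for mapping class indices to label names):

for X, y in test_iter:
    break  # take a single test batch

true_labels = d2l.get_fashion_mnist_labels(y.asnumpy())
pred_labels = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1).asnumpy())
for t, p in zip(true_labels[:10], pred_labels[:10]):
    print('true: %s, pred: %s' % (t, p))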
Summary
Both versions train the same 784-256-256-10 network on Fashion-MNIST with minibatch SGD. The from-scratch version spells out the forward pass, softmax, cross-entropy loss, and parameter updates by hand, while the Gluon version delegates all of that to the framework in a fraction of the code.