Multilayer Perceptron (MLP) Implementation

2019-08-01  VictorHong

MLP Implementation

Implementation from Scratch

First, import the necessary packages

import d2lzh as d2l
from mxnet import autograd, nd
from mxnet.gluon import loss as gloss

Read the Fashion-MNIST data

batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
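
To see what the iterator yields, peek at one batch (a quick check; the d2lzh loader returns 1×28×28 image tensors and integer labels):

for X, y in train_iter:
    print(X.shape, y.shape)  # expect (256, 1, 28, 28) and (256,)
    break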

Define the model parameters

Each 28×28 Fashion-MNIST image is flattened into 784 inputs; the output layer has 10 classes, and the single hidden layer has 256 units.

num_inputs = 784   # 28 x 28 pixels, flattened
num_outputs = 10   # 10 clothing classes
num_hiddens = 256  # hidden layer width

W1 = nd.random.normal(scale=0.01, shape=(num_inputs, num_hiddens))
b1 = nd.zeros(shape=num_hiddens)
W2 = nd.random.normal(scale=0.01, shape=(num_hiddens, num_outputs))
b2 = nd.zeros(shape=num_outputs)

params = [W1, b1, W2, b2]
for param in params:
    param.attach_grad()  # allocate gradient buffers for autograd
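
As a sanity check on the sizes above, the model has 784×256 + 256 + 256×10 + 10 = 203,530 learnable scalars. A one-line count (sketch):

# Sum the element counts of all parameter arrays.
num_params = sum(param.size for param in params)
print(num_params)  # 203530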

Define the activation function

def relu(x):
    return nd.maximum(0, x)  # element-wise max(0, x) via broadcasting
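
A quick check that the broadcasted maximum behaves as ReLU should, zeroing negatives and passing positives through unchanged:

print(relu(nd.array([-2.0, 0.0, 3.0])))  # [0. 0. 3.]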

Define the model

def net(X):
    X = X.reshape((-1, num_inputs))  # flatten each image to a 784-vector
    H = relu(nd.dot(X, W1) + b1)     # hidden layer
    return nd.dot(H, W2) + b2        # output scores (logits)
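
The forward pass first flattens each 1×28×28 image into a length-784 row, so the shapes flow (batch, 784) → (batch, 256) → (batch, 10). A dummy batch confirms this (sketch):

X = nd.random.normal(shape=(2, 1, 28, 28))  # two fake "images"
print(net(X).shape)  # (2, 10): one score per class per image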

Define the loss function

loss = gloss.SoftmaxCrossEntropyLoss()
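
This loss takes the raw scores (logits) returned by net together with integer class labels and applies softmax internally, which is more numerically stable than computing softmax and cross-entropy separately. A toy check (sketch):

y_hat = nd.array([[10.0, 0.0, 0.0], [0.0, 0.0, 10.0]])  # confident scores
y = nd.array([0, 2])                                     # matching labels
print(loss(y_hat, y))  # both entries near 0: the predictions are correct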

Train the model

num_epochs, lr = 5, 0.5
# help(d2l.train_ch3)  # uncomment to inspect the training helper's signature
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, params, lr)
epoch 1, loss 0.9075, train acc 0.711, test acc 0.826
epoch 2, loss 0.5075, train acc 0.812, test acc 0.836
epoch 3, loss 0.4439, train acc 0.834, test acc 0.855
epoch 4, loss 0.4128, train acc 0.848, test acc 0.852
epoch 5, loss 0.3991, train acc 0.853, test acc 0.856
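
After training, predictions can be eyeballed on a test batch. A minimal sketch, assuming d2lzh provides get_fashion_mnist_labels as in the d2l book:

for X, y in test_iter:
    break  # grab one test batch
true_labels = d2l.get_fashion_mnist_labels(y.asnumpy())
pred_labels = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1).asnumpy())
print(list(zip(true_labels, pred_labels))[:5])  # (truth, prediction) pairs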

Concise Implementation with Gluon

The same model, built with Gluon's high-level API.

from mxnet import autograd, nd
from mxnet import gluon, init
from mxnet.gluon import data as gdata, loss as gloss, nn

net = nn.Sequential()
net.add(nn.Dense(256, activation='relu'), nn.Dense(10))  # hidden + output layers
net.initialize(init.Normal(sigma=0.01))

batch_size = 256
num_epochs = 5
loss = gloss.SoftmaxCrossEntropyLoss()

trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.5})

# train_iter, test_iter and d2l are reused from the from-scratch section above
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, trainer)
epoch 1, loss 0.3552, train acc 0.869, test acc 0.872
epoch 2, loss 0.3404, train acc 0.873, test acc 0.880
epoch 3, loss 0.3300, train acc 0.878, test acc 0.869
epoch 4, loss 0.3184, train acc 0.882, test acc 0.876
epoch 5, loss 0.3112, train acc 0.885, test acc 0.884
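
The trained Gluon layers can be inspected the same way; note that nn.Dense stores its weight as (out_units, in_units), i.e. transposed relative to the from-scratch W1/W2. A quick inspection sketch:

print(net[0].weight.data().shape)  # (256, 784), hidden layer
print(net[1].weight.data().shape)  # (10, 256), output layer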