Trying MNIST classification with a multi-layer perceptron

Built on the official Chainer sample code, using Chainer ver. 1.5.1.

・The MNIST handwritten digit data is fetched via scikit-learn (the official sample downloads it directly)

・GPU support is omitted for now

・Hidden layers: 2 (so not a deep network); a rough parameter count follows below
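For a sense of scale, here is a rough, illustrative parameter count for the 784-1000-1000-10 network defined in the cells below (weights plus biases for each Linear layer); this aside is a sketch, not part of the original sample:

n_in, n_units, n_out = 784, 1000, 10
n_params  = n_in * n_units + n_units       # l1: weights + biases
n_params += n_units * n_units + n_units    # l2
n_params += n_units * n_out + n_out        # l3
print(n_params)  # 1796010, i.e. about 1.8M parameters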

In [7]:
import chainer

chainer.__version__
Out[7]:
'1.5.1'
In [4]:
"""
Chainer example: train a multi-layer perceptron on MNIST
This is a minimal example to write a feed-forward net.
"""
from __future__ import print_function
import numpy as np
import six

import chainer
from   chainer import computational_graph, cuda, optimizers, serializers
import chainer.functions as F
import chainer.links     as L
from   sklearn.datasets import fetch_mldata

# ◆ Multi-layer perceptron definition: two hidden layers plus an output layer.
class MnistMLP(chainer.Chain):
    def __init__(self, n_in, n_units, n_out):
        super(MnistMLP, self).__init__(
            l1 = L.Linear(n_in   , n_units),
            l2 = L.Linear(n_units, n_units),
            l3 = L.Linear(n_units, n_out  ),
        )        
    # Forward pass. The activation is ReLU (Rectified Linear Unit): ReLU(x) = max(0, x).
    def __call__(self, x):
        h1 = F.relu(self.l1(x) )
        h2 = F.relu(self.l2(h1))
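        # no softmax here: L.Classifier applies softmax_cross_entropy to these raw scores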
        return self.l3(h2)
    
# ◆ Prepare the dataset
# mnist.data : 70,000 samples of 784-dimensional vector data
# (28px x 28px handwritten digits, so 28 x 28 = 784 dimensions)
print ('fetch MNIST dataset')
mnist = fetch_mldata('MNIST original')
mnist.data   = mnist.data.astype(np.float32)  # cast the pixel data to float32
mnist.data  /= 255     # rescale pixel values from the original 256 levels to the range 0.0-1.0
mnist.target = mnist.target.astype(np.int32)  # cast the labels to int32

# Split into 60,000 training samples and 10,000 test samples
N = 60000
x_train, x_test = np.split(mnist.data,   [N]) # pixel data
y_train, y_test = np.split(mnist.target, [N]) # label data
N_test = y_test.size                          # number of test samples
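# (resulting shapes: x_train (60000, 784), x_test (10000, 784); y_train (60000,), y_test (10000,))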

initmodel = ''   # Initialize the model from given file
resume    = ''   # Resume the optimization from snapshot

batchsize      = 100  # mini-batch size
n_epoch        = 10   # number of epochs
n_units        = 1000 # number of units per hidden layer

# ◆ Build the neural network
# 784-dimensional input, 1000 units per hidden layer, 10 outputs (to identify the handwritten digits 0-9)
model = L.Classifier(MnistMLP(784, n_units, 10))
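# (L.Classifier stores the MLP as model.predictor; calling model(x, t) returns the
#  softmax cross-entropy loss and records model.accuracy.)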

# Configure the optimization method
# Create the optimizer; Adam is used here.
optimizer = optimizers.Adam()
# Attach the model to the optimizer
optimizer.setup(model)

# Init/Resume
if initmodel:
    print('Load model from', initmodel)
    serializers.load_hdf5(initmodel, model)
if resume:
    print('Load optimizer state from', resume)
    serializers.load_hdf5(resume, optimizer)

# ◆ Training loop
for epoch in six.moves.range(1, n_epoch + 1):
    print('epoch', epoch)

    # Training phase
    perm = np.random.permutation(N) # shuffle the sample indices
                                    # (np.random.shuffle() would instead permute in place)
    sum_accuracy = 0
    sum_loss     = 0
    for i in six.moves.range(0, N, batchsize):
        x = chainer.Variable(np.asarray(x_train[perm[i:i + batchsize]]))
        t = chainer.Variable(np.asarray(y_train[perm[i:i + batchsize]]))

        # Update the model through the optimizer:
        # pass the loss function (Classifier defines it) and its arguments.
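        # (update() computes loss = model(x, t), zeroes the gradients, backpropagates, and applies one Adam step.)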
        optimizer.update(model, x, t)

        if epoch == 1 and i == 0:
            with open('graph.dot', 'w') as o:
                g = computational_graph.build_computational_graph((model.loss, ), remove_split=True)
                o.write(g.dump())
            print('graph generated')
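            # (graph.dot can be rendered with Graphviz, e.g. dot -Tpng graph.dot -o graph.png)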

        sum_loss     += float(model.loss.data)     * len(t.data)
        sum_accuracy += float(model.accuracy.data) * len(t.data)

    print('train mean loss={}, accuracy={}'.format(sum_loss / N, sum_accuracy / N))

    # Evaluation on the test set
    sum_accuracy = 0
    sum_loss     = 0
    for i in six.moves.range(0, N_test, batchsize):
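        # volatile='on' marks the Variables as inference-only, so no computational graph is built (Chainer 1.x API)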
        x = chainer.Variable(np.asarray(x_test[i:i + batchsize]), volatile='on')
        t = chainer.Variable(np.asarray(y_test[i:i + batchsize]), volatile='on')
        loss = model(x, t)
        sum_loss     += float(loss.data)           * len(t.data)
        sum_accuracy += float(model.accuracy.data) * len(t.data)

    print('test  mean loss={}, accuracy={}'.format(sum_loss / N_test, sum_accuracy / N_test))

# ◆ Save the model and the optimizer state
print('save the model')
serializers.save_hdf5('mlp.model', model)
print('save the optimizer')
serializers.save_hdf5('mlp.state', optimizer)
fetch MNIST dataset
epoch 1
graph generated
train mean loss=0.19022969173, accuracy=0.941700003371
test  mean loss=0.087216377568, accuracy=0.972300003171
epoch 2
train mean loss=0.0742791442418, accuracy=0.97691667676
test  mean loss=0.0767258678846, accuracy=0.977500007749
epoch 3
train mean loss=0.046471829068, accuracy=0.984866677523
test  mean loss=0.0738036866684, accuracy=0.977400007248
epoch 4
train mean loss=0.0364178050218, accuracy=0.988350009322
test  mean loss=0.0612103409497, accuracy=0.981300006509
epoch 5
train mean loss=0.0286914123052, accuracy=0.990516675313
test  mean loss=0.0686932048488, accuracy=0.981200005412
epoch 6
train mean loss=0.0239544144826, accuracy=0.992166673541
test  mean loss=0.0859721974967, accuracy=0.975400006771
epoch 7
train mean loss=0.0206430344892, accuracy=0.993150005738
test  mean loss=0.0803848945047, accuracy=0.980200006366
epoch 8
train mean loss=0.0180904265835, accuracy=0.993816671968
test  mean loss=0.0749666128116, accuracy=0.981000006795
epoch 9
train mean loss=0.0152721670893, accuracy=0.994950004717
test  mean loss=0.0900614361009, accuracy=0.978800005317
epoch 10
train mean loss=0.0156433219236, accuracy=0.99476667136
test  mean loss=0.103034083288, accuracy=0.97620000422
save the model
save the optimizer
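
As a follow-up (not part of the original notebook), here is a minimal sketch of how the saved model could be reloaded and used for prediction. It assumes the MnistMLP class, x_test, and y_test defined above, plus the mlp.model file written by the cell:

model = L.Classifier(MnistMLP(784, 1000, 10))
serializers.load_hdf5('mlp.model', model)   # restore the trained weights

x = chainer.Variable(np.asarray(x_test[:10]), volatile='on')
y = model.predictor(x)                      # raw scores for the digits 0-9
print(np.argmax(y.data, axis=1))            # predicted labels
print(y_test[:10])                          # ground-truth labels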