MXNet (14): Batch Normalization in Neural Networks

1. Batch Normalization

Training deep neural networks is difficult. Several challenges motivate introducing batch normalization:

  • Data preprocessing often has a large effect on the final results. We typically standardize our features so that each one has mean 0 and variance 1 (see the sketch after this list).
  • In a CNN, the variables in intermediate layers can take values of widely different magnitudes, which can hinder convergence; if one layer's values are 100 times those of another, the learning rate needs a compensating adjustment.
  • Deeper networks are more complex and overfit more easily, so regularization becomes all the more important.
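
A minimal sketch of such feature standardization (plain MXNet np, with a hypothetical toy matrix):

from mxnet import np, npx
npx.set_np()

# Hypothetical toy data: 4 samples, 2 features on very different scales
X = np.array([[1.0, 100.0], [2.0, 200.0], [3.0, 300.0], [4.0, 400.0]])
mu = X.mean(axis=0)
sigma = np.sqrt(((X - mu) ** 2).mean(axis=0))
X_std = (X - mu) / sigma  # each column now has mean 0 and variance 1
print(X_std.mean(axis=0), (X_std ** 2).mean(axis=0))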

During training, batch normalization uses the mean and standard deviation of each minibatch to continually recenter and rescale the intermediate outputs of the network, making the values of those intermediate outputs more stable across layers. Batch normalization and residual networks (introduced in the next section) provide two important ideas for training and designing deep models.
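
Concretely, given a minibatch $\mathcal{B}$ and an input $\mathbf{x} \in \mathcal{B}$, batch normalization computes

$\mathrm{BN}(\mathbf{x}) = \boldsymbol{\gamma} \odot \dfrac{\mathbf{x} - \hat{\boldsymbol{\mu}}_\mathcal{B}}{\hat{\boldsymbol{\sigma}}_\mathcal{B}} + \boldsymbol{\beta}$

where $\hat{\boldsymbol{\mu}}_\mathcal{B}$ and $\hat{\boldsymbol{\sigma}}_\mathcal{B}$ are the mean and standard deviation estimated on the minibatch, and the scale $\boldsymbol{\gamma}$ and shift $\boldsymbol{\beta}$ are learned parameters of the same shape as $\mathbf{x}$. In practice a small constant $\epsilon > 0$ is added to the variance estimate so we never divide by zero.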

2. Batch Normalization Layers

Batch normalization is implemented slightly differently for fully connected layers and for convolutional layers; we explain each case below.

2.1 Fully Connected Layers

We place the batch normalization layer between the affine transformation and the activation function of a fully connected layer. Let the input of the fully connected layer be $\mathbf{x}$, the weight and bias parameters $\mathbf{W}$ and $\mathbf{b}$, and the activation function $\phi$. Writing $\mathrm{BN}$ for the batch normalization operator, the output of a fully connected layer with batch normalization is

$\mathbf{h} = \phi(\mathrm{BN}(\mathbf{W}\mathbf{x} + \mathbf{b}))$

2.2 Convolutional Layers

For convolutional layers, batch normalization is applied after the convolution and before the activation function. When the convolution produces multiple output channels, we normalize each channel separately, and every channel has its own scalar scale and shift parameters. Suppose the minibatch contains $m$ samples and, for a single channel, the convolution output has height $p$ and width $q$. We then normalize all $m \times p \times q$ elements of that channel at once, standardizing them with the mean and variance computed over those same $m \times p \times q$ elements.
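
A small sketch of which axes this per-channel reduction runs over (hypothetical random input):

from mxnet import np, npx
npx.set_np()

# Hypothetical minibatch: m=2 samples, 3 channels, 4x4 feature maps
X = np.random.uniform(size=(2, 3, 4, 4))
# Reduce over the sample, height, and width axes; keep the channel axis
mean = X.mean(axis=(0, 2, 3), keepdims=True)  # shape (1, 3, 1, 1)
var = ((X - mean) ** 2).mean(axis=(0, 2, 3), keepdims=True)
print(mean.shape, var.shape)  # one mean and one variance per channel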

2.3 Batch Normalization at Prediction Time

At prediction time, we want the model to produce a deterministic output for any given input, so the output for a single sample must not depend on the mean and variance of whichever minibatch it happens to land in. A common solution is to estimate the mean and variance of the entire training set with moving averages and use those estimates at prediction time. Batch normalization therefore computes different things in training mode and in prediction mode.
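
The implementation below maintains these dataset-level estimates with an exponential moving average. With momentum $\lambda$ (0.9 here), each training step updates

$\text{moving\_mean} \leftarrow \lambda \cdot \text{moving\_mean} + (1 - \lambda) \cdot \hat{\boldsymbol{\mu}}_\mathcal{B}$

$\text{moving\_var} \leftarrow \lambda \cdot \text{moving\_var} + (1 - \lambda) \cdot \hat{\boldsymbol{\sigma}}^2_\mathcal{B}$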

3. Batch Normalization from Scratch

We first implement the batch normalization layer from scratch using tensors.

from mxnet import autograd, np, npx, init, gluon
from mxnet.gluon import nn
import plotly.graph_objs as go
npx.set_np()

ctx = npx.gpu() if npx.num_gpus() else npx.cpu()

def batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):
    # Distinguish between training and prediction mode
    if not autograd.is_training():
        # In prediction mode, use the moving-average mean and variance directly
        X_hat = (X - moving_mean) / np.sqrt(moving_var + eps)
    else:
        assert len(X.shape) in (2, 4)  # 2: fully connected, 4: convolutional
        if len(X.shape) == 2:
            # Fully connected layer: mean and variance over the feature dimension
            mean = X.mean(axis=0)
            var = ((X - mean) ** 2).mean(axis=0)
        else:
            # 2-D convolution: per-channel mean and variance over axes (0, 2, 3),
            # keeping X's shape so broadcasting works
            mean = X.mean(axis=(0, 2, 3), keepdims=True)
            var = ((X - mean) ** 2).mean(axis=(0, 2, 3), keepdims=True)
        X_hat = (X - mean) / np.sqrt(var + eps)
        # Update the moving averages of the mean and variance
        moving_mean = momentum * moving_mean + (1.0 - momentum) * mean
        moving_var = momentum * moving_var + (1.0 - momentum) * var
    Y = gamma * X_hat + beta  # scale and shift
    return Y, moving_mean, moving_var

The function also returns the updated moving mean and moving variance, since they are needed at test time.
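
As a toy check (hypothetical data): in training mode each output column should come out with mean roughly 0 and variance roughly 1, given that gamma and beta are still 1 and 0:

X = np.arange(12).reshape(4, 3).astype('float32')
gamma, beta = np.ones((1, 3)), np.zeros((1, 3))
with autograd.record():  # turns on training mode
    Y, mm, mv = batch_norm(X, gamma, beta, np.zeros((1, 3)), np.ones((1, 3)),
                           eps=1e-12, momentum=0.9)
print(Y.mean(axis=0), ((Y - Y.mean(axis=0)) ** 2).mean(axis=0))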

We can now create a BatchNorm layer. The num_features argument is the number of outputs for a fully connected layer or the number of output channels for a convolutional layer; num_dims is 2 for fully connected layers and 4 for convolutional layers.

class BatchNorm(nn.Block):

    def __init__(self, num_features, num_dims, **kwargs):
        super().__init__(**kwargs)
        if num_dims == 2:
            shape = (1, num_features)
        else:
            shape = (1, num_features, 1, 1)
        # Scale and shift parameters (trained via gradient updates),
        # initialized to 1 and 0
        self.gamma = self.params.get('gamma', shape=shape, init=init.One())
        self.beta = self.params.get('beta', shape=shape, init=init.Zero())
        # Moving statistics (not trained); the variance starts at 1 so that
        # prediction mode is well defined even before the first update
        self.moving_mean = np.zeros(shape)
        self.moving_var = np.ones(shape)

    def forward(self, X):
        # If X is not on the same device (e.g. GPU) as moving_mean and
        # moving_var, copy them over to X's device
        if self.moving_mean.ctx != X.ctx:
            self.moving_mean = self.moving_mean.as_in_ctx(X.ctx)
            self.moving_var = self.moving_var.as_in_ctx(X.ctx)
        # Save the updated moving_mean and moving_var
        Y, self.moving_mean, self.moving_var = batch_norm(
            X, self.gamma.data(), self.beta.data(), self.moving_mean,
            self.moving_var, eps=1e-12, momentum=0.9)
        return Y
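
A hypothetical smoke test (random 4-D input on ctx; batch normalization preserves the input shape):

layer = BatchNorm(3, num_dims=4)
layer.initialize(ctx=ctx)
X = np.random.uniform(size=(2, 3, 8, 8), ctx=ctx)
with autograd.record():
    Y = layer(X)
print(Y.shape)  # (2, 3, 8, 8)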

3.1 Using the Batch Normalization Layer in LeNet

We insert a batch normalization layer after each convolutional or fully connected layer and before the activation layer.

net = nn.Sequential()
net.add(
    nn.Conv2D(6, kernel_size=5),
    BatchNorm(6, num_dims=4),
    nn.Activation('sigmoid'),
    nn.MaxPool2D(pool_size=2, strides=2),
    nn.Conv2D(16, kernel_size=5),
    BatchNorm(16, num_dims=4),
    nn.Activation('sigmoid'),
    nn.MaxPool2D(pool_size=2, strides=2),
    nn.Dense(120),
    BatchNorm(120, num_dims=2),
    nn.Activation('sigmoid'),
    nn.Dense(84),
    BatchNorm(84, num_dims=2),
    nn.Activation('sigmoid'),
    nn.Dense(10)
)
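
Before training, we can sanity-check the architecture by pushing a dummy 28×28 input through the layers and printing the output shapes (a quick sketch; train() below re-initializes the parameters anyway):

net.initialize(ctx=ctx)
X = np.random.uniform(size=(1, 1, 28, 28), ctx=ctx)
for layer in net:
    X = layer(X)
    print(layer.name, 'output shape:', X.shape)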

3.2 Training

We will train our network on the Fashion-MNIST dataset.

import sys

def get_workers(num):
    # Windows cannot use multiprocessing workers for data loading
    return 0 if sys.platform.startswith('win') else num

def loader(data, batch_size, shuffle=True, workers=6):
    return gluon.data.DataLoader(data, batch_size, shuffle=shuffle,
                                 num_workers=get_workers(workers))

def load_data(batch_size, resize=None):
    
    dataset = gluon.data.vision
    trans = [dataset.transforms.Resize(resize)] if resize else []
    trans.append(dataset.transforms.ToTensor())
    trans = dataset.transforms.Compose(trans)
    mnist_train = dataset.FashionMNIST(train=True).transform_first(trans)
    mnist_test = dataset.FashionMNIST(train=False).transform_first(trans)
    return loader(mnist_train, batch_size), loader(mnist_test, batch_size, False)    


def accuracy(y_hat, y):
    # Count the number of correct predictions in the batch
    if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
        y_hat = y_hat.argmax(axis=1)
    cmp = y_hat.astype(y.dtype) == y
    return float(cmp.sum())

def train_epoch(net, train_iter, loss, updater):
    
    l_sum = acc_rate = total = 0
    
    if isinstance(updater, gluon.Trainer):
        updater = updater.step
        
    for X,y in train_iter:
        X = X.as_in_ctx(ctx)
        y = y.as_in_ctx(ctx)
        with autograd.record():
            pre_y = net(X)
            l = loss(pre_y, y)
        l.backward()
        updater(y.size)
        l_sum += float(l.sum())
        acc_rate += accuracy(pre_y, y)
        total += y.size
    return l_sum/total, acc_rate/total

def evaluate_accuracy(net, data_iter):  

    match_num = total_num = 0
    for X, y in data_iter:
        X = X.as_in_ctx(ctx)
        y = y.as_in_ctx(ctx)
        match_num += accuracy(net(X), y)
        total_num += y.size
    return match_num / total_num

import time
def train(net, train_iter, test_iter, epochs, lr):
    
    net.initialize(force_reinit=True, ctx=ctx, init=init.Xavier())
    loss = gluon.loss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(net.collect_params(), 'sgd',  { 'learning_rate': lr})
    l_lst, acc_lst, test_acc_lst = [], [], []
    timer = 0
    print("----------------start------------------")
    for epoch in range(epochs):
        start = time.time()
        l, acc = train_epoch(net, train_iter, loss, trainer)
        timer += time.time()-start
        test_acc = evaluate_accuracy(net, test_iter)
        print(f'[epoch {epoch+1}] loss {l:.3f}, train acc {acc:.3f}, ' f'test acc {test_acc:.3f}')
        l_lst.append(l)
        acc_lst.append(acc)
        test_acc_lst.append(test_acc)
    print(f'loss {l:.3f}, train acc {acc:.3f}, test acc {test_acc:.3f}')
    print(f'{timer:.1f} sec, on {str(ctx)}')
    draw_graph([l_lst, acc_lst, test_acc_lst])
    

def draw_graph(result):
    data = []
    colors = ['aquamarine', 'orange', 'hotpink']
    names = ['train loss', 'train acc', 'test acc']
    symbols = ['circle-open', 'cross-open', 'triangle-up-open']
    for i, info in enumerate(result):
        trace = go.Scatter(
            x = list(range(1, len(info)+1)),
            y = info,
            mode = 'lines+markers',
            name = names[i],
            marker = { 
                'color':colors[i],
                'symbol':symbols[i],
            },
        )
        data.append(trace)
    fig = go.Figure(data = data)
    fig.update_layout(xaxis_title='epochs', width=800, height=480)
    fig.show()

  • As before, we run for 10 epochs:

lr, num_epochs, batch_size = 1.0, 10, 256
train_iter, test_iter = load_data(batch_size)
train(net, train_iter, test_iter, num_epochs, lr)

(Figure: training log with loss, training accuracy, and test accuracy curves over the 10 epochs.)

  • The scale parameter gamma and shift parameter beta learned by the first batch normalization layer:

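A quick sketch of how to inspect them (net[1] is the first BatchNorm instance in our Sequential):

print(net[1].gamma.data().reshape(-1,))
print(net[1].beta.data().reshape(-1,))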

4. Concise Implementation Using the API

Instead of the BatchNorm class we defined ourselves, we can directly use the BatchNorm class from the deep learning framework's high-level API. The code is otherwise identical to our implementation above.

NeLet = nn.Sequential()
NeLet.add(nn.Conv2D(6, kernel_size=5),
        nn.BatchNorm(),
        nn.Activation('sigmoid'),
        nn.MaxPool2D(pool_size=2, strides=2),
        nn.Conv2D(16, kernel_size=5),
        nn.BatchNorm(),
        nn.Activation('sigmoid'),
        nn.MaxPool2D(pool_size=2, strides=2),
        nn.Dense(120),
        nn.BatchNorm(),
        nn.Activation('sigmoid'),
        nn.Dense(84),
        nn.BatchNorm(),
        nn.Activation('sigmoid'),
        nn.Dense(10))
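
NeLet still needs to be trained before the prediction step in the next section; presumably the original post ran the same training call with the same iterators and hyperparameters:

train(NeLet, train_iter, test_iter, num_epochs, lr)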

5. Prediction

Let's feed some test data through the trained model and see how well it predicts.

import plotly.express as px
from plotly.subplots import make_subplots
def get_fashion_mnist_labels(labels): 
    text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
    return [text_labels[int(i)] for i in labels]

def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5): 
    colorscales = px.colors.named_colorscales()
    fig = make_subplots(num_rows, num_cols, subplot_titles=titles)
    for i, img in enumerate(imgs):
        fig.add_trace(go.Heatmap(z=img.asnumpy()[::-1], showscale=False, colorscale=colorscales[i+3]), 1, i+1)
        fig.update_xaxes(visible=False,row=1, col=i+1)
        fig.update_yaxes(visible=False, row=1, col=i+1)
    fig.update_layout(height=270)
    fig.show()

def predict(net, test_iter, stop, n=8):
    for i,(X,y) in enumerate(test_iter):
        if (i==stop) :
            break
    X,y = X.as_in_ctx(ctx), y.as_in_ctx(ctx)
    trues = get_fashion_mnist_labels(y)
    preds = get_fashion_mnist_labels(net(X).argmax(axis=1))
    titles = [f"true: {t} <br> pre: {p}" for t, p in zip(trues, preds)]
    show_images(X[:n].reshape((-1, 28, 28)), 1, n, titles=titles[:n])

import random
stop = random.choice(range(10))
predict(NeLet, test_iter, stop)

(Figure: sample test images with their true and predicted labels.)

6. References

https://d2l.ai/chapter_convolutional-modern/batch-norm.html

https://plotly.com/python/subplots/

7. Code

github
