Variational Autoencoder (VAE): The Code

A VAE is well suited to learning a well-structured latent space, in which particular directions encode meaningful axes of variation in the data.

How a VAE works:

(1) An encoder module converts the input sample input_img into two parameters in a latent space, z_mean and z_log_variance;

(2) We assume that this latent normal distribution can generate the input image, and we randomly sample a point from the distribution: z = z_mean + exp(0.5 * z_log_variance) * epsilon, where epsilon is a random tensor of small values (since z_log_variance is the log of the variance, exp(0.5 * z_log_variance) is the standard deviation; see the sketch just after this list);

(3) A decoder module maps this point in the latent space back to the original input image.
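As a concrete illustration of step (2), here is a minimal NumPy sketch of the sampling step (the reparameterization trick). It assumes z_log_variance holds the log of the variance; the function name and shapes are illustrative only, not part of the code below:

import numpy as np

def reparameterize(z_mean, z_log_variance, rng=np.random.default_rng(0)):
    # epsilon ~ N(0, 1); scale by the standard deviation and shift by the mean
    # to obtain a sample from N(z_mean, exp(z_log_variance))
    epsilon = rng.standard_normal(z_mean.shape)
    return z_mean + np.exp(0.5 * z_log_variance) * epsilon

# A batch of four 2D latent codes sampled around the origin
print(reparameterize(np.zeros((4, 2)), np.zeros((4, 2))))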

Because epsilon is random, this process ensures that every point close to the latent location where input_img is encoded (i.e. z_mean) can be decoded to something similar to input_img, which forces the latent space to be continuously meaningful: any two adjacent points in the latent space decode to highly similar images. Continuity, combined with the low dimensionality of the latent space, forces every direction in the latent space to encode a meaningful axis of variation of the data. This gives the latent space a very good structure, making it well suited to manipulation via concept vectors (a small interpolation check at the end of this post illustrates the continuity claim).

The parameters of a VAE are trained via two loss functions: a reconstruction loss, which forces the decoded samples to match the initial inputs, and a regularization loss, which helps learn a well-structured latent space and reduces overfitting to the training data.
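As a minimal sketch of what these two terms compute, written with the same Keras backend the code below uses (the function vae_losses and its argument names are illustrative, not part of the code that follows):

from keras import backend as K

def vae_losses(x, x_decoded, z_mean, z_log_var):
    # Reconstruction loss: per-pixel binary cross-entropy between the input
    # image and its decoded reconstruction
    xent_loss = K.mean(K.binary_crossentropy(K.flatten(x), K.flatten(x_decoded)))
    # Regularization loss: KL divergence between the approximate posterior
    # N(z_mean, exp(z_log_var)) and the standard normal prior N(0, 1)
    kl_loss = K.mean(-0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1))
    return xent_loss, kl_loss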

For a detailed introduction to the underlying theory, see this other article: http://www.javashuo.com/article/p-ghmyvqco-gp.html

The full code example is as follows:

import keras
from keras import layers
from keras import backend as K
from keras.models import Model
import numpy as np

img_shape = (28, 28, 1)
latent_dim = 2  # dimensionality of the latent space: a 2D plane

input_img = keras.Input(shape=img_shape)

encoded = layers.Conv2D(32, 3, padding='same', activation='relu')(input_img)
encoded = layers.Conv2D(64, 3, padding='same', activation='relu', strides=(2, 2))(encoded)
encoded = layers.Conv2D(64, 3, padding='same', activation='relu')(encoded)
encoded = layers.Conv2D(64, 3, padding='same', activation='relu')(encoded)
shape_before_flattening = K.int_shape(encoded)  # (None, 14, 14, 64) for 28x28 inputs; the decoder needs this shape


encoded = layers.Flatten()(encoded)
encoded = layers.Dense(32, activation='relu')(encoded)

# The input image ends up being encoded into these two parameters
z_mean = layers.Dense(latent_dim)(encoded)
z_log_var = layers.Dense(latent_dim)(encoded)

# Encoder model: input image --> 2D latent features (z_mean)
encoder = Model(input_img, z_mean)
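# Note (not in the original post): this encoder exposes only z_mean; a variant
# returning both latent statistics would be Model(input_img, [z_mean, z_log_var])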


# Latent-space sampling function (the reparameterization trick)
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean=0., stddev=1.)
    # exp(0.5 * z_log_var) is the standard deviation, matching the
    # log-variance convention used by the KL term of the loss below
    return z_mean + K.exp(0.5 * z_log_var) * epsilon


z = layers.Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])

# VAE decoder network: maps latent-space points back to images
decoder_input = layers.Input(K.int_shape(z)[1:])  # input where z will be fed

# Upsample the input
decoded = layers.Dense(np.prod(shape_before_flattening[1:]), activation='relu')(decoder_input)

# Reshape z into a feature map with the same shape as the feature map just before the last Flatten layer in the encoder
decoded = layers.Reshape(shape_before_flattening[1:])(decoded)

# Use a Conv2DTranspose layer and a Conv2D layer to decode z into a feature map with the same dimensions as the original image input
decoded = layers.Conv2DTranspose(32, 3, padding='same', activation='relu', strides=(2, 2))(decoded)
decoder_output = layers.Conv2D(1, 3, padding='same', activation='sigmoid')(decoded)

# Instantiate the decoder model, which turns decoder_input into the decoded image
decoder = Model(decoder_input, decoder_output)

# Apply it to z to obtain the decoded z
z_decoded = decoder(z)
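# Optional sanity check (not in the original post): the decoded tensor should
# have the same shape as the input images
print(K.int_shape(z_decoded))  # expected: (None, 28, 28, 1)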


# Custom layer used to compute the VAE loss
class CustomVariationalLayer(keras.layers.Layer):
    def vae_loss(self, x, z_decoded):
        x = K.flatten(x)
        z_decoded = K.flatten(z_decoded)
        xent_loss = keras.metrics.binary_crossentropy(x, z_decoded)  # reconstruction loss
        kl_loss = -5e-4 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)  # regularization (KL) loss
        return K.mean(xent_loss + kl_loss)

    # Implement the custom layer by writing a call method
    def call(self, inputs):
        x = inputs[0]
        z_decoded = inputs[1]
        loss = self.vae_loss(x, z_decoded)
        self.add_loss(loss, inputs=inputs)
        return x  # we don't use this output, but a layer must return something


# Call the custom layer on the input and the decoded output to obtain the final model output
y = CustomVariationalLayer()([input_img, z_decoded])


# Train the VAE
vae = Model(input_img, y)
vae.compile(optimizer='rmsprop', loss=None)  # loss=None: the loss is added inside CustomVariationalLayer via add_loss
vae.summary()

from keras.datasets import mnist

(x_train, _), (x_test, y_test) = mnist.load_data()
x_train = x_train[:600]  # keep small subsets so this demo trains quickly
x_test = x_test[:100]
x_train = x_train.astype('float32') / 255.
print('x_train.shape', x_train.shape)
x_train = x_train.reshape(x_train.shape + (1,))
print('x_train.shape', x_train.shape)

x_test = x_test.astype('float32') / 255.
print('x_test.shape', x_test.shape)
x_test = x_test.reshape(x_test.shape + (1,))
print('x_test.shape', x_test.shape)

vae.fit(x_train, None,
        shuffle=True,
        epochs=1,
        batch_size=100,
        validation_data=(x_test, None))
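# A quick check after training (not in the original post): project the test
# images into the 2D latent plane with the encoder defined above and color
# each point by its digit class (y_test was kept when loading MNIST)
import matplotlib.pyplot as plt

z_test = encoder.predict(x_test, batch_size=100)  # 2D latent mean per test image
plt.figure(figsize=(6, 6))
plt.scatter(z_test[:, 0], z_test[:, 1], c=y_test[:100])  # x_test was truncated to 100 samples
plt.colorbar()
plt.show()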

# Sample a grid of points from the 2D latent space and decode them into images
import matplotlib.pyplot as plt
from scipy.stats import norm

batch_size = 100
n = 15  # we will display a 15x15 grid of digits (225 digits in total)
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))

# Use SciPy's ppf function to transform linearly spaced coordinates into values of the latent variable z (because the prior over the latent space is Gaussian)
grid_x = norm.ppf(np.linspace(0.05, 0.95, n))
grid_y = norm.ppf(np.linspace(0.05, 0.95, n))
print(grid_x)
print(grid_y)


for i, yi in enumerate(grid_x):
    for j, xi in enumerate(grid_y):
        z_sample = np.array([[xi, yi]])
        z_sample = np.tile(z_sample, batch_size).reshape(batch_size, 2)  # repeat z to build a complete batch
        x_decoded = decoder.predict(z_sample, batch_size=batch_size)  # decode the batch into digit images
        digit = x_decoded[0].reshape(digit_size, digit_size)  # reshape the batch's first digit from 28x28x1 to 28x28
        figure[i * digit_size:(i + 1) * digit_size, j * digit_size:(j + 1) * digit_size] = digit

plt.figure(figsize=(10, 10))
plt.imshow(figure, cmap='Greys_r')
plt.show()
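# To illustrate the continuity claim from the beginning of this post (an extra
# check, not in the original code): two nearby latent points should decode to
# nearly identical digits
z0 = np.array([[0.0, 0.0]])
z1 = z0 + 0.1  # a neighboring latent point
img0 = decoder.predict(z0)
img1 = decoder.predict(z1)
print(np.abs(img0 - img1).mean())  # the mean pixel difference should be small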

Test result: a 15x15 grid of MNIST digits decoded from the latent space, as rendered by plt.show() above.