The idea of Inception-Net is grouped convolution: the previous layer's output feeds several parallel branches, each branch is convolved separately, and the branch results are concatenated back together.
This can be extended so that each branch contains many layers; here we only implement the basic grouped-convolution block.
# define the grouped (multi-branch) structure of Inception-Net
def inception_block(x, output_channel_for_each_path, name):
    """Inception block implementation.

    Args:
        x: input tensor
        output_channel_for_each_path: output channel count of each branch, e.g. [10, 20, 30]
        name: variable scope name for this block
    """
    # inside a variable_scope, layer names do not clash across blocks:
    # conv1 = 'conv1' => scope_name/conv1
    with tf.variable_scope(name):
        conv1_1 = tf.layers.conv2d(x,
                                   output_channel_for_each_path[0],
                                   (1, 1),
                                   strides = (1, 1),
                                   padding = 'same',
                                   activation = tf.nn.relu,
                                   name = 'conv1_1')
        conv3_3 = tf.layers.conv2d(x,
                                   output_channel_for_each_path[1],
                                   (3, 3),
                                   strides = (1, 1),
                                   padding = 'same',
                                   activation = tf.nn.relu,
                                   name = 'conv3_3')
        # the 5*5 branch uses the third entry of output_channel_for_each_path
        conv5_5 = tf.layers.conv2d(x,
                                   output_channel_for_each_path[2],
                                   (5, 5),
                                   strides = (1, 1),
                                   padding = 'same',
                                   activation = tf.nn.relu,
                                   name = 'conv5_5')
        max_pooling = tf.layers.max_pooling2d(x,
                                              (2, 2),
                                              (2, 2),
                                              name = 'max_pooling')
        # max_pooling shrinks the image, so we pad it back to the input's spatial size
        max_pooling_shape = max_pooling.get_shape().as_list()[1:]
        input_shape = x.get_shape().as_list()[1:]
        width_padding = (input_shape[0] - max_pooling_shape[0]) // 2
        height_padding = (input_shape[1] - max_pooling_shape[1]) // 2
        padded_pooling = tf.pad(max_pooling,
                                [[0, 0],
                                 [width_padding, width_padding],
                                 [height_padding, height_padding],
                                 [0, 0]])
        # concatenate along the fourth dimension (the channel axis)
        concat_layer = tf.concat(
            [conv1_1, conv3_3, conv5_5, padded_pooling],
            axis = 3)
    return concat_layer
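# Since the pooling branch keeps the input's channel count, the block outputs
# sum(output_channel_for_each_path) + input_channels channels. A minimal shape
# check (a sketch; demo_in and the 'inception_demo' scope are illustration names):
demo_in = tf.placeholder(tf.float32, [None, 16, 16, 32])
demo_out = inception_block(demo_in, [16, 16, 16], name = 'inception_demo')
print(demo_out.get_shape().as_list())  # [None, 16, 16, 80]: 16+16+16 branches + 32 pooled input channels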
x = tf.placeholder(tf.float32, [None, 3072])
y = tf.placeholder(tf.int64, [None])
# reshape the flat vector into a three-channel image
x_image = tf.reshape(x, [-1, 3, 32, 32])
# 32*32
x_image = tf.transpose(x_image, perm = [0, 2, 3, 1])
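# Why the reshape + transpose above: CIFAR-10 stores each image channel-first
# (1024 R values, then 1024 G, then 1024 B), while conv2d expects NHWC.
# A quick numpy sanity check of that layout conversion (a sketch):
import numpy as np
_flat = np.arange(3072)
_chw = _flat.reshape(3, 32, 32)   # [channel, height, width]
_hwc = _chw.transpose(1, 2, 0)    # [height, width, channel]
assert _hwc[0, 0, 1] == 1024      # pixel (0,0)'s green value comes from offset 1024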
# first an ordinary convolution layer and a pooling layer
# conv1: neuron map, i.e. feature map / output image
conv1 = tf.layers.conv2d(x_image,
                         32, # output channel number
                         (3, 3), # kernel size
                         padding = 'same', # 'same' keeps the output size unchanged; 'valid' means no padding
                         activation = tf.nn.relu,
                         name = 'conv1')
# 16*16
pooling1 = tf.layers.max_pooling2d(conv1,
                                   (2, 2), # kernel size
                                   (2, 2), # stride
                                   name = 'pool1' # naming the layer makes the printed graph readable
                                   )
# two grouped-convolution (inception) blocks
inception_2a = inception_block(pooling1,
                               [16, 16, 16],
                               name = 'inception_2a')
inception_2b = inception_block(inception_2a,
                               [16, 16, 16],
                               name = 'inception_2b')
# followed by a pooling layer
pooling2 = tf.layers.max_pooling2d(inception_2b,
                                   (2, 2),
                                   (2, 2),
                                   name = 'pool2'
                                   )
# two more grouped-convolution blocks and another pooling layer
inception_3a = inception_block(pooling2,
                               [16, 16, 16],
                               name = 'inception_3a')
inception_3b = inception_block(inception_3a,
                               [16, 16, 16],
                               name = 'inception_3b')
pooling3 = tf.layers.max_pooling2d(inception_3b,
                                   (2, 2),
                                   (2, 2),
                                   name = 'pool3'
                                   )
# [None, 4*4*224] flatten the 3-D feature map into a matrix
flatten = tf.layers.flatten(pooling3)
y_ = tf.layers.dense(flatten, 10)
# cross entropy
loss = tf.losses.sparse_softmax_cross_entropy(labels=y, logits=y_)
# y_ -> softmax probabilities; y -> one-hot labels; loss = -sum(y * log(y_))
predict = tf.argmax(y_, 1)
# correct_prediction is a bool tensor, e.g. [1, 0, 1, 1, 1, 0, 0, 0] after the cast
correct_prediction = tf.equal(predict, y)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))
with tf.name_scope('train_op'):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)
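For reference, a minimal training-loop sketch for the graph above (the train_data.next_batch helper is an assumption, standing in for whatever CIFAR-10 batching code accompanies this tutorial):

init = tf.global_variables_initializer()
batch_size = 20
train_steps = 10000
with tf.Session() as sess:
    sess.run(init)
    for i in range(train_steps):
        # train_data.next_batch is a hypothetical helper returning (images, labels)
        batch_data, batch_labels = train_data.next_batch(batch_size)
        loss_val, acc_val, _ = sess.run([loss, accuracy, train_op],
                                        feed_dict = {x: batch_data, y: batch_labels})
        if (i + 1) % 500 == 0:
            print('[Train] Step: %d, loss: %4.5f, acc: %4.5f' % (i + 1, loss_val, acc_val))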
Mobile-Net
The basic MobileNet structure is: depthwise separable convolution -> BN -> ReLU -> 1*1 convolution -> BN -> ReLU.
BN is left out here; it is the topic of the next lesson.
def separable_conv_block(x,
                         output_channel_number,
                         name):
    """Separable convolution block implementation.

    Args:
        x: input tensor
        output_channel_number: number of channels produced by the 1*1 convolution that follows the depthwise convolution
        name: variable scope name for this block
    """
    # inside a variable_scope, layer names do not clash across blocks:
    # conv1 = 'conv1' => scope_name/conv1
    with tf.variable_scope(name):
        input_channel = x.get_shape().as_list()[-1]
        # split x along the fourth dimension (axis = 3) into input_channel slices
        # channel_wise_x: [channel1, channel2, ...]
        channel_wise_x = tf.split(x, input_channel, axis = 3)
        output_channels = []
        for i in range(len(channel_wise_x)):
            output_channel = tf.layers.conv2d(channel_wise_x[i],
                                              1,
                                              (3, 3),
                                              strides = (1, 1),
                                              padding = 'same',
                                              activation = tf.nn.relu,
                                              name = 'conv_%d' % i)
            output_channels.append(output_channel)
        concat_layers = tf.concat(output_channels, axis = 3)
        conv1_1 = tf.layers.conv2d(concat_layers,
                                   output_channel_number,
                                   (1, 1),
                                   strides = (1, 1),
                                   padding = 'same',
                                   activation = tf.nn.relu,
                                   name = 'conv1_1')
    return conv1_1
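# The per-channel split/concat above is spelled out for clarity. TF 1.x also
# ships a fused op that should behave similarly (a sketch; note the fused op
# applies the activation only once at the end, whereas the block above also
# puts a ReLU after each per-channel 3*3 convolution):
def separable_conv_block_fused(x, output_channel_number, name):
    # hypothetical drop-in alternative built on tf.layers.separable_conv2d
    with tf.variable_scope(name):
        return tf.layers.separable_conv2d(x,
                                          output_channel_number,
                                          (3, 3),
                                          strides = (1, 1),
                                          padding = 'same',
                                          activation = tf.nn.relu,
                                          name = 'separable_conv')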
x = tf.placeholder(tf.float32, [None, 3072])
y = tf.placeholder(tf.int64, [None])
# reshape the flat vector into a three-channel image
x_image = tf.reshape(x, [-1, 3, 32, 32])
# 32*32
x_image = tf.transpose(x_image, perm = [0, 2, 3, 1])
# conv1: neuron map, i.e. feature map / output image
conv1 = tf.layers.conv2d(x_image,
                         32, # output channel number
                         (3, 3), # kernel size
                         padding = 'same', # 'same' keeps the output size unchanged; 'valid' means no padding
                         activation = tf.nn.relu,
                         name = 'conv1')
# 16*16
pooling1 = tf.layers.max_pooling2d(conv1,
                                   (2, 2), # kernel size
                                   (2, 2), # stride
                                   name = 'pool1' # naming the layer makes the printed graph readable
                                   )
separable_2a = separable_conv_block(pooling1,
                                    32,
                                    name = 'separable_2a')
separable_2b = separable_conv_block(separable_2a,
                                    32,
                                    name = 'separable_2b')
pooling2 = tf.layers.max_pooling2d(separable_2b,
                                   (2, 2),
                                   (2, 2),
                                   name = 'pool2'
                                   )
separable_3a = separable_conv_block(pooling2,
                                    32,
                                    name = 'separable_3a')
separable_3b = separable_conv_block(separable_3a,
                                    32,
                                    name = 'separable_3b')
pooling3 = tf.layers.max_pooling2d(separable_3b,
                                   (2, 2),
                                   (2, 2),
                                   name = 'pool3')
# [None, 4*4*32] flatten the 3-D feature map into a matrix
flatten = tf.layers.flatten(pooling3)
y_ = tf.layers.dense(flatten, 10)
# cross entropy
loss = tf.losses.sparse_softmax_cross_entropy(labels=y, logits=y_)
# y_ -> softmax probabilities; y -> one-hot labels; loss = -sum(y * log(y_))
predict = tf.argmax(y_, 1)
# correct_prediction is a bool tensor, e.g. [1, 0, 1, 1, 1, 0, 0, 0] after the cast
correct_prediction = tf.equal(predict, y)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))
with tf.name_scope('train_op'):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)
The accuracy here is about 60% after 10,000 training steps: MobileNet's reduced parameter count and computation come at some cost in accuracy.
All of the runs here train for only 10,000 steps; real neural-network training goes far beyond that, possibly reaching the scale of a million steps.
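To see where the savings come from: a standard 3*3 convolution with C_in input channels and C_out output channels needs 3*3*C_in*C_out weights, while the separable block only needs 3*3*C_in (one 3*3 filter per input channel) plus C_in*C_out (the 1*1 convolution). A quick calculation for the 32-channel layers used above:

c_in, c_out, k = 32, 32, 3
standard_conv = k * k * c_in * c_out           # 9216 weights
separable_conv = k * k * c_in + c_in * c_out   # 288 + 1024 = 1312 weights
print(standard_conv / separable_conv)          # ~7x fewer parameters (and multiply-adds)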