在跑一個簡單的CNN模型, 圖片為64X64
第一層卷積為8X5X5,池化為5X5,步長為2;
第二層卷積為16X5X5,池化為5X5,步長為2;
第三層卷積為32X1X1,池化為全局池化 16X16
輸出1X1X32的特征值,展平后輸入全連接層
第四層全連接層 輸出為2維
算出來最后輸入全連接層的特征值應(yīng)該有32個 輸出2個值
batch_size為20
但在使用sparse_softmax_cross_entropy計算交叉熵?fù)p失的時候 一直報維度錯誤:
logits and labels must have the same first dimension, got logits shape [1280,2] and labels shape [20]
代碼如下:
# -*- coding: utf-8 -*-
# Steganalysis with High-Level API
# import dataset
import json
import os

import numpy as np
import tensorflow as tf

import layer_module
import load_record
# Command-line hyperparameters and paths for the steganalysis experiment.
flags = tf.app.flags
flags.DEFINE_integer('num_epochs', 10, 'Number of training epochs')
flags.DEFINE_integer('batch_size', 20, 'Batch size')
flags.DEFINE_float('learning_rate', 0.01, 'Learning rate')
flags.DEFINE_float('dropout_rate', 0.5, 'Dropout rate')
flags.DEFINE_string('train_dataset', './dataset/train512.tfrecords',
                    'Filename of training dataset')
flags.DEFINE_string('eval_dataset', './dataset/test512.tfrecords',
                    'Filename of evaluation dataset')
flags.DEFINE_string('test_dataset', './dataset/test512.tfrecords',
                    'Filename of testing dataset')
# BUG FIX: help text was a copy/paste of 'Filename of testing dataset'.
flags.DEFINE_string('model_dir', 'models/steganalysis_cnn_model',
                    'Directory for model checkpoints and summaries')
FLAGS = flags.FLAGS
def stg_model_fn(features, labels, mode):
    """Model function for the steganalysis CNN Estimator.

    Args:
        features: batch of flattened grayscale image pixels.
        labels: integer class ids, shape [batch_size].
        mode: a tf.estimator.ModeKeys value.

    Returns:
        tf.estimator.EstimatorSpec for the requested mode.
    """
    # Input layer (NHWC). NOTE(review): the -1 absorbs *every* leftover pixel
    # into the batch axis. If each stored record is a 512x512 image (the
    # tfrecord filenames train512/test512 suggest so), one image silently
    # becomes (512*512)/(64*64) = 64 rows, so a batch of 20 yields 1280
    # logits while labels stay [20] -- exactly the reported
    # "logits shape [1280,2] and labels shape [20]" error. The input
    # pipeline must deliver true 64x64 images (resize/crop in parser).
    x = tf.reshape(features, [-1, 64, 64, 1])
    x = layer_module.conv_group(
        inputs=x,
        activation="tanh",
        filters=8,
        kernel_size=[5, 5],
        pool_size=5,
        strides=2,
        abs_layer=True,
        pool_padding="same")
    x = layer_module.conv_group(
        inputs=x,
        filters=16,
        activation="tanh",
        kernel_size=[5, 5],
        pool_size=5,
        strides=2,
        abs_layer=False,
        pool_padding="same")
    x = layer_module.conv_group(
        inputs=x,
        filters=32,
        activation="relu",
        kernel_size=[1, 1],
        pool_size=16,
        strides=1,
        abs_layer=False,
        pool_padding="valid")
    # Global pooling left a 1x1x32 map; flatten to [batch, 32] for the head.
    x = tf.reshape(x, [-1, 32])
    logits = tf.layers.dense(inputs=x, units=2)
    # BUG FIX: removed the debug `with tf.Session()` block. Running a session
    # inside a model_fn re-initializes all variables and must never be done
    # under Estimator; it also forced eager evaluation of an unbuilt graph.
    predictions = {
        # Generate predictions (for PREDICT and EVAL mode)
        "classes": tf.argmax(input=logits, axis=1),
        # `softmax_tensor` is used for PREDICT and by the logging hook.
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # Calculate loss (TRAIN and EVAL). sparse_softmax_cross_entropy consumes
    # integer class ids directly, so the dead one-hot conversion was removed.
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=FLAGS.learning_rate)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
                                          train_op=train_op)
    # EVAL mode: report classification accuracy.
    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(
            labels=labels, predictions=predictions["classes"])}
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def parser(record):
    """Parse one serialized TFRecord example into (image, label).

    Returns:
        image: float32 pixel tensor with an explicit static shape.
        label: int32 scalar class id.
    """
    keys_to_features = {
        'img_raw': tf.FixedLenFeature((), tf.string),
        'label': tf.FixedLenFeature((), tf.int64)
    }
    parsed = tf.parse_single_example(record, keys_to_features)
    image = tf.decode_raw(parsed['img_raw'], tf.uint8)
    image = tf.cast(image, tf.float32)
    # BUG FIX: decode_raw yields an unshaped 1-D tensor; pin the stored image
    # size here so any size mismatch fails loudly at the source instead of
    # silently inflating the batch downstream. The tfrecord filenames
    # (train512/test512) and the observed 1280 = 20 * (512/64)^2 logits
    # indicate 512x512 records. TODO(review): if records are really 64x64,
    # change this to [64, 64]; either way the model's [-1, 64, 64, 1]
    # reshape then keeps batch_size intact only for genuine 64x64 inputs.
    image = tf.reshape(image, [512, 512])
    label = tf.cast(parsed['label'], tf.int32)
    return image, label
def save_hp_to_json():
    '''Persist the current FLAGS hyperparameters as pretty-printed JSON.'''
    out_path = os.path.join(FLAGS.model_dir, 'hparams.json')
    with open(out_path, 'w') as fp:
        json.dump(FLAGS.flag_values_dict(), fp, indent=4, sort_keys=True)
def main(unused_argv):
    """Train the steganalysis Estimator, evaluate it, and save hparams."""
    def train_input_fn():
        # One-shot pipeline; .repeat(num_epochs) makes .train() stop on
        # OutOfRange without needing an explicit `steps` argument.
        train_dataset = tf.data.TFRecordDataset(FLAGS.train_dataset)
        train_dataset = train_dataset.map(parser)
        train_dataset = train_dataset.repeat(FLAGS.num_epochs)
        train_dataset = train_dataset.batch(FLAGS.batch_size)
        train_iterator = train_dataset.make_one_shot_iterator()
        features, labels = train_iterator.get_next()
        return features, labels

    def eval_input_fn():
        # Single pass over the evaluation set (deliberately no .repeat()).
        eval_dataset = tf.data.TFRecordDataset(FLAGS.eval_dataset)
        eval_dataset = eval_dataset.map(parser)
        eval_dataset = eval_dataset.batch(FLAGS.batch_size)
        eval_iterator = eval_dataset.make_one_shot_iterator()
        features, labels = eval_iterator.get_next()
        return features, labels

    steg_classifier = tf.estimator.Estimator(
        model_fn=stg_model_fn, model_dir=FLAGS.model_dir)
    # Train
    steg_classifier.train(input_fn=train_input_fn)
    # Evaluation
    eval_results = steg_classifier.evaluate(input_fn=eval_input_fn)
    print(eval_results)
    tf.logging.info('Saving hyperparameters ...')
    # BUG FIX: the log line claimed hyperparameters were being saved, but
    # save_hp_to_json() was never actually invoked.
    save_hp_to_json()
# Script entry point: tf.app.run() parses FLAGS and then calls main(argv).
if __name__ == "__main__":
    tf.app.run()
我不懂那個1280是怎么來的 明明batch_size只有20
補充layer_module的代碼:
def conv_group(inputs, activation, filters, kernel_size, pool_size, strides,
               pool_padding, abs_layer, training=False):
    """One conv -> (abs) -> batch-norm -> activation -> average-pool group.

    Args:
        inputs: NHWC feature map.
        activation: "relu", "tanh", or anything else for no activation.
        filters, kernel_size: conv2d parameters (conv padding is "same").
        pool_size, strides, pool_padding: average_pooling2d parameters.
        abs_layer: when True, take |x| after the convolution.
        training: NEW, defaults to False (the previous implicit behavior).
            Pass True during TRAIN so batch norm uses batch statistics;
            False makes it use its (otherwise never-updated) moving averages.

    Returns:
        The pooled feature map.
    """
    x = tf.layers.conv2d(
        inputs=inputs,
        filters=filters,
        kernel_size=kernel_size,
        padding="same")
    if abs_layer:
        x = tf.abs(x)
    # BUG FIX: batch_normalization previously ran with the default
    # training=False even while fitting; callers can now opt in.
    x = tf.layers.batch_normalization(inputs=x, training=training)
    if activation == "relu":
        x = tf.nn.relu(x)
    elif activation == "tanh":
        x = tf.nn.tanh(x)
    # Debug print() calls removed; inspect shapes via TensorBoard/tf.logging.
    x = tf.layers.average_pooling2d(
        inputs=x,
        padding=pool_padding,
        pool_size=pool_size,
        strides=strides)
    return x
北大青鳥APTECH成立于1999年。依托北京大學(xué)優(yōu)質(zhì)雄厚的教育資源和背景,秉承“教育改變生活”的發(fā)展理念,致力于培養(yǎng)中國IT技能型緊缺人才,是大數(shù)據(jù)專業(yè)的國家
北大青鳥中博軟件學(xué)院創(chuàng)立于2003年,作為華東區(qū)著名互聯(lián)網(wǎng)學(xué)院和江蘇省首批服務(wù)外包人才培訓(xùn)基地,中博成功培育了近30000名軟件工程師走向高薪崗位,合作企業(yè)超4
中公教育集團創(chuàng)建于1999年,經(jīng)過二十年潛心發(fā)展,已由一家北大畢業(yè)生自主創(chuàng)業(yè)的信息技術(shù)與教育服務(wù)機構(gòu),發(fā)展為教育服務(wù)業(yè)的綜合性企業(yè)集團,成為集合面授教學(xué)培訓(xùn)、網(wǎng)
達(dá)內(nèi)教育集團成立于2002年,是一家由留學(xué)海歸創(chuàng)辦的高端職業(yè)教育培訓(xùn)機構(gòu),是中國一站式人才培養(yǎng)平臺、一站式人才輸送平臺。2014年4月3日在美國成功上市,融資1
曾工作于聯(lián)想擔(dān)任系統(tǒng)開發(fā)工程師,曾在博彥科技股份有限公司擔(dān)任項目經(jīng)理從事移動互聯(lián)網(wǎng)管理及研發(fā)工作,曾創(chuàng)辦藍(lán)懿科技有限責(zé)任公司從事總經(jīng)理職務(wù)負(fù)責(zé)iOS教學(xué)及管理工作。
浪潮集團項目經(jīng)理。精通Java與.NET 技術(shù), 熟練的跨平臺面向對象開發(fā)經(jīng)驗,技術(shù)功底深厚。 授課風(fēng)格 授課風(fēng)格清新自然、條理清晰、主次分明、重點難點突出、引人入勝。
精通HTML5和CSS3;Javascript及主流js庫,具有快速界面開發(fā)的能力,對瀏覽器兼容性、前端性能優(yōu)化等有深入理解。精通網(wǎng)頁制作和網(wǎng)頁游戲開發(fā)。
具有10 年的Java 企業(yè)應(yīng)用開發(fā)經(jīng)驗。曾經(jīng)歷任德國Software AG 技術(shù)顧問,美國Dachieve 系統(tǒng)架構(gòu)師,美國AngelEngineers Inc. 系統(tǒng)架構(gòu)師。