TensorFlowDeepAutoencoder icon indicating copy to clipboard operation
TensorFlowDeepAutoencoder copied to clipboard

how to save and restore the model?

Open simjs00 opened this issue 8 years ago • 2 comments

i tried saving after the supervised training and restore the model , it gave an error of Attempting to use uninitialized value autoencoder_variables/biases3_2. Almost the same thing happen when i try to save the pretrained network after pretraining is done.

simjs00 avatar Aug 31 '17 05:08 simjs00

If you want an advice, I recommend you to provide more details. For example, the most important lines of code.

Also, try looking for this problem at the StackOverflow. I think that your problem is in the way you save and restore it, but not in this particular model.

Svito-zar avatar Aug 31 '17 06:08 Svito-zar

[this is what i did, first i load the graph and restore the value from the checkpoint and construct the auto encoder network with the same training shape parameter , then try to do evaluation for the correct label of mnist datasets. error raised : Attempting to use uninitialized value autoencoder_variables/biases4_2

def do_eval(sess, eval_correct,images_placeholder, labels_placeholder, data_set): true_count = 0 # Counts the number of correct predictions. steps_per_epoch = data_set.num_examples // FLAGS.batch_size num_examples = steps_per_epoch * FLAGS.batch_size for step in range(steps_per_epoch): feed_dict = fill_feed_dict(data_set, images_placeholder, labels_placeholder) true_count += sess.run(eval_correct, feed_dict=feed_dict) precision = true_count / num_examples print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' % (num_examples, true_count, precision))

def evaluation(logits, labels): return tf.reduce_sum(tf.cast(correct, tf.int32))

def test_nets(self): data = read_data_sets(FLAGS.data_dir) ckpt = tf.train.get_checkpoint_state("model_sps_2017-08-29_11:45:25") sess = tf.InteractiveSession() saver = tf.train.import_meta_graph('model_sps_2017-08-29_11:45:25/model.meta') saver.restore(sess, ckpt.model_checkpoint_path) with sess.as_default():

    ae_shape = [784, 2000, 2000, 2000, 10]
    ae = AutoEncoder(ae_shape, sess)

    input_pl = tf.placeholder(tf.float32, shape=(FLAGS.batch_size,
                                         FLAGS.image_pixels),name='input_pl')
    sup_net = ae.supervised_net(input_pl)
    data = read_data_sets(FLAGS.data_dir)
    labels_placeholder = tf.placeholder(tf.int32,
                                      shape=FLAGS.batch_size,
                                      name='target_pl')
    eval_correct = evaluation(sup_net, labels_placeholder)
    do_eval(sess,
      eval_correct,
      input_pl,
      labels_placeholder,
      data.test)

this is how train and save the code def main_supervised(ae):

with ae.session.graph.as_default(): saver = tf.train.Saver() sess = ae.session input_pl = tf.placeholder(tf.float32, shape=(FLAGS.batch_size, FLAGS.image_pixels), name='input_pl') logits = ae.supervised_net(input_pl)

data = read_data_sets(FLAGS.data_dir)
num_train = data.train.num_examples

labels_placeholder = tf.placeholder(tf.int32,
                                    shape=FLAGS.batch_size,
                                    name='target_pl')

loss = loss_supervised(logits, labels_placeholder)
train_op, global_step = training(loss, FLAGS.supervised_learning_rate)
eval_correct = evaluation(logits, labels_placeholder)

hist_summaries = [ae['biases{0}'.format(i + 1)]
                  for i in range(ae.num_hidden_layers + 1)]
hist_summaries.extend([ae['weights{0}'.format(i + 1)]
                       for i in range(ae.num_hidden_layers + 1)])

hist_summaries = [tf.summary.histogram(v.op.name + "_fine_tuning", v)
                  for v in hist_summaries]
summary_op = tf.summary.merge(hist_summaries)

summary_writer = tf.summary.FileWriter(FLAGS.summary_dir, sess.graph_def)
# tf.train.SummaryWriter(pjoin(FLAGS.summary_dir,
#                                               'fine_tuning'),
#                                         graph_def=sess.graph_def,
#                                         flush_secs=FLAGS.flush_secs)

vars_to_init = ae.get_variables_to_init(ae.num_hidden_layers + 1)
vars_to_init.append(global_step)
#sess.run(tf.initialize_variables(vars_to_init))
init = tf.initialize_all_variables() 
sess.run(init)
steps =  (num_train//FLAGS.batch_size)
for k in range(1):
  for step in range(1):
    start_time = time.time()

    feed_dict = fill_feed_dict(data.train,
                               input_pl,
                               labels_placeholder)

    _, loss_value = sess.run([train_op, loss],
                             feed_dict=feed_dict)

    duration = time.time() - start_time

    # Write the summaries and print an overview fairly often.
    if step % 1 == 0:
      # Print status to stdout.
      print('Step %d/%d: loss = %.2f (%.3f sec)' % (step, steps,loss_value, duration))
      # Update the events file.

      summary_str = sess.run(summary_op, feed_dict=feed_dict)
      summary_writer.add_summary(summary_str, step)
      # summary_img_str = sess.run(
      #     tf.summary.image("training_images",
      #                      tf.reshape(input_pl,
      #                                 (FLAGS.batch_size,
      #                                  FLAGS.image_size,
      #                                  FLAGS.image_size, 1)),
      #                      max_outputs=10),
      #     feed_dict=feed_dict
      # )
      # summary_writer.add_summary(summary_img_str)

    if (step + 1) % 1000 == 0 or (step + 1) == steps:
      train_sum = do_eval_summary("training_error",
                                  sess,
                                  eval_correct,
                                  input_pl,
                                  labels_placeholder,
                                  data.train)

      val_sum = do_eval_summary("validation_error",
                                sess,
                                eval_correct,
                                input_pl,
                                labels_placeholder,
                                data.validation)

      test_sum = do_eval_summary("test_error",
                                 sess,
                                 eval_correct,
                                 input_pl,
                                 labels_placeholder,
                                 data.test)

      summary_writer.add_summary(train_sum, step)
      summary_writer.add_summary(val_sum, step)
      summary_writer.add_summary(test_sum, step)

folder = "model_sps_"+str(strftime("%Y-%m-%d_%H:%M:%S", gmtime()))
os.mkdir(folder)
folder += "/model"
saver.save(sess, folder)
do_eval(sess,
    eval_correct,
    input_pl,
    labels_placeholder,
    data.test)

simjs00 avatar Aug 31 '17 07:08 simjs00