import os
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import pickle
import copy
import json
from tqdm import tqdm

from utils.nn import NN
from utils.coco.coco import COCO
from utils.coco.pycocoevalcap.eval import COCOEvalCap
from utils.misc import ImageLoader, CaptionData, TopN

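# BaseModel holds the pieces shared by training and inference: the image
# loader, the NN helper, the global step, and checkpoint loading. Subclasses
# implement build() to construct the actual graph.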
class BaseModel(object):
    def __init__(self, config):
        self.config = config
        self.is_train = (config.phase == 'train')
        self.train_cnn = self.is_train and config.train_cnn
        self.image_loader = ImageLoader('./DeepRNN/utils/ilsvrc_2012_mean.npy')
        self.image_shape = [224, 224, 3]
        self.nn = NN(config)
        self.global_step = tf.Variable(0,
                                       name='global_step',
                                       trainable=False)
        self.build()

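    # build() must create the tensors referenced elsewhere in this class:
    # self.images, self.conv_feats, self.initial_memory, self.initial_output,
    # self.contexts, self.last_word, self.last_memory, self.last_output,
    # self.memory, self.output, and self.probs.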
    def build(self):
        raise NotImplementedError()

    def test(self, sess, test_data, vocabulary):
        """ Test the model using any given images. """
        config = self.config

        # Generate the captions for the images
        for k in tqdm(list(range(test_data.num_batches)), desc='path'):
            batch = test_data.next_batch()
            caption_data = self.beam_search(sess, batch, vocabulary)

            # The last batch may be padded with fake entries; skip them
            fake_cnt = 0 if k < test_data.num_batches - 1 \
                         else test_data.fake_count
            for l in range(test_data.batch_size - fake_cnt):
                word_idxs = caption_data[l][0].sentence
                score = caption_data[l][0].score
                caption = vocabulary.get_sentence(word_idxs)
                print('**' + caption + '**')

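    # Beam search keeps two TopN heaps per image: one for partial captions
    # still being extended and one for completed captions ending with '.'.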
    def beam_search(self, sess, image_files, vocabulary):
        """Use beam search to generate the captions for a batch of images."""
        # Feed in the images to get the contexts and the initial LSTM states
        config = self.config
        images = self.image_loader.load_images(image_files)
        contexts, initial_memory, initial_output = sess.run(
            [self.conv_feats, self.initial_memory, self.initial_output],
            feed_dict={self.images: images})

        partial_caption_data = []
        complete_caption_data = []
        for k in range(config.batch_size):
            initial_beam = CaptionData(sentence=[],
                                       memory=initial_memory[k],
                                       output=initial_output[k],
                                       score=1.0)
            partial_caption_data.append(TopN(config.beam_size))
            partial_caption_data[-1].push(initial_beam)
            complete_caption_data.append(TopN(config.beam_size))

        # Run beam search
        for idx in range(config.max_caption_length):
            partial_caption_data_lists = []
            for k in range(config.batch_size):
                data = partial_caption_data[k].extract()
                partial_caption_data_lists.append(data)
                partial_caption_data[k].reset()

            # Only the initial beam exists before the first word is generated
            num_steps = 1 if idx == 0 else config.beam_size
            for b in range(num_steps):
                if idx == 0:
                    last_word = np.zeros((config.batch_size), np.int32)
                else:
                    last_word = np.array([pcl[b].sentence[-1]
                                          for pcl in partial_caption_data_lists],
                                         np.int32)

                last_memory = np.array([pcl[b].memory
                                        for pcl in partial_caption_data_lists],
                                       np.float32)
                last_output = np.array([pcl[b].output
                                        for pcl in partial_caption_data_lists],
                                       np.float32)

                memory, output, scores = sess.run(
                    [self.memory, self.output, self.probs],
                    feed_dict={self.contexts: contexts,
                               self.last_word: last_word,
                               self.last_memory: last_memory,
                               self.last_output: last_output})

                # Find the beam_size most probable next words
                for k in range(config.batch_size):
                    caption_data = partial_caption_data_lists[k][b]
                    words_and_scores = list(enumerate(scores[k]))
                    words_and_scores.sort(key=lambda x: -x[1])
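                    # Keep one extra candidate, since one of the top words
                    # may be the end-of-sentence token '.'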
                    words_and_scores = words_and_scores[0:config.beam_size+1]

                    # Append each of these words to the current partial caption
                    for w, s in words_and_scores:
                        sentence = caption_data.sentence + [w]
                        # Beam score is the running product of word probabilities
                        score = caption_data.score * s
                        beam = CaptionData(sentence,
                                           memory[k],
                                           output[k],
                                           score)
                        if vocabulary.words[w] == '.':
                            complete_caption_data[k].push(beam)
                        else:
                            partial_caption_data[k].push(beam)

        # If no caption ending with '.' was found, fall back to partial beams
        results = []
        for k in range(config.batch_size):
            if complete_caption_data[k].size() == 0:
                complete_caption_data[k] = partial_caption_data[k]
            results.append(complete_caption_data[k].extract(sort=True))

        return results

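    # Checkpoints are .npy files holding a dict from variable name to value;
    # config.pickle stores the pickled config (including the global step)
    # used to locate the latest checkpoint.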
    def load(self, sess, model_file=None):
        """ Load the model. """
        config = self.config
        if model_file is not None:
            save_path = model_file
        else:
            info_path = os.path.join(config.save_dir, "config.pickle")
            with open(info_path, "rb") as info_file:
                config = pickle.load(info_file)
                global_step = config.global_step
            save_path = os.path.join(config.save_dir,
                                     str(global_step) + ".npy")

        # allow_pickle is required to load object arrays on newer NumPy
        data_dict = np.load(save_path, allow_pickle=True).item()
        count = 0
        for v in tqdm(tf.global_variables()):
            if v.name in data_dict:
                sess.run(v.assign(data_dict[v.name]))
                count += 1

    def load_cnn(self, session, data_path, ignore_missing=True):
        """ Load a pretrained CNN model. """
        data_dict = np.load(data_path, allow_pickle=True).item()
        count = 0
        for op_name in tqdm(data_dict):
            with tf.variable_scope(op_name, reuse=True):
                for param_name, data in data_dict[op_name].items():
                    try:
                        var = tf.get_variable(param_name)
                        session.run(var.assign(data))
                        count += 1
                    except ValueError:
                        # The parameter does not exist in the graph; skip it
                        if not ignore_missing:
                            raise
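
# Minimal usage sketch (hypothetical: `Config`, `test_data`, and `vocabulary`
# stand in for the project's config, data provider, and vocabulary objects,
# and `CaptionGenerator` for the concrete subclass implementing build()):
#
#     config = Config()
#     config.phase = 'test'
#     model = CaptionGenerator(config)
#     with tf.Session() as sess:
#         model.load(sess)
#         model.test(sess, test_data, vocabulary)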