Skip to content

Commit

Permalink
Merge pull request #740 from ufal/three-wmt18-in-one
Browse files Browse the repository at this point in the history
Three wmt18-related changes in one
  • Loading branch information
jlibovicky authored Jul 19, 2018
2 parents dbf7d60 + 030f636 commit 21af0fe
Show file tree
Hide file tree
Showing 3 changed files with 26 additions and 4 deletions.
11 changes: 10 additions & 1 deletion neuralmonkey/experiment.py
Original file line number Diff line number Diff line change
Expand Up @@ -194,7 +194,16 @@ def load_variables(self, variable_files: List[str] = None) -> None:
self.build_model()

if variable_files is None:
variable_files = [self.get_path("variables.data")]
if os.path.exists(self.get_path("variables.data.avg-0.index")):
variable_files = [self.get_path("variables.data.avg-0")]
elif os.path.exists(self.get_path("variables.data.avg.index")):
variable_files = [self.get_path("variables.data.avg")]
else:
best_var_file = self.get_path("variables.data.best")
with open(best_var_file, "r") as f_best:
var_path = f_best.read().rstrip()
variable_files = [self.get_path(var_path)]

log("Default variable file '{}' will be used for loading "
"variables.".format(variable_files[0]))

Expand Down
5 changes: 5 additions & 0 deletions neuralmonkey/trainers/generic_trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,11 @@ def __init__(self,
tf.summary.scalar("train_l2", l2_value,
collections=["summary_train"])

# log all objectives
for obj in objectives:
tf.summary.scalar(
obj.name, obj.loss, collections=["summary_train"])

# if the objective does not have its own gradients,
# just use TF to do the derivative
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
Expand Down
14 changes: 11 additions & 3 deletions scripts/imagenet_features.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,13 +38,19 @@ def main():
"repository")
parser.add_argument("--model-checkpoint", type=str, required=True,
help="Path to the ImageNet model checkpoint.")
parser.add_argument("--conv-map", type=str, required=True,
parser.add_argument("--conv-map", type=str, required=False, default=None,
help="Name of the convolutional map that is.")
parser.add_argument("--vector", type=str, required=False, default=None,
help="Name of the feed-forward layer.")
parser.add_argument("--images", type=str,
help="File with paths to images or stdin by default.")
parser.add_argument("--batch-size", type=int, default=128)
args = parser.parse_args()

# Exactly one of --conv-map / --vector must be supplied.
# BUG FIX: the original condition
#     args.conv_map is None == args.vector is None
# is a *chained* comparison, equivalent to
#     (args.conv_map is None) and (None == args.vector) and (args.vector is None)
# so it raised only when BOTH options were missing and silently accepted
# both being given at once. Parenthesizing the two `is None` tests restores
# the intended "exactly one" (XOR) check: raise when both or neither is set.
if (args.conv_map is None) == (args.vector is None):
    raise ValueError(
        "You must provide either convolutional map or feed-forward layer.")

if not os.path.exists(args.input_prefix):
raise ValueError("Directory {} does not exist.".format(
args.input_prefix))
Expand All @@ -67,7 +73,7 @@ def main():
imagenet = ImageNet(
name="imagenet", data_id="images", network_type=args.net,
slim_models_path=args.slim_models, load_checkpoint=args.model_checkpoint,
spatial_layer=args.conv_map)
spatial_layer=args.conv_map, encoded_layer=args.vector)

log("Creating TensorFlow session.")
session = tf.Session()
Expand All @@ -87,7 +93,9 @@ def main():
def process_images():
dataset = Dataset("dataset", {"images": np.array(images)}, {})
feed_dict = imagenet.feed_dict(dataset)
feature_maps = session.run(imagenet.spatial_states, feed_dict=feed_dict)

fetch = imagenet.encoded if args.vector else imagenet.spatial_states
feature_maps = session.run(fetch, feed_dict=feed_dict)

for features, rel_path in zip(feature_maps, image_paths):
npz_path = os.path.join(args.output_prefix, rel_path + ".npz")
Expand Down

0 comments on commit 21af0fe

Please sign in to comment.