In [ ]:
from google.colab import drive
drive.mount('/content/drive')
Mounted at /content/drive
In [ ]:
!nvidia-smi
Fri Jun 11 14:27:06 2021       
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 465.27       Driver Version: 460.32.03    CUDA Version: 11.2     |
|-------------------------------+----------------------+----------------------+
| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
|                               |                      |               MIG M. |
|===============================+======================+======================|
|   0  Tesla V100-SXM2...  Off  | 00000000:00:04.0 Off |                    0 |
| N/A   45C    P0    27W / 300W |      0MiB / 16160MiB |      0%      Default |
|                               |                      |                  N/A |
+-------------------------------+----------------------+----------------------+
                                                                               
+-----------------------------------------------------------------------------+
| Processes:                                                                  |
|  GPU   GI   CI        PID   Type   Process name                  GPU Memory |
|        ID   ID                                                   Usage      |
|=============================================================================|
|  No running processes found                                                 |
+-----------------------------------------------------------------------------+

Configuration

In [ ]:
RUN_TRAINING = True
LOAD_PREV_MODEL = False
RUN_TESTING = True
In [ ]:
if RUN_TRAINING and LOAD_PREV_MODEL:
  raise Exception('Redundant training is being run. Change RUN_TRAINING or LOAD_PREV_MODEL value.')

if not RUN_TRAINING and not LOAD_PREV_MODEL:
  raise Exception('No weights will be loaded for the testing phase. Change RUN_TRAINING or LOAD_PREVIOUS_MODEL.')
In [ ]:
# Variables and hyperparameters for model

BATCH_SIZE = 32
TOTAL_LOSS_VAE_MULTIPLIER = 100
NUM_EPOCHS = 5
LAMBDA = 1e-3
LEARNING_RATE = 0.0001
In [ ]:
# Necessary tensorflow addons for triplet loss
!pip install -q -U tensorflow-addons
     |████████████████████████████████| 686kB 15.1MB/s 
In [ ]:
# For saving models
!pip install -q pyyaml h5py

Architecture setup

In [ ]:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.pyplot import imshow
import io
import tensorflow_addons as tfa
import os
In [ ]:
vae = keras.models.load_model('/content/drive/MyDrive/saved_model')
vae.build((None,256,256,3))
WARNING:tensorflow:SavedModel saved prior to TF 2.5 detected when loading Keras model. Please ensure that you are saving the model with model.save() or tf.keras.models.save_model(), *NOT* tf.saved_model.save(). To confirm, there should be a file named "keras_metadata.pb" in the SavedModel directory.
In [ ]:
class ConvexCombination(layers.Layer):
    '''
    Custom layer for convex combination.
    
    Input: 2 x None x 256 x 256 x 3 tensor, where [0] is batch of original vehicle images and [1] is a batch
    of vehicle images with salient features highlighted by the VAE.
    
    Output: None x 256 x 256 x 3 tensor (a batch of convex-combined vehicle images)
    '''
    def __init__(self):
        super(ConvexCombination, self).__init__()
        init_weight_tensor = tf.convert_to_tensor(0.01)
        self.w = tf.Variable(initial_value=init_weight_tensor, trainable=True)
        
    def call(self, inputs):
        clipped_w = tf.clip_by_value(self.w, 0, 1)
        orig_img_part = clipped_w * inputs[0]
        recon_img_part = (1 - clipped_w) * inputs[1]
        return orig_img_part + recon_img_part
In [ ]:
# Import the pre-trained ResNet50 model
resnet50 = tf.keras.applications.ResNet50(include_top=False, input_shape=(256,256,3), weights=None)
In [ ]:
def load_aic21():
  '''
  Loads AIC21 onto local disk.
  '''
  if not (tf.io.gfile.exists('/content/AIC21')):
    !mkdir /content/AIC21
    print("Zipping AIC21...")
    !zip -FF /content/drive/MyDrive/AIC21/AIC21_Track2_ReID.zip --out /content/AIC21/AIC21_Track2_ReID_full.zip
    %cd /content/AIC21
    print("Unzipping AIC21...")
    !unzip -FF -q AIC21_Track2_ReID_full.zip
    print("Unzipping AIC21 done!")
  return "/content/AIC21/AIC21_Track2_ReID"
In [ ]:
from IPython.display import clear_output

train_dir = load_aic21()
clear_output()

already_run = False
In [ ]:
if not already_run:
  %cd /content/AIC21/AIC21_Track2_ReID/image_train/

  # Copy the script which creates the AIC21 image folders into the correct location
  !cp /content/drive/MyDrive/AIC21/format_aic21_ids_linux.sh /content/AIC21/AIC21_Track2_ReID/image_train

  !chmod 777 /content/AIC21/AIC21_Track2_ReID/image_train/format_aic21_ids_linux.sh

  !/content/AIC21/AIC21_Track2_ReID/image_train/./format_aic21_ids_linux.sh

  already_run = True
/content/AIC21/AIC21_Track2_ReID/image_train
In [ ]:
class ReId(keras.Model):
    def __init__(self, vae, num_ids, **kwargs):
        super(ReId, self).__init__(**kwargs)
        self.vae = vae
        self.convex_combination = ConvexCombination()
        self.resnet_50 = resnet50
        self.glob_avg_pool = keras.layers.GlobalAveragePooling2D()
        self.bnneck = keras.layers.BatchNormalization()
        self.final_fc_layer = keras.layers.Dense(num_ids)
        self.total_loss_tracker = keras.metrics.Mean(name="total_loss")
        self.vae_loss_tracker = keras.metrics.Mean(
            name="vae_loss"
        )
        self.classification_loss_tracker = keras.metrics.Mean(name="classification_loss")
        self.triplet_loss_tracker = keras.metrics.Mean(name="triplet_loss")

    @property
    def metrics(self):
        return [
            self.total_loss_tracker,
            self.vae_loss_tracker,
            # Classification loss is cross-entropy loss here
            self.classification_loss_tracker,
            self.triplet_loss_tracker,
        ]

    def train_step(self, data):
        x, y = data
        y = tf.cast(y, tf.int32)
        with tf.GradientTape() as tape:
            # Calculate VAE loss
            z_mean, z_log_var, z = self.vae.encoder(x)
            reconstruction = self.vae.decoder(z)
            reconstruction_loss = tf.reduce_mean(
                    keras.losses.mean_squared_error(x, reconstruction)
            )
            kl_loss = -0.5 * (1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var))
            kl_loss = tf.reduce_mean(tf.reduce_sum(kl_loss, axis=1))
            
            # Calculate VAE loss
            vae_loss = reconstruction_loss + (LAMBDA * kl_loss)
            
            embedding_output = self.call(x)
            bnneck_output = self.bnneck(embedding_output)
            training_output = self.final_fc_layer(bnneck_output)

            # Calculate triplet loss
            triplet_loss = tfa.losses.triplet_semihard_loss(y_true=y, y_pred=embedding_output)

            # Calculate cross-entropy loss
            ce_loss = keras.losses.sparse_categorical_crossentropy(y, training_output)
            
            # Sum up the losses
            total_loss = triplet_loss + ce_loss + (TOTAL_LOSS_VAE_MULTIPLIER * vae_loss)
            
        grads = tape.gradient(total_loss, self.trainable_weights)
        self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
        self.total_loss_tracker.update_state(total_loss)
        self.classification_loss_tracker.update_state(ce_loss)
        self.triplet_loss_tracker.update_state(triplet_loss)
        self.vae_loss_tracker.update_state(vae_loss)
        return {
            "loss": self.total_loss_tracker.result(),
            "classification loss:": self.classification_loss_tracker.result(),
            "triplet loss:": self.triplet_loss_tracker.result(),
            "vae loss:": self.vae_loss_tracker.result(),
        }

    def call(self, data):
        recon_img = self.vae(data)
        convex_combo_input = [data, (data - recon_img)]
        convex_combo_output = self.convex_combination(convex_combo_input)
        final_output = self.glob_avg_pool(self.resnet_50(convex_combo_output))
        return final_output

Image import

In [ ]:
def normalize_vehicle_img(input_img):
    rescaled_input_img = input_img / 255.0
    output_img = rescaled_input_img - (tf.reduce_mean(rescaled_input_img))
    output_img = output_img / (tf.math.reduce_std(rescaled_input_img))
    output_img = (output_img * 0.5) + 0.5
    return tf.clip_by_value(output_img, 0, 1)
In [ ]:
import tensorflow_datasets as tfds
In [ ]:
import os

def read_data(training_directory):
  '''
  Returns a three-tuple. The first item is a list of lists of paths to images,
  the second is a list of lists of labels, and the third is a dictionary mapping
  the class directory names to the labels in the second list.

  Parameters:
    training_directory: Path to directory containing directories of images where
                        each directory only contains images of one ID.
  '''
  image_list_final = []
  label_list_final = []
  label_map_dict = {}
  count_label = 0

  # Iterate through each class directory within training directory
  for class_name in os.listdir(training_directory):
    image_list = []
    label_list = []

    # Get path of class directory
    class_path = os.path.join(training_directory, class_name)

    # Ensure path is a directory and not a file
    if not (os.path.isdir(class_path)):
      continue

    # Entering name of directory as key in dictionary returns the index of the
    # class
    label_map_dict[class_name] = count_label

    # Iterate through each image in the class directory
    for image_name in os.listdir(class_path):
      # Get path of the image
      image_path = os.path.join(class_path, image_name)

      # Add index of the class to the label list
      label_list.append(count_label)

      # Add path to image to the image list
      image_list.append(image_path)

    count_label += 1

    # Add the new lists to the final lists
    image_list_final.append(image_list)
    label_list_final.append(label_list)

  return image_list_final, label_list_final, label_map_dict


def _parse_function(filename, label):
  '''
  Function used to parse files into dataset.
  '''
  image_string = tf.io.read_file(filename, "file_reader")
  image_decoded = tf.image.decode_jpeg(image_string, channels=3)
  image = tf.cast(image_decoded, tf.float32)
  image = tf.image.resize(image, (256,256))
  image = normalize_vehicle_img(image)
  return image, label


def gen_list_of_id_datasets(training_directory):
  '''
  Returns a list of datasets where each dataset contains the images from a
  single ID.

  Parameters:
    training_directory: Path to directory containing directories of images where
                        each directory only contains images of one ID.
  '''
  image_list_final, label_list_final, label_map_dict = read_data(training_directory)
  
  dataset_list = []
  for i,j in zip(image_list_final, label_list_final):
    dataset = tf.data.Dataset.from_tensor_slices((tf.constant(i), tf.constant(j)))
    dataset = dataset.shuffle(len(i))
    #dataset = dataset.repeat(epochs)
    dataset = dataset.map(_parse_function)
    dataset_list.append(dataset)

  img_count = sum( [ len(inner_lst) for inner_lst in image_list_final])
  print("Created datasets for", str(len(dataset_list)), "IDs, containing a total of", str(img_count), "images.")

  return dataset_list
In [ ]:
import random

def gen_list_of_equalcut_datasets(training_directory, N, split=1.0):
  '''
  Returns a list of datasets where each dataset contains the images from a
  single ID.

  Parameters:
    training_directory: Path to directory containing directories of images where
                        each directory only contains images of one ID.
    N:                  Factor to divide BATCH_SIZE by in order to determine how many instances of each ID will exist
                        within a batch.
    split=1.0:          Proportion of the dataset to take images from for cases where a smaller sample size is needed.
  '''
  # Calculate how many instances of each ID should be within each batch.
  K = int(BATCH_SIZE / N)

  # Get list of image file names, label indeces and a dictionary linking indices to class names
  image_list_final, label_list_final, label_map_dict = read_data(training_directory)

  # Create dictionary to map labels to paths
  id_split_dict = {}
  for i,j in zip(image_list_final, label_list_final):
    for a,b in zip(i,j):
      if b in id_split_dict:
        id_split_dict[b].append(a)
      else:
        id_split_dict[b] = [a]

  # Shuffle dataset
  kys = list(id_split_dict.keys())
  random.shuffle(kys)

  # Value to keep track of current index in each of the lists
  i = 0

  # Lists of paths and labels which will be in the final dataset
  final_dataset_paths = []
  final_dataset_labels = []

  while True:
    # Identify the lists which still have images that haven't been added to the dataset yet
    remaining_list_ids = []
    for id in kys:
      if len(id_split_dict[id]) > i:
        remaining_list_ids.append(id)
    
    # Ending condition for loop: when there aren't any (or there's just one) lists left with image paths in them
    if len(remaining_list_ids) <= 1:
      break

    # Concatenate K image paths from each list into the new list of paths
    for id in remaining_list_ids:
      final_dataset_paths.extend((id_split_dict[id])[i:i+K])
      ext_length = len(id_split_dict[id][i:i+K])
      final_dataset_labels.extend([id]*ext_length)

    # Move forward K places
    i += K

  # Create dataset from the list of paths
  dataset = tf.data.Dataset.from_tensor_slices((tf.constant(tuple(final_dataset_paths)), tf.constant(tuple(final_dataset_labels))))

  #dataset = dataset.repeat(epochs)

  # Convert each of the paths into actual images
  dataset = dataset.map(_parse_function).batch(BATCH_SIZE)

  print("Created dataset of length", len(dataset))

  return dataset
In [ ]:
dl = gen_list_of_equalcut_datasets('/content/AIC21/AIC21_Track2_ReID/image_train', 8)
Created dataset of length 1643

Model training

In [ ]:
reid = ReId(vae, 440)
In [ ]:
!mkdir -p /content/tr

checkpoint_path = "/content/training_1/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)

# Create a callback that saves the model's weights
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                                 save_weights_only=True,
                                                 verbose=1)
In [ ]:
if RUN_TRAINING:
  reid.compile(optimizer=keras.optimizers.Adam(learning_rate=LEARNING_RATE))
  reid.fit(dl, epochs=NUM_EPOCHS, batch_size=BATCH_SIZE, callbacks=[cp_callback])
Epoch 1/5
1643/1643 [==============================] - 547s 316ms/step - loss: 14.1225 - classification loss:: 8.8753 - triplet loss:: 0.8222 - vae loss:: 0.0443

Epoch 00001: saving model to /content/training_1/cp.ckpt
Epoch 2/5
1643/1643 [==============================] - 514s 313ms/step - loss: 11.1443 - classification loss:: 6.3401 - triplet loss:: 0.2405 - vae loss:: 0.0456

Epoch 00002: saving model to /content/training_1/cp.ckpt
Epoch 3/5
1643/1643 [==============================] - 513s 312ms/step - loss: 10.9159 - classification loss:: 6.1608 - triplet loss:: 0.2383 - vae loss:: 0.0452

Epoch 00003: saving model to /content/training_1/cp.ckpt
Epoch 4/5
1643/1643 [==============================] - 513s 312ms/step - loss: 10.6439 - classification loss:: 6.0914 - triplet loss:: 0.1906 - vae loss:: 0.0436

Epoch 00004: saving model to /content/training_1/cp.ckpt
Epoch 5/5
1643/1643 [==============================] - 513s 312ms/step - loss: 10.6179 - classification loss:: 6.0993 - triplet loss:: 0.1695 - vae loss:: 0.0435

Epoch 00005: saving model to /content/training_1/cp.ckpt
In [ ]:
if RUN_TRAINING:
  # Save the entire model as a SavedModel.
  !mkdir -p /content/saved_model
  reid.build((None,256,256,3))
  reid.save('/content/drive/MyDrive/full_pipeline_vehicle_reid_prev_model/reid_latest')
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-29-1cdc94d7719c> in <module>()
      3   get_ipython().system('mkdir -p /content/saved_model')
      4   reid.build((None,256,256,3))
----> 5   reid.save('/content/drive/MyDrive/full_pipeline_vehicle_reid_prev_model/reid_latest')

/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/training.py in save(self, filepath, overwrite, include_optimizer, save_format, signatures, options, save_traces)
   2110     # pylint: enable=line-too-long
   2111     save.save_model(self, filepath, overwrite, include_optimizer, save_format,
-> 2112                     signatures, options, save_traces)
   2113 
   2114   def save_weights(self,

/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/saving/save.py in save_model(model, filepath, overwrite, include_optimizer, save_format, signatures, options, save_traces)
    149     with generic_utils.SharedObjectSavingScope():
    150       saved_model_save.save(model, filepath, overwrite, include_optimizer,
--> 151                             signatures, options, save_traces)
    152 
    153 

/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/saving/saved_model/save.py in save(model, filepath, overwrite, include_optimizer, signatures, options, save_traces)
     88     with utils.keras_option_scope(save_traces):
     89       saved_nodes, node_paths = save_lib.save_and_return_nodes(
---> 90           model, filepath, signatures, options)
     91 
     92     # Save all metadata to a separate file in the SavedModel directory.

/usr/local/lib/python3.7/dist-packages/tensorflow/python/saved_model/save.py in save_and_return_nodes(obj, export_dir, signatures, options, raise_metadata_warning, experimental_skip_checkpoint)
   1102   _, exported_graph, object_saver, asset_info, saved_nodes, node_paths = (
   1103       _build_meta_graph(obj, signatures, options, meta_graph_def,
-> 1104                         raise_metadata_warning))
   1105   saved_model.saved_model_schema_version = constants.SAVED_MODEL_SCHEMA_VERSION
   1106 

/usr/local/lib/python3.7/dist-packages/tensorflow/python/saved_model/save.py in _build_meta_graph(obj, signatures, options, meta_graph_def, raise_metadata_warning)
   1289   with save_context.save_context(options):
   1290     return _build_meta_graph_impl(obj, signatures, options, meta_graph_def,
-> 1291                                   raise_metadata_warning)

/usr/local/lib/python3.7/dist-packages/tensorflow/python/saved_model/save.py in _build_meta_graph_impl(obj, signatures, options, meta_graph_def, raise_metadata_warning)
   1206   if signatures is None:
   1207     signatures = signature_serialization.find_function_to_export(
-> 1208         checkpoint_graph_view)
   1209 
   1210   signatures, wrapped_functions = (

/usr/local/lib/python3.7/dist-packages/tensorflow/python/saved_model/signature_serialization.py in find_function_to_export(saveable_view)
     97   # If the user did not specify signatures, check the root object for a function
     98   # that can be made into a signature.
---> 99   functions = saveable_view.list_functions(saveable_view.root)
    100   signature = functions.get(DEFAULT_SIGNATURE_ATTR, None)
    101   if signature is not None:

/usr/local/lib/python3.7/dist-packages/tensorflow/python/saved_model/save.py in list_functions(self, obj)
    153     if obj_functions is None:
    154       obj_functions = obj._list_functions_for_serialization(  # pylint: disable=protected-access
--> 155           self._serialization_cache)
    156       self._functions[obj] = obj_functions
    157     return obj_functions

/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/training.py in _list_functions_for_serialization(self, serialization_cache)
   2712     self.predict_function = None
   2713     functions = super(
-> 2714         Model, self)._list_functions_for_serialization(serialization_cache)
   2715     self.train_function = train_function
   2716     self.test_function = test_function

/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/base_layer.py in _list_functions_for_serialization(self, serialization_cache)
   3015   def _list_functions_for_serialization(self, serialization_cache):
   3016     return (self._trackable_saved_model_saver
-> 3017             .list_functions_for_serialization(serialization_cache))
   3018 
   3019   @property

/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/saving/saved_model/base_serialization.py in list_functions_for_serialization(self, serialization_cache)
     90       return {}
     91 
---> 92     fns = self.functions_to_serialize(serialization_cache)
     93 
     94     # The parent AutoTrackable class saves all user-defined tf.functions, and

/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/saving/saved_model/layer_serialization.py in functions_to_serialize(self, serialization_cache)
     72   def functions_to_serialize(self, serialization_cache):
     73     return (self._get_serialized_attributes(
---> 74         serialization_cache).functions_to_serialize)
     75 
     76   def _get_serialized_attributes(self, serialization_cache):

/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/saving/saved_model/layer_serialization.py in _get_serialized_attributes(self, serialization_cache)
     88 
     89     object_dict, function_dict = self._get_serialized_attributes_internal(
---> 90         serialization_cache)
     91 
     92     serialized_attr.set_and_validate_objects(object_dict)

/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/saving/saved_model/model_serialization.py in _get_serialized_attributes_internal(self, serialization_cache)
     46     # cache (i.e. this is the root level object).
     47     if len(serialization_cache[constants.KERAS_CACHE_KEY]) == 1:
---> 48       default_signature = save_impl.default_save_signature(self.obj)
     49 
     50     # Other than the default signature function, all other attributes match with

/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/saving/saved_model/save_impl.py in default_save_signature(layer)
    213 def default_save_signature(layer):
    214   original_losses = _reset_layer_losses(layer)
--> 215   fn = saving_utils.trace_model_call(layer)
    216   fn.get_concrete_function()
    217   _restore_layer_losses(original_losses)

/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/saving/saving_utils.py in trace_model_call(model, input_signature)
    117 
    118   if input_signature is None:
--> 119     raise_model_input_error(model)
    120 
    121   @def_function.function(input_signature=input_signature)

/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/saving/saving_utils.py in raise_model_input_error(model)
     92       'set. Usually, input shapes are automatically determined from calling'
     93       ' `.fit()` or `.predict()`. To manually set the shapes, call '
---> 94       '`model.build(input_shape)`.'.format(model))
     95 
     96 

ValueError: Model <__main__.ReId object at 0x7fe554658590> cannot be saved because the input shapes have not been set. Usually, input shapes are automatically determined from calling `.fit()` or `.predict()`. To manually set the shapes, call `model.build(input_shape)`.
In [ ]:
reid.summary()
In [ ]:

Loading the Testing Set

In [ ]:
from tensorflow.keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rescale=1.0/255.0,samplewise_center=True, samplewise_std_normalization=True)
test_it = datagen.flow_from_directory(train_dir, classes=['image_test'], batch_size=BATCH_SIZE,
                                       class_mode=None)

Loading Previous Model

In [ ]:
if LOAD_PREV_MODEL:
  reid. = keras.models.load_model('/content/drive/MyDrive/full_pipeline_vehicle_reid_prev_model/reid_09_06_21_12_51/reid_09_06_21_12_51.data-00000-of-00001')

Testing with two vehicle images

In [ ]:
def show_imgs(arr):
  '''
  Shows multiple images in an array of images.

  Parameters:
    arr:  Array of images
  '''
  for img in arr:
    plt.figure()
    plt.imshow(img)
In [ ]:
img1 = test_it[0][7]
img2 = test_it[0][11]
show_imgs([img1,img2])
In [ ]:
from datetime import datetime

before = datetime.now()

pred1 = reid.predict(np.expand_dims(img1, axis=0))

after = datetime.now()

print("Running pred1 took",(after-before))

from datetime import datetime

before = datetime.now()

pred2 = reid.predict(np.expand_dims(img2, axis=0))


after = datetime.now()

print("Running pred2 took",(after-before))
In [ ]:

In [ ]:
# The image to query the gallery set with
gallery_test_img = test_it[0][8]
show_imgs([gallery_test_img])

# Pass query image through model
query_embedding = reid.predict(np.expand_dims(gallery_test_img, axis=0))
In [ ]:
preds = reid.predict(test_it, workers=1, use_multiprocessing=False, verbose=1)
preds.shape
In [ ]:
# Calculate the Euclidean distances for each image from the query image
euc_distances = []

i = 0
for pred in preds:
  dist = query_embedding - pred
  euc_distance = np.linalg.norm(dist)
  euc_distances.append((euc_distance, i))

  i += 1
In [ ]:
def euc_sort(e):
  return e[0]

euc_distances.sort(key=euc_sort)
In [ ]:
euc_distances[0:10]
In [ ]:
tst_img = test_it[375][31]
show_imgs([tst_img])

Debugging

In [ ]:
import math

nan_ratios = []

print("Starting...")
for weight in reid.weights:
    num_nans = 0
    num_weights = 0
    for single_weight in weight.numpy().flatten():
        num_weights += 1
        if math.isnan(single_weight):
            num_nans += 1
    nan_ratios.append(num_nans / num_weights)

import matplotlib.pyplot as plt
plt.plot(nan_ratios)
plt.ylabel('some numbers')
plt.show()