Masterful CLI Trainer: Model Output Formats

Introduction

This guide describes the model output formats supported by Masterful. By the end of this guide, you should know which formats you can save your model in, and how to load those models back into your Python code.

Output Formats

Masterful currently supports the following serialization formats for your model:

Masterful Name | Format                 | Description
saved_model    | Tensorflow Saved Model | A Tensorflow-specific serialization format useful for encapsulating the entire model computation graph.
onnx           | ONNX                   | ONNX is an open format built to represent and share machine learning models.

Training vs Inference Model

The model saved by Masterful is an inference model, which includes all of the preprocessing and standardization used during training. Therefore, the input to the model at inference time should be un-resized, 8-bit integer images in the range [0, 255]. The model also only accepts unbatched data - specifically, single examples or examples with a batch size of 1.

All Masterful models output logits, so in order to get class probabilities you need to apply a softmax (multi-class) or sigmoid (binary) function to the outputs, as shown in the examples below.
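For example, the following minimal sketch (assuming inference_fn is the loaded model's serving signature, as in the full example below, and image is a single un-resized uint8 NumPy array) converts the logits of the binary Hot Dog classifier into a probability:

import tensorflow as tf

# Sketch: convert raw logits into a class probability for a binary classifier.
# Assumes `inference_fn` is the model's "serving_default" signature and
# `image` is a single, un-resized uint8 image array.
logits = inference_fn(args_0=image)['model']
probability = tf.nn.sigmoid(logits)  # use tf.nn.softmax for multi-class models
print(float(probability[0][0]))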

Tensorflow Saved Model

A Tensorflow Saved Model is a serialization format that contains a complete TensorFlow program, including trained parameters (i.e., tf.Variables) and computation. It does not require the original model building code to run, which makes it useful for sharing or deploying with TFLite, TensorFlow.js, TensorFlow Serving, or TensorFlow Hub.
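For example, because the Saved Model encapsulates the full computation graph, it can be converted for other deployment targets without the original training code. Below is a minimal sketch of a TFLite conversion; the saved_model directory name is an assumption and matches the directory used in the example that follows.

import tensorflow as tf

# Sketch: convert a Saved Model directory into a TFLite flatbuffer for
# on-device deployment. Assumes the Saved Model was extracted to a local
# directory named "saved_model".
converter = tf.lite.TFLiteConverter.from_saved_model("saved_model")
tflite_model = converter.convert()
with open("model.tflite", "wb") as f:
  f.write(tflite_model)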

How to Load a Saved Model in Tensorflow

Assume you have saved your model in the Tensorflow Saved Model format from Masterful. This creates a directory with all of the required files inside, including the saved weights and the protocol buffer definition of the model architecture. For demonstration purposes, you can download the pretrained model for a Hot Dog/Not Hot Dog detector from here. This .tar.gz file contains the saved model output for a model trained using this configuration file on the dataset in the public S3 bucket s3://masterful-public/datasets/hot_dog/. This model predicts 1 if the image is a hot dog, and 0 otherwise.
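Before running the full example, you can inspect the extracted Saved Model to confirm the input and output names used below (args_0 and model). A minimal sketch, assuming the archive was extracted to a local directory named saved_model:

import tensorflow as tf

# Sketch: print the serving signature of the extracted Saved Model to
# confirm its input ("args_0") and output ("model") tensor names.
model = tf.saved_model.load("saved_model")
signature = model.signatures["serving_default"]
print(signature.structured_input_signature)
print(signature.structured_outputs)

The complete example below downloads the Saved Model and a few sample images, loads the model, and runs inference on each image.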

def silence_logging():
  import os
  # Must be set before importing tensorflow.
  os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
  os.environ['TF_CPP_MAX_LOG_LEVEL'] = '0'
  os.environ['TF_CPP_MAX_VLOG_LEVEL'] = '0'

  import tensorflow as tf
  # Monkey-patch ABSL logging to disable warnings as well.
  from absl import logging as absl_logging

  def _warning(msg, *args, **kwargs):
    pass

  absl_logging.warning = _warning
silence_logging()

import numpy as np
import os
from PIL import Image
import tarfile
import tempfile
import tensorflow as tf
import urllib.request

# Saved model from the Hot Dog/Not Hot Dog dataset
SAVED_MODEL_TAR_GZ = "https://masterful-public.s3.us-west-1.amazonaws.com/933013963/static-data/saved_model.tar.gz"

# Sample images that you will predict on. Two hot dogs, two not hot dogs.
HOT_DOG_IMAGE_1 = "https://masterful-public.s3.us-west-1.amazonaws.com/933013963/static-data/824342.jpg"
HOT_DOG_IMAGE_2 = "https://masterful-public.s3.us-west-1.amazonaws.com/933013963/static-data/838604.jpg"
NOT_HOT_DOG_IMAGE_1 = "https://masterful-public.s3.us-west-1.amazonaws.com/933013963/static-data/58448.jpg"
NOT_HOT_DOG_IMAGE_2 = "https://masterful-public.s3.us-west-1.amazonaws.com/933013963/static-data/60069.jpg"

def predict_on_image(inference_fn, image_path: str, threshold: float = 0.5) -> str:
  """Wrapper function to predict the class for a local image path."""
  with Image.open(image_path) as pil_image:
    # The inference model accepts a single, un-resized uint8 image.
    image = np.array(pil_image)
    logits = inference_fn(args_0=image)['model']
    # The model outputs logits, so apply a sigmoid to get the probability.
    predictions = tf.nn.sigmoid(logits)

  cls_prediction = predictions[0][0]
  return "hot dog" if cls_prediction > threshold else "NOT hot dog"
  
# Save all of the artifacts (saved model, images, etc) into a temporary
# directory. 
with tempfile.TemporaryDirectory() as tempdir:
  # Download and extract the saved model tar.gz  
  file_name, _ = urllib.request.urlretrieve(SAVED_MODEL_TAR_GZ, os.path.join(tempdir, "saved_model.tar.gz"))
  with tarfile.open(file_name) as tar_file:
    tar_file.extractall(tempdir)
  
  # Download the images into the temp directory
  hot_dog_image_1, _ = urllib.request.urlretrieve(HOT_DOG_IMAGE_1, os.path.join(tempdir, "824342.jpg"))
  hot_dog_image_2, _ = urllib.request.urlretrieve(HOT_DOG_IMAGE_2, os.path.join(tempdir, "838604.jpg"))
  not_hot_dog_image_1, _ = urllib.request.urlretrieve(NOT_HOT_DOG_IMAGE_1, os.path.join(tempdir, "58448.jpg"))
  not_hot_dog_image_2, _ = urllib.request.urlretrieve(NOT_HOT_DOG_IMAGE_2, os.path.join(tempdir, "60069.jpg"))
 
  # Load the TF saved model
  model = tf.saved_model.load(os.path.join(tempdir, "saved_model"))
  
  # Get a reference to the inference function for serving.
  inference_fn = model.signatures["serving_default"]

  # Make the predictions on the sample images.
  prediction = predict_on_image(inference_fn, hot_dog_image_1)
  print(f"Image 1: {prediction} ({HOT_DOG_IMAGE_1})")

  prediction = predict_on_image(inference_fn, hot_dog_image_2)
  print(f"Image 2: {prediction} ({HOT_DOG_IMAGE_2})")

  prediction = predict_on_image(inference_fn, not_hot_dog_image_1)
  print(f"Image 3: {prediction} ({NOT_HOT_DOG_IMAGE_1})")

  prediction = predict_on_image(inference_fn, not_hot_dog_image_2)
  print(f"Image 4: {prediction} ({NOT_HOT_DOG_IMAGE_2})")

Running this script will yield the following results in the console:

Image 1: hot dog (https://masterful-public.s3.us-west-1.amazonaws.com/933013963/static-data/824342.jpg)
Image 2: hot dog (https://masterful-public.s3.us-west-1.amazonaws.com/933013963/static-data/838604.jpg)
Image 3: NOT hot dog (https://masterful-public.s3.us-west-1.amazonaws.com/933013963/static-data/58448.jpg)
Image 4: NOT hot dog (https://masterful-public.s3.us-west-1.amazonaws.com/933013963/static-data/60069.jpg)

You can find more details on loading and using Tensorflow Saved Models here.

Open Neural Network Exchange (ONNX)

ONNX is an open format built to represent machine learning models. ONNX defines a common set of operators - the building blocks of machine learning and deep learning models - and a common file format to enable AI developers to use models with a variety of frameworks, tools, runtimes, and compilers.

From the ONNX website:

ONNX provides an open source format for AI models,
both deep learning and traditional ML. It defines
an extensible computation graph model, as well as 
definitions of built-in operators and standard data
types. Currently we focus on the capabilities needed
for inferencing (scoring).

ONNX is widely supported and can be found in many
frameworks, tools, and hardware.
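Because an ONNX model is stored as a single protocol buffer file, it can be loaded and validated independently of any runtime. The following is a minimal sketch, assuming a local file named efficientnetb0_v1.onnx (the file name used in the example below):

import onnx

# Sketch: load an ONNX model file and validate it against the ONNX spec.
model = onnx.load("efficientnetb0_v1.onnx")
onnx.checker.check_model(model)                  # raises if the model is malformed
print(model.opset_import[0].version)             # opset version of the graph
print([inp.name for inp in model.graph.input])   # graph input names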

How to load an ONNX model

ONNX provides an inference runtime engine, ONNX Runtime, that makes it easy to use ONNX models for inference and can natively run your model on a variety of platforms. For more details on using an ONNX model in the ONNX runtime environment, please see Inference using ONNX Runtime.
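As a minimal sketch of choosing where the model runs, you can pass an explicit list of execution providers when creating the session. The provider list below is an assumption; ONNX Runtime falls back to the CPU provider if a GPU provider is not available in your installation.

import onnxruntime

# Sketch: create an inference session that prefers the CUDA (GPU) execution
# provider and falls back to the CPU provider. The model path matches the
# file used in the example below.
session = onnxruntime.InferenceSession(
    "onnx/efficientnetb0_v1.onnx",
    providers=["CUDAExecutionProvider", "CPUExecutionProvider"])
print(session.get_providers())  # the providers actually in use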

The following snippet shows how to make predictions using the ONNX model saved after training the Hot Dog/Not Hot Dog model above.

import numpy as np
import onnx
import onnxruntime
import os
from PIL import Image
from scipy.stats import logistic
import tarfile
import tempfile
import urllib.request

# Saved model from the Hot Dog/Not Hot Dog dataset
ONNX_MODEL_TAR_GZ = "https://masterful-public.s3.us-west-1.amazonaws.com/933013963/static-data/onnx.tar.gz"

# Sample images that you will predict on. Two hot dogs, two not hot dogs.
HOT_DOG_IMAGE_1 = "https://masterful-public.s3.us-west-1.amazonaws.com/933013963/static-data/824342.jpg"
HOT_DOG_IMAGE_2 = "https://masterful-public.s3.us-west-1.amazonaws.com/933013963/static-data/838604.jpg"
NOT_HOT_DOG_IMAGE_1 = "https://masterful-public.s3.us-west-1.amazonaws.com/933013963/static-data/58448.jpg"
NOT_HOT_DOG_IMAGE_2 = "https://masterful-public.s3.us-west-1.amazonaws.com/933013963/static-data/60069.jpg"

def predict_on_image(session, image_path: str, threshold: float = 0.5) -> str:
  """Wrapper function to predict the class for a local image path."""
  with Image.open(image_path) as pil_image:
    # The inference model accepts a single, un-resized uint8 image.
    image = np.array(pil_image)
    # Passing None as the output names returns all of the model outputs.
    logits = session.run(None, {"args_0": image})[0]
    # The model outputs logits, so apply a sigmoid to get the probability.
    predictions = logistic.cdf(logits)

  cls_prediction = predictions[0][0]
  return "hot dog" if cls_prediction > threshold else "NOT hot dog"
  
# Save all of the artifacts (saved model, images, etc) into a temporary
# directory. 
with tempfile.TemporaryDirectory() as tempdir:
  # Download and extract the saved model tar.gz  
  file_name, _ = urllib.request.urlretrieve(ONNX_MODEL_TAR_GZ, os.path.join(tempdir, "onnx.tar.gz"))
  with tarfile.open(file_name) as tar_file:
    tar_file.extractall(tempdir)
  
  # Download the images into the temp directory
  hot_dog_image_1, _ = urllib.request.urlretrieve(HOT_DOG_IMAGE_1, os.path.join(tempdir, "824342.jpg"))
  hot_dog_image_2, _ = urllib.request.urlretrieve(HOT_DOG_IMAGE_2, os.path.join(tempdir, "838604.jpg"))
  not_hot_dog_image_1, _ = urllib.request.urlretrieve(NOT_HOT_DOG_IMAGE_1, os.path.join(tempdir, "58448.jpg"))
  not_hot_dog_image_2, _ = urllib.request.urlretrieve(NOT_HOT_DOG_IMAGE_2, os.path.join(tempdir, "60069.jpg"))

  # Load the onnx model
  session = onnxruntime.InferenceSession(os.path.join(tempdir, "onnx/efficientnetb0_v1.onnx"), None) 
 
  # Make the predictions on the sample images.
  prediction = predict_on_image(session, hot_dog_image_1)
  print(f"Image 1: {prediction} ({HOT_DOG_IMAGE_1})")

  prediction = predict_on_image(session, hot_dog_image_2)
  print(f"Image 2: {prediction} ({HOT_DOG_IMAGE_2})")

  prediction = predict_on_image(session, not_hot_dog_image_1)
  print(f"Image 3: {prediction} ({NOT_HOT_DOG_IMAGE_1})")

  prediction = predict_on_image(session, not_hot_dog_image_2)
  print(f"Image 4: {prediction} ({NOT_HOT_DOG_IMAGE_2})")

Running this script will yield the following results in the console:

Image 1: hot dog (https://masterful-public.s3.us-west-1.amazonaws.com/933013963/static-data/824342.jpg)
Image 2: hot dog (https://masterful-public.s3.us-west-1.amazonaws.com/933013963/static-data/838604.jpg)
Image 3: NOT hot dog (https://masterful-public.s3.us-west-1.amazonaws.com/933013963/static-data/58448.jpg)
Image 4: NOT hot dog (https://masterful-public.s3.us-west-1.amazonaws.com/933013963/static-data/60069.jpg)
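
If you are unsure of the input name (args_0 above) or the shape and dtype the model expects, the session itself can report them. A small sketch, assuming session is the onnxruntime.InferenceSession created above:

# Sketch: print the input and output metadata of an ONNX Runtime session to
# confirm tensor names, shapes, and dtypes.
for model_input in session.get_inputs():
  print(model_input.name, model_input.shape, model_input.type)
for model_output in session.get_outputs():
  print(model_output.name, model_output.shape, model_output.type)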