#!/usr/bin/env python
# coding: utf-8

# # Hello Image Classification
#
# This basic introduction to OpenVINO™ shows how to do inference with an image classification model.
#
# A pre-trained [MobileNetV3 model](https://github.com/openvinotoolkit/open_model_zoo/blob/master/models/public/mobilenet-v3-small-1.0-224-tf/README.md) from [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo/) is used in this tutorial. For more information about how OpenVINO IR models are created, refer to the [TensorFlow to OpenVINO](../tensorflow-classification-to-openvino/tensorflow-classification-to-openvino.ipynb) tutorial.
#
# #### Table of contents:
#
# - [Imports](#Imports)
# - [Download the Model and data samples](#Download-the-Model-and-data-samples)
# - [Select inference device](#Select-inference-device)
# - [Load the Model](#Load-the-Model)
# - [Load an Image](#Load-an-Image)
# - [Do Inference](#Do-Inference)
#
# ### Installation Instructions
#
# This is a self-contained example that relies solely on its own code.
#
# We recommend running the notebook in a virtual environment. You only need a Jupyter server to start.
# For details, please refer to the [Installation Guide](https://github.com/openvinotoolkit/openvino_notebooks/blob/latest/README.md#-installation-guide).

# In[ ]:


# Install required packages
get_ipython().run_line_magic('pip', 'install -q "openvino>=2023.1.0" opencv-python tqdm "matplotlib>=3.4"')


# ## Imports
# [back to top ⬆️](#Table-of-contents:)

# In[1]:


from pathlib import Path

import cv2
import matplotlib.pyplot as plt
import numpy as np
import openvino as ov

# Fetch the `notebook_utils` module
import requests

r = requests.get(
    url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/notebook_utils.py",
)
open("notebook_utils.py", "w").write(r.text)

from notebook_utils import download_file, device_widget


# ## Download the Model and data samples
# [back to top ⬆️](#Table-of-contents:)

# In[2]:


base_artifacts_dir = Path("./artifacts").expanduser()

model_name = "v3-small_224_1.0_float"
model_xml_name = f"{model_name}.xml"
model_bin_name = f"{model_name}.bin"

model_xml_path = base_artifacts_dir / model_xml_name

base_url = "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/models/mobelinet-v3-tf/FP32/"

if not model_xml_path.exists():
    download_file(base_url + model_xml_name, model_xml_name, base_artifacts_dir)
    download_file(base_url + model_bin_name, model_bin_name, base_artifacts_dir)
else:
    print(f"{model_name} already downloaded to {base_artifacts_dir}")


# ## Select inference device
# [back to top ⬆️](#Table-of-contents:)
#
# Select a device from the dropdown list for running inference with OpenVINO.

# In[3]:


device = device_widget()

device


# ## Load the Model
# [back to top ⬆️](#Table-of-contents:)

# In[4]:


core = ov.Core()
model = core.read_model(model=model_xml_path)
compiled_model = core.compile_model(model=model, device_name=device.value)

output_layer = compiled_model.output(0)


# ## Load an Image
# [back to top ⬆️](#Table-of-contents:)

# In[5]:


# Download the image from the openvino_notebooks storage
image_filename = download_file(
    "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/image/coco.jpg",
    directory="data",
)

# The MobileNet model expects images in RGB format.
image = cv2.cvtColor(cv2.imread(filename=str(image_filename)), code=cv2.COLOR_BGR2RGB)

# Resize to the MobileNet input resolution.
input_image = cv2.resize(src=image, dsize=(224, 224))

# Reshape to the model input shape by adding a batch dimension: [1, 224, 224, 3].
input_image = np.expand_dims(input_image, 0)

plt.imshow(image);
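# Before running inference, it can help to confirm that the preprocessed image matches
# the input shape the compiled model expects. The cell below is an optional, illustrative
# check (not part of the original tutorial flow); it reuses `compiled_model` and
# `input_image` from the cells above and relies only on standard OpenVINO Runtime
# input introspection.

# In[ ]:


# Inspect the model input and compare it with the preprocessed image batch.
# For this MobileNetV3 IR, the input is assumed to be NHWC: [1, 224, 224, 3].
input_layer = compiled_model.input(0)

print(f"Model input name:  {input_layer.any_name}")
print(f"Model input shape: {input_layer.shape}")
print(f"Image batch shape: {input_image.shape}")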
# ## Do Inference
# [back to top ⬆️](#Table-of-contents:)

# In[6]:


result_infer = compiled_model([input_image])[output_layer]
result_index = np.argmax(result_infer)


# In[ ]:


imagenet_filename = download_file(
    "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/datasets/imagenet/imagenet_2012.txt",
    directory="data",
)

imagenet_classes = imagenet_filename.read_text().splitlines()


# In[7]:


# The model description states that for this model, class 0 is a background.
# Therefore, a background class must be prepended to imagenet_classes so that
# the class indices returned by the model line up with the label list.
imagenet_classes = ["background"] + imagenet_classes

imagenet_classes[result_index]
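# A quick way to sanity-check the top-1 prediction is to look at the five
# highest-scoring classes as well. The cell below is an optional extension;
# it reuses `result_infer` and `imagenet_classes` from the cells above.
# Depending on the model's final layer, the scores may be softmax probabilities
# or raw logits, so they are printed as-is and used only for ranking.

# In[ ]:


# Sort class scores in descending order and show the top five labels.
top5_indices = np.argsort(result_infer[0])[::-1][:5]

for i in top5_indices:
    print(f"{imagenet_classes[i]}: {result_infer[0][i]:.4f}")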