#!/usr/bin/env python
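"""Evaluate a quantized Inception-v4 ONNX model on the ImageNet validation set.

Images are decoded and preprocessed with a TensorFlow 1.x graph (central crop,
bilinear resize to 299x299, rescale to [-1, 1]); inference runs through
onnxruntime, and top-1/top-5 recall are accumulated with tf.metrics.

Example invocation (the paths are just the argparse defaults; adjust them to
your environment):

  python eval_onnx.py \
      --onnx_model inceptionv4_int8.onnx \
      --val_data_dir /workspace/dataset/imagenet/val \
      --val_image_list /workspace/dataset/imagenet/val.txt \
      --ipu --provider_config vaip_config.json
"""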
import argparse
import math
import numpy as np
import os
import time
# TensorFlow 1.x APIs are used only for image preprocessing and for the
# top-k metric bookkeeping; inference itself runs through onnxruntime.
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()
import onnxruntime as ort

parser = argparse.ArgumentParser()
parser.add_argument(
"--onnx_model", default="inceptionv4_int8.onnx", help="Input onnx model")
parser.add_argument(
"--val_data_dir",
default="/workspace/dataset/imagenet/val",
help="Data directory of validation set")
parser.add_argument(
"--val_image_list",
default="/workspace/dataset/imagenet/val.txt",
help="Validation images list")
parser.add_argument(
"--subset_len",
default=50000,
type=int,
help="Subset length of validation set to use")
parser.add_argument(
"--batch_size", default=1, type=int, help="Validation batch size")
parser.add_argument(
"--ipu",
action="store_true",
help="Use IPU for inference.",
)
parser.add_argument(
"--provider_config",
type=str,
default="vaip_config.json",
help="Path of the config file for seting provider_options.",
)
args = parser.parse_args()


class DataLoader(object):
  """Builds a TF graph that decodes and preprocesses one image for Inception."""

  def __init__(self, height=224, width=224):
self.output_height = height
self.output_width = width

  def _inception_preprocess(self,
                            image,
                            central_fraction=0.875,
                            central_crop=True):
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    # Crop the central region of the image, retaining 87.5% of the image
    # along each dimension.
if central_crop and central_fraction:
image = tf.image.central_crop(image, central_fraction=central_fraction)
if self.output_height and self.output_width:
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(
image, [self.output_height, self.output_width], align_corners=False)
    # Rescale from [0, 1] to [-1, 1], as Inception-style models expect.
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
return image

  def _build_placeholder(self):
input_image_path = tf.placeholder(
tf.string, shape=(None), name="input_image_path")
image = tf.io.read_file(input_image_path)
image = tf.image.decode_jpeg(image, channels=3)
return image, input_image_path

  def build_preprocess(self):
    """Returns the preprocessed image tensor and its image-path placeholder."""
image, input_image_path = self._build_placeholder()
image = self._inception_preprocess(image)
return image, input_image_path
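

# A minimal standalone sketch of how DataLoader is used (the image path here
# is hypothetical):
#
#   loader = DataLoader(299, 299)
#   image, image_path = loader.build_preprocess()
#   with tf.Session() as sess:
#     img = sess.run(image, feed_dict={image_path: "val/some_image.JPEG"})
#     # img has shape (1, 299, 299, 3) with values in [-1, 1]


# Evaluation flow: build the preprocessing graph once, then for each batch
# decode and preprocess the listed images, run the ONNX session, and fold the
# logits into the running top-1/top-5 recall metrics.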
def main():
input_shape = (299, 299, 3)
label_offset = 0
with tf.Session() as tf_session:
loader = DataLoader(input_shape[0], input_shape[1])
image, image_path = loader.build_preprocess()
in_image = tf.placeholder(
tf.float32, shape=(None,) + input_shape, name='in_image')
in_label = tf.placeholder(tf.int64, shape=(None, 1), name='in_label')
num_classes = 1001 - label_offset
logits = tf.placeholder(
tf.float32, shape=(None, num_classes), name='logits')
    # With a single ground-truth label per image, recall@k equals top-k accuracy.
    top1, top1_update = tf.metrics.recall_at_k(
        in_label, logits, 1, name="precision_top1")
    top5, top5_update = tf.metrics.recall_at_k(
        in_label, logits, 5, name="precision_top5")
var_list = tf.get_collection(
tf.GraphKeys.LOCAL_VARIABLES, scope="precision")
vars_initializer = tf.variables_initializer(var_list=var_list)
tf_session.run(vars_initializer)
with open(args.val_image_list, 'r') as fr:
lines = fr.readlines()
    if args.subset_len > len(lines):
      raise ValueError(
          "subset_len (%d) must be less than or equal to the total number "
          "of images (%d)." % (args.subset_len, len(lines)))
eval_steps = math.ceil(args.subset_len / args.batch_size)
start_t = time.time()
    # Select the execution provider: the Vitis AI EP when --ipu is given,
    # otherwise CUDA with a CPU fallback.
    if args.ipu:
      providers = ["VitisAIExecutionProvider"]
      provider_options = [{"config_file": args.provider_config}]
    else:
      providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
      provider_options = None
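    # Optional sanity check (not enforced here): the requested provider should
    # appear in this onnxruntime build's available providers, e.g.
    #   assert providers[0] in ort.get_available_providers()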
ort_session = ort.InferenceSession(
args.onnx_model, providers=providers, provider_options=provider_options)
for step in range(eval_steps):
print(f'Eval step {step} / {eval_steps}')
batch_images = []
batch_labels = []
for i in range(args.batch_size):
index = step * args.batch_size + i
if index >= args.subset_len:
break
img_path, label = lines[index].strip().split(" ")
img_path = os.path.join(args.val_data_dir, img_path)
        # Run the session to get the preprocessed image for the fed image path.
image_val = tf_session.run(image, feed_dict={image_path: img_path})
batch_images.append(image_val)
label = int(label) + 1 - label_offset
label = np.array([label], dtype=np.int64)
batch_labels.append(label)
      # Each preprocessed image already has shape (1, 299, 299, 3); collapse
      # the extra dimension so the batch becomes (N, 299, 299, 3).
      batch_images = batch_images[0] if args.batch_size == 1 else np.squeeze(
          batch_images)
ort_inputs = {ort_session.get_inputs()[0].name: batch_images}
outputs = ort_session.run(None, ort_inputs)
      # Update the running top-1/top-5 metrics.
tf_session.run([top1_update, top5_update],
feed_dict={
in_image: batch_images,
in_label: batch_labels,
logits: outputs[0]
})
end_t = time.time()
top1_val, top5_val = tf_session.run([top1, top5])
print('Recall_1 = [%s]' % str(top1_val))
print('Recall_5 = [%s]' % str(top5_val))
print('Use_time = [%s]' % str(end_t - start_t))


if __name__ == "__main__":
main()