import os
import time

import cv2
import numpy as np
import tensorflow as tf
from PIL import Image

from object_detection.utils import label_map_util
from object_detection.utils import ops as utils_ops
from object_detection.utils import visualization_utils as vis_util

# Requires: tensorflow-gpu 1.12, CUDA 9.0, cuDNN v7.1.4, NVIDIA driver 390.


class BallDetection:

    def __init__(self):
        self.now = time.time()
        self.cwd = os.getcwd()
        self.num_classes = 1
        # Path to the frozen detection graph. This is the actual model used for object detection.
        self.path_to_ckpt = self.cwd + '/content/datalab/fine_tuned_model' + '/frozen_inference_graph.pb'
        # Path to the label map, i.e. the strings used to add the correct label to each box.
        self.path_to_labels = self.cwd + '/content/datalab' + '/label_map.pbtxt'
        self.test_image_path = self.cwd + '/content/datalab/test_image/image1.jpg'
        # Size, in inches, of the output images.
        self.output_img_size = (12, 8)
        self.label_map = label_map_util.load_labelmap(self.path_to_labels)
        self.categories = label_map_util.convert_label_map_to_categories(
            self.label_map,
            max_num_classes=self.num_classes,
            use_display_name=True)
        self.category_index = label_map_util.create_category_index(self.categories)
        self.detection_graph = tf.Graph()
        self.load_detection_graph()

    def load_image_into_numpy_array(self, image):
        (im_width, im_height) = image.size
        return np.array(image.getdata()).reshape(
            (im_height, im_width, 3)).astype(np.uint8)
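    # A quicker, equivalent conversion (a sketch, assuming the test image is an
    # RGB PIL image, which the reshape above already requires) would be:
    #     np.asarray(image.convert('RGB'), dtype=np.uint8)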

    def run_inference_for_single_image(self, image, graph):
        with graph.as_default():
            with tf.Session() as sess:
                # Get handles to the input and output tensors.
                ops = tf.get_default_graph().get_operations()
                all_tensor_names = {output.name for op in ops for output in op.outputs}
                tensor_dict = {}
                for key in [
                        'num_detections', 'detection_boxes', 'detection_scores',
                        'detection_classes', 'detection_masks'
                ]:
                    tensor_name = key + ':0'
                    if tensor_name in all_tensor_names:
                        tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
                            tensor_name)
                if 'detection_masks' in tensor_dict:
                    # The following processing is only for a single image.
                    detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
                    detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
                    # Reframing is required to translate the masks from box coordinates
                    # to image coordinates and fit the image size.
                    real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
                    detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                    detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                    detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                        detection_masks, detection_boxes, image.shape[0], image.shape[1])
                    detection_masks_reframed = tf.cast(
                        tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                    # Follow the convention by adding back the batch dimension.
                    tensor_dict['detection_masks'] = tf.expand_dims(
                        detection_masks_reframed, 0)
                image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
                print(time.time() - self.now)
                # Run inference.
                output_dict = sess.run(tensor_dict,
                                       feed_dict={image_tensor: np.expand_dims(image, 0)})
                # All outputs are float32 numpy arrays, so convert types as appropriate.
                output_dict['num_detections'] = int(output_dict['num_detections'][0])
                output_dict['detection_classes'] = output_dict[
                    'detection_classes'][0].astype(np.uint8)
                output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
                output_dict['detection_scores'] = output_dict['detection_scores'][0]
                if 'detection_masks' in output_dict:
                    output_dict['detection_masks'] = output_dict['detection_masks'][0]
                print(time.time() - self.now)
        return output_dict
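    # Note: the method above opens a fresh tf.Session on every call, so each
    # detection pays the session setup cost again. A minimal sketch of reuse
    # (an assumption, not part of the original script) would be to build the
    # session once, e.g. `self.sess = tf.Session(graph=self.detection_graph)`
    # in __init__, and call `self.sess.run(...)` here instead of opening a new
    # `with tf.Session()` block each time.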

    def load_detection_graph(self):
        with self.detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(self.path_to_ckpt, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')

    def set_up_object_detection_api(self):
        self.now = time.time()
        test_image = Image.open(self.test_image_path)
        # The array-based representation of the image will be used later to prepare
        # the result image with boxes and labels on it.
        image_np = self.load_image_into_numpy_array(test_image)
        # Actual detection.
        output_dict = self.run_inference_for_single_image(image_np, self.detection_graph)
        # Object position: image size plus the normalized [ymin, xmin, ymax, xmax]
        # of the top-ranked detection.
        width, height = test_image.size
        box_norm_coords = output_dict['detection_boxes'][0]
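        # Sketch of using the two values above (hypothetical, not in the original
        # script): convert the normalized box of the best detection into pixel
        # coordinates and a center point, e.g. for tracking the ball.
        ymin, xmin, ymax, xmax = box_norm_coords
        ball_box_px = (int(xmin * width), int(ymin * height),
                       int(xmax * width), int(ymax * height))
        ball_center_px = (int((xmin + xmax) / 2 * width),
                          int((ymin + ymax) / 2 * height))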
        # Visualization of the results of a detection.
        vis_util.visualize_boxes_and_labels_on_image_array(
            image_np,
            output_dict['detection_boxes'],
            output_dict['detection_classes'],
            output_dict['detection_scores'],
            self.category_index,
            instance_masks=output_dict.get('detection_masks'),
            use_normalized_coordinates=True,
            line_thickness=8)
        # image_np is RGB (from PIL) while OpenCV expects BGR, so convert before display.
        cv2.imshow('image', cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR))
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        print(time.time() - self.now)


if __name__ == '__main__':
    run = BallDetection()
    while True:
        input("Press enter to continue")
        run.set_up_object_detection_api()