1、获取protobuf
https://github.com/google/protobuf/releases/tag/v2.6.1
下载protoc-2.6.1-win32.zip
解压后得到的是一个 exe 文件,不需要安装。
2、编译proto配置文件
在models\research\下执行
protoc.exe object_detection/protos/*.proto --python_out=.
3、检测API是否正常
这里的tensorflow的models下载地址:
https://github.com/tensorflow/models
1.将models\research\slim\nets目录复制到models\research下
2.将models\research\object_detection\builders下的model_builder_test.py复制到models\research
用spyder将model_builder_test.py打开运行,检测API是否正常
4、下载预训练模型
https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md
模型解压的地址看代码里面,我这里直接放在research目录下面。
5.代码
指定模型名称
MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'
指定模型文件所在的路径
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
创建图来存放google训练好的模型
读写文件时,两种句柄都具有 .read() 方法;实测凡是使用 tf.gfile.FastGFile()
的地方换成 open() 并不会报错(包括读取普通文件和读取 tf 模型文件)。
# tf.gfile.FastGFile() and plain open() are interchangeable here: both return
# a handle with a .read() method, for model files as well as ordinary files.
# NOTE(review): BOTTLENECK_TENSOR_NAME / JPEG_DATA_TENSOR_NAME come from the
# retrain tutorial and are not defined in this post — illustrative only.
# with tf.gfile.FastGFile(os.path.join(MODEL_DIR, MODEL_FILE), 'rb') as f:  # tf.gfile variant
with open(os.path.join(MODEL_DIR, MODEL_FILE), 'rb') as f:  # plain open() variant
    graph_def = tf.GraphDef()            # empty graph definition
    graph_def.ParseFromString(f.read())  # populate it from the serialized model
    # Import the graph into the default graph and pull out the named tensors.
    bottleneck_tensor, jpeg_data_tensor = tf.import_graph_def(
        graph_def,
        return_elements=[BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME])
# Demonstrate that both read paths yield identical bytes.
print(gfile.FastGFile(image_path, 'rb').read() == open(image_path, 'rb').read())
# True
接下来就是载入数据集标签
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
#得到分类集合
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
#得到分类索引
category_index = label_map_util.create_category_index(categories)
对图片进行数据强制转化
def load_image_into_numpy_array(image):
    """Convert a PIL image into an (H, W, 3) uint8 numpy array."""
    width, height = image.size
    flat = np.asarray(image.getdata(), dtype=np.uint8)
    return flat.reshape((height, width, 3))
拿到模型里的占位符,如下:
image_tensor、boxes、scores、classes、num_detections
# Look up the model's input placeholder and the four output tensors by name:
#   detection_boxes   - one rectangle per detected object
#   detection_scores  - confidence for each detection, shown with its label
#   detection_classes - class id for each detection
#   num_detections    - how many detections are valid
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
boxes, scores, classes, num_detections = (
    detection_graph.get_tensor_by_name(n)
    for n in ('detection_boxes:0', 'detection_scores:0',
              'detection_classes:0', 'num_detections:0'))
输入数据开始检测
# Feed the batched image and fetch all four detection outputs in one pass.
outputs = sess.run(
    [boxes, scores, classes, num_detections],
    feed_dict={image_tensor: image_np_expanded})
boxes, scores, classes, num_detections = outputs

# Visualize the detections by drawing boxes and labels onto image_np in place.
vis_util.visualize_boxes_and_labels_on_image_array(
    image_np,
    np.squeeze(boxes),
    np.squeeze(classes).astype(np.int32),
    np.squeeze(scores),
    category_index,
    use_normalized_coordinates=True,
    line_thickness=8)
plt.figure(figsize=IMAGE_SIZE)
plt.imshow(image_np)
下面借鉴大神代码改编的,复制就行了(电脑版)
main.py
# -*- coding: utf-8 -*-
import numpy as np
import os
import tensorflow as tf
from matplotlib import pyplot as plt
from PIL import Image
from tensorflow.models.research.object_detection.utils import label_map_util
from tensorflow.models.research.object_detection.utils import visualization_utils as vis_util
# --- Model configuration -----------------------------------------------------
# Name of the pre-trained model (from the TF detection model zoo).
MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'
# Path to the frozen inference graph for this model.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# Label map for the dataset the model was trained on (COCO).
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
NUM_CLASSES = 90

# Load the frozen graph into the (freshly reset) default graph.
tf.reset_default_graph()
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
    serialized_graph = fid.read()
    od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')

# Build the category index used to map class ids to display names.
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
    label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
def load_image_into_numpy_array(image):
    """Return the RGB pixels of a PIL image as an (H, W, 3) uint8 array."""
    im_width, im_height = image.size
    data = np.array(image.getdata())
    return data.reshape((im_height, im_width, 3)).astype(np.uint8)
# For the sake of simplicity only two images are used:
#   test_images/image1.jpg and test_images/image2.jpg
PATH_TO_TEST_IMAGES_DIR = 'test_images'
TEST_IMAGE_PATHS = [
    os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(index))
    for index in (1, 2)
]
# Output figure size (inches) for matplotlib.
IMAGE_SIZE = (12, 8)
def detect_objects(image_np, sess, detection_graph):
    """Run one detection pass on image_np and draw the results in place.

    Args:
        image_np: HxWx3 uint8 image array (modified in place).
        sess: an open tf.Session bound to detection_graph.
        detection_graph: graph containing the frozen detection model.

    Returns:
        The same image_np, with boxes and labels drawn on it.
    """
    # The model expects a batch dimension: [1, None, None, 3].
    batched = np.expand_dims(image_np, axis=0)
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
    # Output tensors: boxes around detected objects, confidence scores that
    # are rendered next to the class label, class ids, and the valid count.
    fetches = [
        detection_graph.get_tensor_by_name('detection_boxes:0'),
        detection_graph.get_tensor_by_name('detection_scores:0'),
        detection_graph.get_tensor_by_name('detection_classes:0'),
        detection_graph.get_tensor_by_name('num_detections:0'),
    ]
    # Actual detection.
    boxes, scores, classes, num_detections = sess.run(
        fetches, feed_dict={image_tensor: batched})
    # Draw detections onto the input image (uses module-level category_index).
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        np.squeeze(boxes),
        np.squeeze(classes).astype(np.int32),
        np.squeeze(scores),
        category_index,
        use_normalized_coordinates=True,
        line_thickness=3)
    return image_np
# Run detection on each test image using the default graph (into which the
# frozen model was imported above).
detection_graph = tf.get_default_graph()
with tf.Session(graph=detection_graph) as sess:
    for image_path in TEST_IMAGE_PATHS:
        image = Image.open(image_path)  # load the image file
        image_np = load_image_into_numpy_array(image)
        # detect_objects draws on image_np in place; bind the return value
        # explicitly instead of discarding it.
        image_np = detect_objects(image_np, sess, detection_graph)
        plt.figure(figsize=IMAGE_SIZE)
        plt.imshow(image_np)
# Fix: without an explicit show() the figures never appear when the script is
# run outside an interactive environment (e.g. plain `python main.py`).
plt.show()
手机作为摄像头的代码
首先下载任意款网络摄像头
本人手机魅蓝note2,在应用超市筛选出来这款手机app摄像头
打开app后,点击开启云服务
就会出现以下内容了
这里出现192.168.2.104:8080等字样
关键的一步来了:
把地址复制在cam_url='http://192.168.2.104:8080/video'
就行了
import numpy as np
import os
import time
import tensorflow as tf
from matplotlib import pyplot as plt
from PIL import Image
from tensorflow.models.research.object_detection.utils import label_map_util
from tensorflow.models.research.object_detection.utils import visualization_utils as vis_util
import cv2
from threading import Thread
# --- Model configuration -----------------------------------------------------
MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'
# Path to the frozen inference graph.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# Label map for the COCO dataset.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
NUM_CLASSES = 90

tf.reset_default_graph()
# Fix: the original camera script never loaded the frozen model, so the later
# get_tensor_by_name('image_tensor:0') calls would fail on an empty default
# graph. Import the serialized GraphDef here, as the still-image script does.
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
    od_graph_def.ParseFromString(fid.read())
tf.import_graph_def(od_graph_def, name='')

# Category index mapping class ids to display names.
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
    label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
def load_image_into_numpy_array(image):
    """Flatten a PIL image's pixel data into an (H, W, 3) uint8 array."""
    width, height = image.size
    pixels = np.array(image.getdata(), dtype=np.uint8)
    return pixels.reshape((height, width, 3))
# Only two sample images are listed (the camera loop below does not read
# them; kept for parity with the still-image script):
#   test_images/image1.jpg, test_images/image2.jpg
PATH_TO_TEST_IMAGES_DIR = 'test_images'
TEST_IMAGE_PATHS = [
    os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(n))
    for n in range(1, 3)
]
# matplotlib output size in inches.
IMAGE_SIZE = (12, 8)
def detect_objects(image_np, sess, detection_graph):
    """Detect objects in image_np and draw boxes/labels on it in place."""
    # Add the batch dimension the model expects: [1, None, None, 3].
    image_np_expanded = np.expand_dims(image_np, axis=0)
    get = detection_graph.get_tensor_by_name
    image_tensor = get('image_tensor:0')
    # Each box outlines a detected object; each score is the confidence that
    # is rendered next to the class label on the output image.
    boxes_t = get('detection_boxes:0')
    scores_t = get('detection_scores:0')
    classes_t = get('detection_classes:0')
    num_t = get('num_detections:0')
    # Actual detection.
    boxes, scores, classes, num_detections = sess.run(
        [boxes_t, scores_t, classes_t, num_t],
        feed_dict={image_tensor: image_np_expanded})
    # Visualization of the results of a detection.
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        np.squeeze(boxes),
        np.squeeze(classes).astype(np.int32),
        np.squeeze(scores),
        category_index,
        use_normalized_coordinates=True,
        line_thickness=3)
    return image_np
# Stream frames from the phone camera and run detection on each one.
detection_graph = tf.get_default_graph()
cam_url = 'http://192.168.2.104:8080/video'
cap = cv2.VideoCapture(cam_url)
with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        # Hoist the tensor lookups out of the frame loop — they never change.
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
        scores = detection_graph.get_tensor_by_name('detection_scores:0')
        classes = detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections = detection_graph.get_tensor_by_name('num_detections:0')
        while True:
            # Fix: time.clock() was removed in Python 3.8; perf_counter is
            # the portable monotonic timer.
            start = time.perf_counter()
            frame = None
            # Fix: compare against None directly, not str(frame) == 'None'.
            while frame is None:
                if cap.isOpened():
                    rval, frame = cap.read()
                else:
                    cap.open(cam_url)  # reconnect and retry
                    rval, frame = cap.read()
            image_np = frame
            image_np_expanded = np.expand_dims(image_np, axis=0)
            # Actual detection.
            (out_boxes, out_scores, out_classes, out_num) = sess.run(
                [boxes, scores, classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})
            vis_util.visualize_boxes_and_labels_on_image_array(
                image_np,
                np.squeeze(out_boxes),
                np.squeeze(out_classes).astype(np.int32),
                np.squeeze(out_scores),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=8)
            end = time.perf_counter()
            # print('frame:', 1.0 / (end - start))
            cv2.imshow("capture", image_np)
            # Fix: one waitKey per iteration both services the GUI event loop
            # and checks for 'q'; the original called it up to three times.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
# Release the capture device and close all OpenCV windows.
cap.release()
cv2.destroyAllWindows()
效果图(这个网速和计算速度跟不上)
在使用多进程的时候出现了问题:在 Spyder 中无法创建多个进程,只能创建多个线程,所以只能在 cmd 窗口执行本程序。如果发现无法出图,不妨试一试在 cmd 窗口运行。
import numpy as np
import os
import tensorflow as tf
from matplotlib import pyplot as plt
from PIL import Image
from tensorflow.models.research.object_detection.utils import label_map_util
from multiprocessing import Queue,Pool
from tensorflow.models.research.object_detection.utils import visualization_utils as vis_util
import time
import cv2
from threading import Thread
# --- Model configuration -----------------------------------------------------
MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'
# Path to the frozen inference graph (loaded inside each worker process).
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# Label map for the COCO dataset.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
NUM_CLASSES = 90

tf.reset_default_graph()

# Category index mapping class ids to display names; built here in the parent
# and rebuilt in each spawned worker when this module is re-imported.
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
    label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
def load_image_into_numpy_array(image):
    """Convert a PIL image to an (H, W, 3) uint8 numpy array."""
    (im_width, im_height) = image.size
    return np.asarray(image.getdata(), dtype=np.uint8).reshape(
        (im_height, im_width, 3))
def detect_objects(image_np, sess, detection_graph):
    """Run the detector on one RGB frame and annotate it in place."""
    # Model input needs shape [1, None, None, 3].
    image_np_expanded = np.expand_dims(image_np, axis=0)
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
    # boxes: one rectangle per detection; scores: confidence rendered with the
    # class label; classes: class ids; num_detections: count of valid hits.
    output_names = ['detection_boxes:0', 'detection_scores:0',
                    'detection_classes:0', 'num_detections:0']
    outputs = [detection_graph.get_tensor_by_name(name)
               for name in output_names]
    # Actual detection.
    boxes, scores, classes, num_detections = sess.run(
        outputs, feed_dict={image_tensor: image_np_expanded})
    # Visualization of the results of a detection.
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        np.squeeze(boxes),
        np.squeeze(classes).astype(np.int32),
        np.squeeze(scores),
        category_index,
        use_normalized_coordinates=True,
        line_thickness=3)
    return image_np
# 多线程,高效读视频
class WebcamVideoStream:
    """Read frames from a camera on a background thread.

    The reader thread continuously overwrites self.frame, so read() always
    returns the most recent frame without blocking on the camera.
    """

    def __init__(self, src, width, height):
        # Open the video stream and grab a first frame so read() never
        # returns an unset attribute.
        self.stream = cv2.VideoCapture(src)
        self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        (self.grabbed, self.frame) = self.stream.read()
        # Flag checked by the reader thread to know when to exit.
        self.stopped = False
        self._thread = None

    def start(self):
        # Start the reader thread; daemon so it cannot keep the process alive.
        self._thread = Thread(target=self.update, args=(), daemon=True)
        self._thread.start()
        return self

    def update(self):
        # Keep grabbing frames until stop() is called.
        while not self.stopped:
            (self.grabbed, self.frame) = self.stream.read()

    def read(self):
        # Most recently grabbed frame (may be None if the camera fails).
        return self.frame

    def stop(self):
        # Fix: the original released the stream *before* stopping the thread,
        # so update() could keep calling read() on a released capture (race /
        # crash). Signal the thread first, wait for it, then release.
        self.stopped = True
        thread = getattr(self, '_thread', None)
        if thread is not None:
            thread.join(timeout=1.0)
        self.stream.release()
class configs(object):
    """Tunable parameters for the multiprocess detection pipeline."""

    def __init__(self):
        self.num_workers = 2    # number of detection worker processes
        self.queue_size = 5     # capacity of the input/output frame queues
        self.video_source = 0   # 0 means the default local webcam
        self.width = 720        # capture width in pixels
        self.height = 490       # capture height in pixels
def worker(input_q, output_q):
    """Worker-process loop: load the frozen model, then annotate frames.

    Pulls raw BGR frames from input_q and pushes RGB frames with detections
    drawn on them to output_q. Runs until the process is terminated.
    """
    # Load a (frozen) Tensorflow model into this process's own graph.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            od_graph_def.ParseFromString(fid.read())
        tf.import_graph_def(od_graph_def, name='')
    sess = tf.Session(graph=detection_graph)
    try:
        while True:
            frame = input_q.get()
            # Fix: the capture thread can hand out None before the camera
            # delivers its first frame; cvtColor would raise on it.
            if frame is None:
                continue
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            output_q.put(detect_objects(frame_rgb, sess, detection_graph))
    finally:
        # Fix: the original sess.close() sat after an infinite loop and was
        # unreachable; close the session on the way out instead.
        sess.close()
if __name__ == '__main__':
    args = configs()
    # Frame flow: capture thread -> input_q -> worker pool -> output_q -> UI.
    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
    pool = Pool(args.num_workers, worker, (input_q, output_q))
    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()
    try:
        while True:
            frame = video_capture.read()
            # Fix: the threaded reader can return None before the camera
            # delivers its first frame; don't feed that to the workers.
            if frame is not None:
                input_q.put(frame)
            t = time.time()
            if output_q.empty():
                pass  # nothing processed yet; let the queue fill up
            else:
                output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)
                cv2.imshow('Video', output_rgb)
                print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # Fix: clean up the pool, capture thread, and windows even when the
        # loop exits via an exception (e.g. KeyboardInterrupt).
        pool.terminate()
        video_capture.stop()
        cv2.destroyAllWindows()