
Change tab to spaces

Branch: dev
sipp11 committed 5 years ago
commit 780d5a910e
3 changed files:
  1. examples/dlib_objs_tracking.py (166 lines changed)
  2. examples/dlib_objs_tracking_queue.py (302 lines changed)
  3. examples/opencv_objs_tracking.py (117 lines changed)
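Since this commit converts indentation from tabs to spaces, the diffs below are whitespace-only apart from one stray `trackers.d` line dropped from examples/opencv_objs_tracking.py. To confirm that locally, git's whitespace-ignoring view helps:

    git show -w 780d5a910e    # -w (--ignore-all-space) hides indentation-only changes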

examples/dlib_objs_tracking.py (166 lines changed)

@@ -14,21 +14,21 @@ import dlib
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", type=str,
    help="path to input video file")
ap.add_argument("-t", "--tracker", type=str, default="kcf",
    help="OpenCV object tracker type")
args = vars(ap.parse_args())
# initialize a dictionary that maps strings to their corresponding
# OpenCV object tracker implementations
OPENCV_OBJECT_TRACKERS = {
    "csrt": cv2.TrackerCSRT_create,
    "kcf": cv2.TrackerKCF_create,
    "boosting": cv2.TrackerBoosting_create,
    "mil": cv2.TrackerMIL_create,
    "tld": cv2.TrackerTLD_create,
    "medianflow": cv2.TrackerMedianFlow_create,
    "mosse": cv2.TrackerMOSSE_create
}
# initialize OpenCV's special multi-object tracker
@@ -37,92 +37,92 @@ trackers = []
# if a video path was not supplied, grab the reference to the web cam
if not args.get("video", False):
    print("[INFO] starting video stream...")
    vs = VideoStream(src=0).start()
    time.sleep(1.0)
# otherwise, grab a reference to the video file
else:
    vs = cv2.VideoCapture(args["video"])
# loop over frames from the video stream
while True:
    # grab the current frame, then handle if we are using a
    # VideoStream or VideoCapture object
    frame = vs.read()
    frame = frame[1] if args.get("video", False) else frame
    # check to see if we have reached the end of the stream
    if frame is None:
        break
    # resize the frame (so we can process it faster)
    # frame = imutils.resize(frame, width=600)
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # grab the updated bounding box coordinates (if any) for each
    # object that is being tracked
    # (success, boxes) = trackers.update(frame)
    # print('success', success)
    # print('boxes', boxes)
    for tk in trackers:
        tk.update(frame_rgb)
        pos = tk.get_position()
        # unpack the position object
        startX = int(pos.left())
        startY = int(pos.top())
        endX = int(pos.right())
        endY = int(pos.bottom())
        cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0), 2)
    # loop over the bounding boxes and draw them on the frame
    # for box in boxes:
    #     (x, y, w, h) = [int(v) for v in box]
    #     cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the 's' key is selected, we are going to "select" a bounding
    # box to track
    if key == ord("s"):
        # select the bounding box of the object we want to track (make
        # sure you press ENTER or SPACE after selecting the ROI)
        box = cv2.selectROI("Frame", frame, fromCenter=False,
            showCrosshair=True)
        print('select box: ', box)
        (x, y, w, h) = box
        startX = x
        startY = y
        endX = x + w
        endY = y + h
        print(startX, startY, endX, endY)
        # create a new object tracker for the bounding box and add it
        # to our multi-object tracker
        # tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
        # trackers.add(tracker, frame, box)
        tracker = dlib.correlation_tracker()
        rect = dlib.rectangle(startX, startY, endX, endY)
        print('rect', rect)
        tracker.start_track(frame_rgb, rect)
        trackers.append(tracker)
    # if the `q` key was pressed, break from the loop
    elif key == ord("q"):
        break
# if we are using a webcam, release the pointer
if not args.get("video", False):
    vs.stop()
# otherwise, release the file pointer
else:
    vs.release()
# close all windows
cv2.destroyAllWindows()
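Note on the pattern above: each selected ROI gets its own dlib correlation tracker, seeded once with start_track() and advanced per frame with update()/get_position(). A minimal self-contained sketch of that API (the image path and box coordinates are hypothetical placeholders, not from this commit):

    import cv2
    import dlib

    frame = cv2.imread("test.jpg")                 # placeholder BGR image
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)   # dlib wants RGB ordering

    tracker = dlib.correlation_tracker()
    # seed with a hand-picked box: left, top, right, bottom
    tracker.start_track(rgb, dlib.rectangle(10, 10, 110, 110))

    # on every later frame: update() returns a confidence score and
    # get_position() returns a dlib.drectangle with float coordinates
    score = tracker.update(rgb)
    pos = tracker.get_position()
    print(score, int(pos.left()), int(pos.top()), int(pos.right()), int(pos.bottom()))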

examples/dlib_objs_tracking_queue.py (302 lines changed)

@@ -12,47 +12,47 @@ import dlib
import cv2
def start_tracker(box, label, rgb, inputQueue, outputQueue):
    # construct a dlib rectangle object from the bounding box
    # coordinates and then start the correlation tracker
    t = dlib.correlation_tracker()
    rect = dlib.rectangle(box[0], box[1], box[2], box[3])
    t.start_track(rgb, rect)
    # loop indefinitely -- this function will be called as a daemon
    # process so we don't need to worry about joining it
    while True:
        # attempt to grab the next frame from the input queue
        rgb = inputQueue.get()
        # if there was an entry in our queue, process it
        if rgb is not None:
            # update the tracker and grab the position of the tracked
            # object
            t.update(rgb)
            pos = t.get_position()
            # unpack the position object
            startX = int(pos.left())
            startY = int(pos.top())
            endX = int(pos.right())
            endY = int(pos.bottom())
            # add the label + bounding box coordinates to the output
            # queue
            outputQueue.put((label, (startX, startY, endX, endY)))
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=True,
    help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True,
    help="path to Caffe pre-trained model")
ap.add_argument("-v", "--video", required=True,
    help="path to input video file")
ap.add_argument("-o", "--output", type=str,
    help="path to optional output video file")
ap.add_argument("-c", "--confidence", type=float, default=0.2,
    help="minimum probability to filter weak detections")
args = vars(ap.parse_args())
# initialize our list of queues -- both input queue and output queue
@@ -63,9 +63,9 @@ outputQueues = []
# initialize the list of class labels MobileNet SSD was trained to
# detect
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
    "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
    "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
    "sofa", "train", "tvmonitor"]
# load our serialized model from disk
print("[INFO] loading model...")
@@ -81,120 +81,120 @@ fps = FPS().start()
# loop over frames from the video file stream
while True:
    # grab the next frame from the video file
    (grabbed, frame) = vs.read()
    # check to see if we have reached the end of the video file
    if frame is None:
        break
    # resize the frame for faster processing and then convert the
    # frame from BGR to RGB ordering (dlib needs RGB ordering)
    frame = imutils.resize(frame, width=600)
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # if we are supposed to be writing a video to disk, initialize
    # the writer
    if args["output"] is not None and writer is None:
        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
        writer = cv2.VideoWriter(args["output"], fourcc, 30,
            (frame.shape[1], frame.shape[0]), True)
    # if our list of queues is empty then we know we have yet to
    # create our first object tracker
    if len(inputQueues) == 0:
        # grab the frame dimensions and convert the frame to a blob
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(frame, 0.007843, (w, h), 127.5)
        # pass the blob through the network and obtain the detections
        # and predictions
        net.setInput(blob)
        detections = net.forward()
        # loop over the detections
        for i in np.arange(0, detections.shape[2]):
            # extract the confidence (i.e., probability) associated
            # with the prediction
            confidence = detections[0, 0, i, 2]
            # filter out weak detections by requiring a minimum
            # confidence
            if confidence > args["confidence"]:
                # extract the index of the class label from the
                # detections list
                idx = int(detections[0, 0, i, 1])
                label = CLASSES[idx]
                # if the class label is not a person, ignore it
                if CLASSES[idx] != "person":
                    continue
                # compute the (x, y)-coordinates of the bounding box
                # for the object
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")
                bb = (startX, startY, endX, endY)
                # create two brand new input and output queues,
                # respectively
                iq = multiprocessing.Queue()
                oq = multiprocessing.Queue()
                inputQueues.append(iq)
                outputQueues.append(oq)
                # spawn a daemon process for a new object tracker
                p = multiprocessing.Process(
                    target=start_tracker,
                    args=(bb, label, rgb, iq, oq))
                p.daemon = True
                p.start()
                # grab the corresponding class label for the detection
                # and draw the bounding box
                cv2.rectangle(frame, (startX, startY), (endX, endY),
                    (0, 255, 0), 2)
                cv2.putText(frame, label, (startX, startY - 15),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)
    # otherwise, we've already performed detection so let's track
    # multiple objects
    else:
        # loop over each of our input queues and add the input RGB
        # frame to it, enabling us to update each of the respective
        # object trackers running in separate processes
        for iq in inputQueues:
            iq.put(rgb)
        # loop over each of the output queues
        for oq in outputQueues:
            # grab the updated bounding box coordinates for the
            # object -- the .get method is a blocking operation so
            # this will pause our execution until the respective
            # process finishes the tracking update
            (label, (startX, startY, endX, endY)) = oq.get()
            # draw the bounding box from the correlation object
            # tracker
            cv2.rectangle(frame, (startX, startY), (endX, endY),
                (0, 255, 0), 2)
            cv2.putText(frame, label, (startX, startY - 15),
                cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)
    # check to see if we should write the frame to disk
    if writer is not None:
        writer.write(frame)
    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
    # update the FPS counter
    fps.update()
# stop the timer and display FPS information
fps.stop()
@@ -203,7 +203,7 @@ print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# check to see if we need to release the video writer pointer
if writer is not None:
    writer.release()
# do a bit of cleanup
cv2.destroyAllWindows()

examples/opencv_objs_tracking.py (117 lines changed)

@@ -11,21 +11,21 @@ import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", type=str,
    help="path to input video file")
ap.add_argument("-t", "--tracker", type=str, default="kcf",
    help="OpenCV object tracker type")
args = vars(ap.parse_args())
# initialize a dictionary that maps strings to their corresponding
# OpenCV object tracker implementations
OPENCV_OBJECT_TRACKERS = {
    "csrt": cv2.TrackerCSRT_create,
    "kcf": cv2.TrackerKCF_create,
    "boosting": cv2.TrackerBoosting_create,
    "mil": cv2.TrackerMIL_create,
    "tld": cv2.TrackerTLD_create,
    "medianflow": cv2.TrackerMedianFlow_create,
    "mosse": cv2.TrackerMOSSE_create
}
# initialize OpenCV's special multi-object tracker
@@ -33,68 +33,67 @@ trackers = cv2.MultiTracker_create()
# if a video path was not supplied, grab the reference to the web cam
if not args.get("video", False):
    print("[INFO] starting video stream...")
    vs = VideoStream(src=0).start()
    time.sleep(1.0)
# otherwise, grab a reference to the video file
else:
    vs = cv2.VideoCapture(args["video"])
# loop over frames from the video stream
while True:
    # grab the current frame, then handle if we are using a
    # VideoStream or VideoCapture object
    frame = vs.read()
    frame = frame[1] if args.get("video", False) else frame
    # check to see if we have reached the end of the stream
    if frame is None:
        break
    # resize the frame (so we can process it faster)
    # frame = imutils.resize(frame, width=600)
    # grab the updated bounding box coordinates (if any) for each
    # object that is being tracked
    (success, boxes) = trackers.update(frame)
    print('success', success)
    print('boxes', boxes)
-    trackers.d
    # loop over the bounding boxes and draw them on the frame
    for box in boxes:
        (x, y, w, h) = [int(v) for v in box]
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the 's' key is selected, we are going to "select" a bounding
    # box to track
    if key == ord("s"):
        # select the bounding box of the object we want to track (make
        # sure you press ENTER or SPACE after selecting the ROI)
        box = cv2.selectROI("Frame", frame, fromCenter=False,
            showCrosshair=True)
        # create a new object tracker for the bounding box and add it
        # to our multi-object tracker
        tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
        trackers.add(tracker, frame, box)
    # if the `q` key was pressed, break from the loop
    elif key == ord("q"):
        break
# if we are using a webcam, release the pointer
if not args.get("video", False):
    vs.stop()
# otherwise, release the file pointer
else:
    vs.release()
# close all windows
cv2.destroyAllWindows()
