I installed software called motion on a Raspberry Pi and have been using it as a surveillance camera. Lately I have been trying to identify who appears in the pictures, but it is a bit unsatisfying because the identification only works on saved still images.
Since I was at it, I wondered whether I could collect more information by processing the video in real time. I found an article that looked relevant, so I wrote a script that detects faces in real time while referring to it.
Try converting webcam / camcorder videos in real time with OpenCV
This is a bit of self-promotion, but please refer to the following article for the installation procedure.
Procedure to quickly create a deep learning environment on Mac with TensorFlow and OpenCV
Also, copy the classifier used for face detection into the working folder with the following command.
$ cp /usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml ./
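If your OpenCV was installed some other way (for example with pip), the XML file may live somewhere else. As a minimal sketch, the opencv-python package exposes the cascade directory as cv2.data.haarcascades, so you can locate and load the file from Python instead:
import cv2

# cv2.data.haarcascades is the directory holding the bundled Haar cascade XML files
cascade_path = cv2.data.haarcascades + "haarcascade_frontalface_alt2.xml"
print(cascade_path)
cascade = cv2.CascadeClassifier(cascade_path)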
I modified the script published on the reference site as follows.
sebcam.py
import cv2

if __name__ == '__main__':
    # Constant definitions
    ESC_KEY = 27     # Esc key
    INTERVAL = 33    # Wait time (ms)
    FRAME_RATE = 30  # fps

    ORG_WINDOW_NAME = "org"
    GAUSSIAN_WINDOW_NAME = "gaussian"

    DEVICE_ID = 0

    # Specify the classifier
    cascade_file = "haarcascade_frontalface_alt2.xml"
    cascade = cv2.CascadeClassifier(cascade_file)

    # Open the camera
    cap = cv2.VideoCapture(DEVICE_ID)

    # Read the initial frame
    end_flag, c_frame = cap.read()
    height, width, channels = c_frame.shape

    # Prepare the windows
    cv2.namedWindow(ORG_WINDOW_NAME)
    cv2.namedWindow(GAUSSIAN_WINDOW_NAME)

    # Conversion loop
    while end_flag:
        # Convert to grayscale and detect faces
        img = c_frame
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        face_list = cascade.detectMultiScale(img_gray, minSize=(100, 100))

        # Mark each detected face
        for (x, y, w, h) in face_list:
            color = (0, 0, 225)
            pen_w = 3
            cv2.rectangle(img_gray, (x, y), (x + w, y + h), color, thickness=pen_w)

        # Display the frames
        cv2.imshow(ORG_WINDOW_NAME, c_frame)
        cv2.imshow(GAUSSIAN_WINDOW_NAME, img_gray)

        # Exit on the Esc key
        key = cv2.waitKey(INTERVAL)
        if key == ESC_KEY:
            break

        # Read the next frame
        end_flag, c_frame = cap.read()

    # Clean up
    cv2.destroyAllWindows()
    cap.release()
When you execute the following command, two windows open: one shows the original image, the other the grayscale image with a rectangle drawn around each detected face.
$ python sebcam.py
It worked (^-^)/
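Incidentally, coming back to the original motivation of checking saved still images, the same cascade works on a single picture too. A minimal sketch, where face.jpg is just a placeholder file name:
import cv2

# Quick sanity check of the cascade on one still image (face.jpg is a placeholder)
cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt2.xml")
img = cv2.imread("face.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = cascade.detectMultiScale(gray, minSize=(100, 100))
print(faces)  # one (x, y, w, h) row per detected face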
There was also a sample that tracks detected objects in real time, so I tried that too.
Draw optical flow in real time with OpenCV (Shi-Tomasi method, Lucas-Kanade method)
The reference article reads its video from a file, so I changed the source to a webcam. I also changed it to detect feature points again once all the tracked points are lost.
LucasKande.py
import numpy as np
import cv2

DEVICE_ID = 0
cap = cv2.VideoCapture(DEVICE_ID)

# Shi-Tomasi corner detection parameters
feature_params = dict(maxCorners=100,
                      qualityLevel=0.3,
                      minDistance=7,
                      blockSize=7)

# Lucas-Kanade method parameters
lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

# Generate 100 random colors (a random 100x3 ndarray with values in 0-255)
color = np.random.randint(0, 255, (100, 3))

# Process the first frame
end_flag, frame = cap.read()
gray_prev = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
feature_prev = cv2.goodFeaturesToTrack(gray_prev, mask=None, **feature_params)
mask = np.zeros_like(frame)

while end_flag:
    # Convert to grayscale
    gray_next = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect optical flow
    feature_next, status, err = cv2.calcOpticalFlowPyrLK(gray_prev, gray_next, feature_prev, None, **lk_params)

    # If every tracked point was lost, detect feature points again and retry
    if len(feature_next[status == 1]) == 0:
        feature_prev = cv2.goodFeaturesToTrack(gray_prev, mask=None, **feature_params)
        mask = np.zeros_like(frame)
        feature_next, status, err = cv2.calcOpticalFlowPyrLK(gray_prev, gray_next, feature_prev, None, **lk_params)

    # Keep only the points whose flow was found (status 0: not found, 1: found)
    good_prev = feature_prev[status == 1]
    good_next = feature_next[status == 1]

    # Draw the optical flow
    for i, (next_point, prev_point) in enumerate(zip(good_next, good_prev)):
        prev_x, prev_y = prev_point.ravel()
        next_x, next_y = next_point.ravel()
        # Cast to int: newer OpenCV versions reject float pixel coordinates here
        mask = cv2.line(mask, (int(next_x), int(next_y)), (int(prev_x), int(prev_y)), color[i].tolist(), 2)
        frame = cv2.circle(frame, (int(next_x), int(next_y)), 25, color[i].tolist(), -1)
    img = cv2.add(frame, mask)

    # Show in a window
    cv2.imshow('window', img)

    # Exit on the Esc key
    if cv2.waitKey(30) & 0xff == 27:
        break

    # Prepare the next frame and points
    gray_prev = gray_next.copy()
    feature_prev = good_next.reshape(-1, 1, 2)
    end_flag, frame = cap.read()

# Clean up
cv2.destroyAllWindows()
cap.release()
It worked!
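As a possible next step, the two scripts could be combined: detect a face once with the cascade, then seed the Lucas-Kanade tracker with corners found only inside the face rectangle. A rough, untested sketch of just that seeding step, reusing the classifier and parameters from above:
import numpy as np
import cv2

cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt2.xml")
cap = cv2.VideoCapture(0)
end_flag, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

faces = cascade.detectMultiScale(gray, minSize=(100, 100))
if len(faces) > 0:
    x, y, w, h = faces[0]
    # Mask out everything except the face so goodFeaturesToTrack
    # only picks corners inside the detected rectangle
    face_mask = np.zeros_like(gray)
    face_mask[y:y + h, x:x + w] = 255
    feature_prev = cv2.goodFeaturesToTrack(gray, mask=face_mask,
                                           maxCorners=100, qualityLevel=0.3,
                                           minDistance=7, blockSize=7)
    # feature_prev can now be fed into the tracking loop above
cap.release()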