Facial landmark (face parts) detection, which is currently available in dlib but not in OpenCV.
For now I have color-coded the points by brute force, but there must be a smarter way (a possible cleanup is sketched after the code). Or rather, I suspect I just haven't gone through the reference properly...
The video is below. It's embarrassing to show my face, so I use the face detection itself to hide it. https://www.youtube.com/watch?v=s2YtXqcBuPY
The source code is below. For it to work, the trained facial landmark model must be placed in the same directory as the .py file: shape_predictor_68_face_landmarks.dat (http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2)
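If you don't have the model yet, here is a minimal sketch for fetching and decompressing it with the Python standard library (the URL is the one above; you can of course also download and unpack the .bz2 archive by hand):

# download_model.py -- fetch and decompress the dlib 68-point landmark model
import bz2
import shutil
import urllib.request

URL = "http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2"
OUT = "shape_predictor_68_face_landmarks.dat"

# Download the compressed model next to the script
urllib.request.urlretrieve(URL, OUT + ".bz2")

# Decompress it so dlib.shape_predictor() can load the .dat file directly
with bz2.open(OUT + ".bz2", "rb") as src, open(OUT, "wb") as dst:
    shutil.copyfileobj(src, dst)

print("saved", OUT)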
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
face_landmark_detector.py

Usage:
  face_landmark_detector.py [<video source>] [<resize rate>] [<privacy mask>]
'''
import sys
import time
import copy

import dlib
import cv2

# Video source (device index or file path)
try:
    fn = sys.argv[1]
    if fn.isdigit():
        fn = int(fn)
except IndexError:
    fn = 0

# Downscale factor applied before detection
try:
    resize_rate = int(sys.argv[2])
except (IndexError, ValueError):
    resize_rate = 1

# Privacy mask flag (1 = cover the detected face with a white rectangle)
try:
    privacy_mask = int(sys.argv[3])
except (IndexError, ValueError):
    privacy_mask = 0

predictor_path = "./shape_predictor_68_face_landmarks.dat"

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)

video_input = cv2.VideoCapture(fn)

while video_input.isOpened():
    ret, frame = video_input.read()
    if not ret:
        break
    temp_frame = copy.deepcopy(frame)

    # Shrink the working frame to reduce the processing load (when the argument is given)
    height, width = frame.shape[:2]
    temp_frame = cv2.resize(frame, (int(width / resize_rate), int(height / resize_rate)))

    # Face detection
    start = time.time()
    dets = detector(temp_frame, 1)
    elapsed_time = time.time() - start
    print("detector processing time: {0} [sec]".format(elapsed_time))

    for k, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            k, d.left(), d.top(), d.right(), d.bottom()))

        # Facial landmark detection
        start = time.time()
        shape = predictor(temp_frame, d)
        elapsed_time = time.time() - start
        print("predictor processing time: {0} [sec]".format(elapsed_time))

        # Drawing
        rect_offset = 20
        if privacy_mask == 1:
            cv2.rectangle(frame,
                          (int(d.left() * resize_rate) - rect_offset, int(d.top() * resize_rate) - rect_offset),
                          (int(d.right() * resize_rate) + rect_offset, int(d.bottom() * resize_rate) + rect_offset),
                          (255, 255, 255), -1)

        for shape_point_count in range(shape.num_parts):
            shape_point = shape.part(shape_point_count)
            if shape_point_count < 17:    # [0-16]  Jawline
                cv2.circle(frame, (int(shape_point.x * resize_rate), int(shape_point.y * resize_rate)), 2, (0, 0, 255), -1)
            elif shape_point_count < 22:  # [17-21] Eyebrow (right)
                cv2.circle(frame, (int(shape_point.x * resize_rate), int(shape_point.y * resize_rate)), 2, (0, 255, 0), -1)
            elif shape_point_count < 27:  # [22-26] Eyebrow (left)
                cv2.circle(frame, (int(shape_point.x * resize_rate), int(shape_point.y * resize_rate)), 2, (255, 0, 0), -1)
            elif shape_point_count < 31:  # [27-30] Nose bridge
                cv2.circle(frame, (int(shape_point.x * resize_rate), int(shape_point.y * resize_rate)), 2, (0, 255, 255), -1)
            elif shape_point_count < 36:  # [31-35] Nostrils and nose tip
                cv2.circle(frame, (int(shape_point.x * resize_rate), int(shape_point.y * resize_rate)), 2, (255, 255, 0), -1)
            elif shape_point_count < 42:  # [36-41] Eye (right)
                cv2.circle(frame, (int(shape_point.x * resize_rate), int(shape_point.y * resize_rate)), 2, (255, 0, 255), -1)
            elif shape_point_count < 48:  # [42-47] Eye (left)
                cv2.circle(frame, (int(shape_point.x * resize_rate), int(shape_point.y * resize_rate)), 2, (0, 0, 128), -1)
            elif shape_point_count < 55:  # [48-54] Upper lip (upper contour)
                cv2.circle(frame, (int(shape_point.x * resize_rate), int(shape_point.y * resize_rate)), 2, (0, 128, 0), -1)
            elif shape_point_count < 60:  # [55-59] Lower lip (lower contour)
                cv2.circle(frame, (int(shape_point.x * resize_rate), int(shape_point.y * resize_rate)), 2, (128, 0, 0), -1)
            elif shape_point_count < 65:  # [60-64] Upper lip (lower contour)
                cv2.circle(frame, (int(shape_point.x * resize_rate), int(shape_point.y * resize_rate)), 2, (0, 128, 255), -1)
            elif shape_point_count < 68:  # [65-67] Lower lip (upper contour)
                cv2.circle(frame, (int(shape_point.x * resize_rate), int(shape_point.y * resize_rate)), 2, (128, 255, 0), -1)

    cv2.imshow('face landmark detector', frame)

    c = cv2.waitKey(50) & 0xFF
    if c == 27:  # ESC
        break

video_input.release()
cv2.destroyAllWindows()
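About the "smarter way" of coloring mentioned at the top: one option (just a sketch; the index ranges and group names are my own labels mirroring the elif chain above, not something read from the model) is to keep the ranges and BGR colors in a single table and look the color up per point:

# Hypothetical helper: map landmark index ranges to BGR colors in one table
# instead of a long elif chain. Ranges and colors mirror the script above.
LANDMARK_COLORS = [
    (range(0, 17),  (0, 0, 255)),    # jawline
    (range(17, 22), (0, 255, 0)),    # right eyebrow
    (range(22, 27), (255, 0, 0)),    # left eyebrow
    (range(27, 31), (0, 255, 255)),  # nose bridge
    (range(31, 36), (255, 255, 0)),  # nostrils / nose tip
    (range(36, 42), (255, 0, 255)),  # right eye
    (range(42, 48), (0, 0, 128)),    # left eye
    (range(48, 55), (0, 128, 0)),    # upper lip (upper contour)
    (range(55, 60), (128, 0, 0)),    # lower lip (lower contour)
    (range(60, 65), (0, 128, 255)),  # upper lip (lower contour)
    (range(65, 68), (128, 255, 0)),  # lower lip (upper contour)
]

def color_for(index):
    # Return the BGR color for a landmark index, white if it is unknown
    for indices, color in LANDMARK_COLORS:
        if index in indices:
            return color
    return (255, 255, 255)

With a helper like this, the drawing loop shrinks to a single cv2.circle call using color_for(shape_point_count).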
That's all.