Use Cases
1. Extract Face Region
2. Search for Face Match
Deep face recognition with Keras, Dlib and OpenCV
Key Research Papers - Face Recognition
1. Extract Face Region
2. Search for Face Match
Deep face recognition with Keras, Dlib and OpenCV
Key Research Papers - Face Recognition
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#pip install face_recognition | |
#https://github.com/zeynepCankara/Face-Recognition-Tensorflow/blob/master/Face_Rec_System.ipynb | |
import os | |
import face_recognition | |
import cv2 | |
# Collect the filenames of every gallery image in the test directory
# (used later by SearchforMatch as the search gallery).
test_dir = r'E:\\FaceFeatures\Test'
images = os.listdir(test_dir)
print(images)
#Extract Area of Face
def ExtractFaceRegion(imagefilepath):
    """Detect the first face in an image, display it highlighted and cropped,
    and return the face encodings.

    Parameters:
        imagefilepath: path to an image file readable by cv2.imread.

    Returns:
        The list of 128-d face encodings for the detected faces, or an
        empty list when the image is unreadable or contains no face.
        (The original returned None; existing callers ignore the return
        value, so this is backward compatible.)
    """
    image = cv2.imread(imagefilepath)
    if image is None:
        # cv2.imread returns None for a missing/unreadable file instead of raising
        print("Could not read image: " + imagefilepath)
        return []
    # Convert it from BGR (OpenCV default) to RGB (face_recognition expects RGB)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # detect face in the image and get its location (square boxes coordinates)
    boxes = face_recognition.face_locations(image, model='hog')
    if not boxes:
        # Original indexed boxes[0] unconditionally and crashed with
        # IndexError whenever no face was detected.
        print("No face detected in: " + imagefilepath)
        return []
    # face_locations returns (top, right, bottom, left) tuples
    top, right, bottom, left = boxes[0]
    # draw the predicted face name on the image
    cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)
    # place the label just above the box unless it would fall off the top edge
    y = top - 15 if top - 15 > 15 else top + 15
    cv2.putText(image, "Result", (left, y), cv2.FONT_ITALIC, 0.75, (0, 255, 0), 2)
    cv2.imshow("Highlighted Image", image)
    cv2.waitKey(0)
    # crop to just the detected face region
    crop = image[top:bottom, left:right]
    cv2.imshow("Cropped Image", crop)
    cv2.waitKey(0)
    encoding = face_recognition.face_encodings(image, boxes)
    print(encoding)
    return encoding
def SearchforMatch(imagefilepath):
    """Compare the face in *imagefilepath* against every image in the
    test-directory gallery, printing the result for each.

    Parameters:
        imagefilepath: path to the query image.

    Returns:
        List of gallery filenames whose face matched the query, or an
        empty list when the query image has no detectable face.
        (The original returned None; callers ignore the return value,
        so this is backward compatible.)
    """
    imagesearch = face_recognition.load_image_file(imagefilepath)
    query_encodings = face_recognition.face_encodings(imagesearch)
    if not query_encodings:
        # Original indexed [0] unconditionally -> IndexError when the
        # query image contains no detectable face.
        print("No face found in query image: " + imagefilepath)
        return []
    imagesearchencoded = query_encodings[0]
    matches = []
    for imagename in images:
        current_image = face_recognition.load_image_file(r'E:\\FaceFeatures\Test\\'+imagename)
        current_encodings = face_recognition.face_encodings(current_image)
        if not current_encodings:
            # Skip gallery images with no detectable face instead of crashing.
            print("No face found, skipping:"+imagename)
            continue
        current_image_encoded = current_encodings[0]
        result = face_recognition.compare_faces([imagesearchencoded], current_image_encoded)
        if result[0]:
            print("Matched:"+imagename)
            matches.append(imagename)
        else:
            print("Not Matched:"+imagename)
    return matches
# Run both demos against the same sample image, then close every OpenCV window.
sample_image_path = r'E:\\FaceFeatures\Test\1.jpg'
ExtractFaceRegion(sample_image_path)
SearchforMatch(sample_image_path)
cv2.destroyAllWindows()
#Example 2
#pip install mtcnn
#https://github.com/ipazc/mtcnn
from mtcnn import MTCNN
import cv2
# Load the image and convert BGR -> RGB (MTCNN expects RGB input).
img = cv2.cvtColor(cv2.imread(r"E:\Siva_Learning_Blog_Posts\Face1.jpg"),cv2.COLOR_BGR2RGB)
detector = MTCNN()
# Run detection exactly once (the original called detect_faces twice,
# doubling the detection work just to print the result).
result = detector.detect_faces(img)
print(result)
if result:
    bounding_box = result[0]['box']        # [x, y, width, height]
    keypoints = result[0]['keypoints']     # dict of (x, y) facial landmarks
    # Box is (x, y, w, h): the bottom-right corner is (x + w, y + h).
    cv2.rectangle(img,
                  (bounding_box[0], bounding_box[1]),
                  (bounding_box[0]+bounding_box[2], bounding_box[1] + bounding_box[3]),
                  (0,155,255),
                  2)
    # Mark each of the five detected landmarks with a small circle.
    for landmark in ('left_eye', 'right_eye', 'nose', 'mouth_left', 'mouth_right'):
        cv2.circle(img, keypoints[landmark], 2, (0,155,255), 2)
    # Convert back to BGR before writing so colors are saved correctly.
    cv2.imwrite(r'E:\Siva_Learning_Blog_Posts\Result.jpg', cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
else:
    # Original indexed result[0] unconditionally -> IndexError on no face.
    print("No face detected")
Happy Learning!!!

London bar uses #FacialRecognition — Ronald van Loon (@Ronald_vanLoon), July 14, 2020,
by @tictoc. #Tech #Technology #IT
Cc: @evankirstel pic.twitter.com/z2I1mClzU9
No comments:
Post a Comment