"""Performs face alignment and calculates L2 distance between the embeddings of images."""

# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from scipy import misc
import tensorflow as tf
import numpy as np
import sys
import os
import argparse
import facenet
import align.detect_face


def main():
model = "../models/20170216-091149"
image_files = ['compare_images/index10.png', 'compare_images/index73.png']
image_size = 160
margin = 44
gpu_memory_fraction = 0.5 images = load_and_align_data(image_files, image_size, margin, gpu_memory_fraction)
with tf.Graph().as_default(): with tf.Session() as sess: # Load the model
facenet.load_model(model) # Get input and output tensors
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0") # Run forward pass to calculate embeddings
feed_dict = { images_placeholder: images, phase_train_placeholder:False }
emb = sess.run(embeddings, feed_dict=feed_dict) nrof_images = len(image_files) print('Images:')
for i in range(nrof_images):
print('%1d: %s' % (i, image_files[i]))
print('') # Print distance matrix
print('Distance matrix')
print(' ', end='')
for i in range(nrof_images):
print(' %1d ' % i, end='')
print('')
for i in range(nrof_images):
print('%1d ' % i, end='')
for j in range(nrof_images):
dist = np.sqrt(np.sum(np.square(np.subtract(emb[i,:], emb[j,:]))))
print(' %1.4f ' % dist, end='')
print('') def load_and_align_data(image_paths, image_size, margin, gpu_memory_fraction): minsize = 20 # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # three steps' threshold
    factor = 0.709  # scale factor

    print('Creating networks and loading parameters')
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)

    nrof_samples = len(image_paths)
    img_list = [None] * nrof_samples
    for i in range(nrof_samples):
        img = misc.imread(os.path.expanduser(image_paths[i]))
        img_size = np.asarray(img.shape)[0:2]
        bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
        det = np.squeeze(bounding_boxes[0, 0:4])
        bb = np.zeros(4, dtype=np.int32)
        bb[0] = np.maximum(det[0] - margin / 2, 0)
        bb[1] = np.maximum(det[1] - margin / 2, 0)
        bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
        bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
        cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
        aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
        prewhitened = facenet.prewhiten(aligned)
        img_list[i] = prewhitened
    images = np.stack(img_list)
    return images


# def parse_arguments(argv):
#     parser = argparse.ArgumentParser()
#
#     parser.add_argument('model', type=str, default="./models/20170216-091149",
#         help='Could be either a directory containing the meta_file and ckpt_file or a model protobuf (.pb) file')
#     parser.add_argument('image_files', type=str, default="src/compare_images/index10.png src/compare_images/index73.png ",
#         nargs='+', help='Images to compare')
#     parser.add_argument('--image_size', type=int,
#         help='Image size (height, width) in pixels.', default=160)
#     parser.add_argument('--margin', type=int,
#         help='Margin for the crop around the bounding box (height, width) in pixels.', default=44)
#     parser.add_argument('--gpu_memory_fraction', type=float,
#         help='Upper bound on the amount of GPU memory that will be used by the process.', default=0.5)
#     return parser.parse_args(argv)


if __name__ == '__main__':
    main()
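
The nested loops above print an N x N matrix of pairwise L2 distances between the embeddings. As a minimal illustrative sketch (not part of the original script), the same matrix can be computed in a single vectorized step; emb stands for the (N, D) array returned by sess.run(embeddings, ...), and the function name is hypothetical:

import numpy as np

def pairwise_l2(emb):
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 * a.b; clip tiny negatives from rounding
    sq = np.sum(np.square(emb), axis=1)
    d2 = sq[:, None] + sq[None, :] - 2.0 * np.dot(emb, emb.T)
    return np.sqrt(np.maximum(d2, 0.0))

# Example: pairwise_l2(emb)[0, 1] matches the dist printed for images 0 and 1.
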
"""Validate a face recognizer on the "Labeled Faces in the Wild" dataset (http://vis-www.cs.umass.edu/lfw/).
Embeddings are calculated using the pairs from http://vis-www.cs.umass.edu/lfw/pairs.txt and the ROC curve
is calculated and plotted. Both the model metagraph and the model parameters need to exist
in the same directory, and the metagraph should have the extension '.meta'.
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import numpy as np
import argparse
import facenet
import lfw
import os
import sys
import math
from sklearn import metrics
from scipy.optimize import brentq
from scipy import interpolate
from PIL import Image, ImageDraw
import cv2
from scipy import misc
import align.detect_face


def detect_face_init():
    cap = cv2.VideoCapture(0)
    print(cap.isOpened())
    classifier = cv2.CascadeClassifier("./xml/haarcascade_frontalface_alt.xml")
    count = 0
    return cap, classifier, count


def detect_face_clear(cap):
    cap.release()
    cv2.destroyAllWindows()


def load_and_align_data(image_paths, image_size, margin, gpu_memory_fraction):

    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # three steps' threshold
    factor = 0.709  # scale factor

    print('Creating networks and loading parameters')
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)

    nrof_samples = len(image_paths)
    img_list = [None] * nrof_samples
    for i in range(nrof_samples):
        img = misc.imread(os.path.expanduser(image_paths[i]))
        img_size = np.asarray(img.shape)[0:2]
        bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
        det = np.squeeze(bounding_boxes[0, 0:4])
        bb = np.zeros(4, dtype=np.int32)
        bb[0] = np.maximum(det[0] - margin / 2, 0)
        bb[1] = np.maximum(det[1] - margin / 2, 0)
        bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
        bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
        cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
        aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
        prewhitened = facenet.prewhiten(aligned)
        img_list[i] = prewhitened
    images = np.stack(img_list)
    return images


def compare_facevec(facevec1, facevec2):
    # L2 (Euclidean) distance between two embedding vectors
    dist = np.sqrt(np.sum(np.square(np.subtract(facevec1, facevec2))))
    # print(' %1.4f ' % dist, end='')
    return dist


def face_recognition_using_facenet():
    cap, classifier, count = detect_face_init()
    model = "../models/20170216-091149"
    image_files = ['compare_images/index10.png', 'compare_images/index73.png']
    image_size = 160
    margin = 44
    gpu_memory_fraction = 0.5
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model
            facenet.load_model(model)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            # image_size = images_placeholder.get_shape()[1]  # For some reason this doesn't work for frozen graphs
            image_size = 160
            embedding_size = embeddings.get_shape()[1]

            index = 0
            th = 0.7  # L2 distance threshold for declaring a match
            face_recognition_tag = True
            color = (0, 255, 0)
            exist_face_vec = []
            face_detect_vec = []
            while count > -1:
                ret, img = cap.read()
                faceRects = classifier.detectMultiScale(img, 1.2, 2, cv2.CASCADE_SCALE_IMAGE, (20, 20))
                if len(faceRects) > 0:
                    for faceRect in faceRects:
                        x, y, w, h = faceRect
                        # cv2.rectangle(img, (int(x), int(y)), (int(x)+int(w), int(y)+int(h)), (0,255,0), 2, 0)
                        # print("save faceimg")
                        face_win = img[int(y):int(y) + int(h), int(x):int(x) + int(w)]
                        face_detect = cv2.resize(face_win, (image_size, image_size), interpolation=cv2.INTER_CUBIC)
                        # cv2.imwrite('faceimg/index' + str(index) + '.bmp', face_win)

                        # Run forward pass to calculate embeddings
                        # print('Running forward pass on face detect')
                        nrof_samples = 1
                        img_list = [None] * nrof_samples
                        prewhitened = facenet.prewhiten(face_detect)
                        img_list[0] = prewhitened
                        images = np.stack(img_list)
                        if index == 10:
                            # Enroll the reference face after a few warm-up detections
                            feed_dict = {images_placeholder: images, phase_train_placeholder: False}
                            exist_face_vec = sess.run(embeddings, feed_dict=feed_dict)
                        elif index > 10 and index % 10 == 0:
                            # Compare every tenth detection against the enrolled reference
                            feed_dict = {images_placeholder: images, phase_train_placeholder: False}
                            face_detect_vec = sess.run(embeddings, feed_dict=feed_dict)
                            cp = compare_facevec(face_detect_vec, exist_face_vec)
                            print("index ", index, " dist ", cp)
                            if cp < th:
                                print(True)
                                face_recognition_tag = True
                            else:
                                print(False)
                                face_recognition_tag = False
                        index += 1
                        # if face_recognition_tag == True:
                        #     cv2.rectangle(img, (int(x), int(y)), (int(x)+int(w), int(y)+int(h)), (255,0,0), 2, 0)
                        # else:
                        #     cv2.rectangle(img, (int(x), int(y)), (int(x)+int(w), int(y)+int(h)), (0,255,0), 2, 0)
                        cv2.rectangle(img, (int(x), int(y)), (int(x) + int(w), int(y) + int(h)), (0, 255, 0), 2, 0)
                cv2.imshow('video', img)
                key = cv2.waitKey(1)
                if key == ord('q'):
                    break
            detect_face_clear(cap)


if __name__ == '__main__':
    face_recognition_using_facenet()
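
The second script imports sklearn.metrics, scipy.optimize.brentq, and scipy.interpolate but never uses them; they appear to be left over from facenet's LFW validation code. As a hedged sketch (an assumption, not part of the original script), this is how those imports could turn genuine and impostor embedding distances into an ROC curve and an equal error rate; the function name and the distance arrays are hypothetical:

import numpy as np
from sklearn import metrics
from scipy.optimize import brentq
from scipy import interpolate

def roc_and_eer(genuine_dists, impostor_dists):
    # Smaller distance means "same person", so negate distances to use them as scores
    scores = -np.concatenate([genuine_dists, impostor_dists])
    labels = np.concatenate([np.ones(len(genuine_dists)), np.zeros(len(impostor_dists))])
    fpr, tpr, thresholds = metrics.roc_curve(labels, scores)
    # Equal error rate: the point where FPR equals 1 - TPR on the interpolated ROC curve
    eer = brentq(lambda x: 1.0 - x - interpolate.interp1d(fpr, tpr)(x), 0.0, 1.0)
    return fpr, tpr, eer
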
