Python face recognition attendance based on MTCNN and FaceNet

A classroom attendance system based on face recognition

 

Classroom attendance is one of the main tools for keeping student attendance rates up, and attendance records are an important part of course grades; taking attendance also helps supervise students and thereby safeguards teaching quality. The dominant approach today is still manual roll call or random spot checks by the teacher, which not only eats into class time but also fails to catch early departures, proxy sign-ins, and absenteeism, so attendance cannot be monitored effectively. Various attendance machines exist on the market, but their hardware and deployment costs are too high for them to be a practical solution.

To address these problems, this article uses face recognition technology to design and implement an automated attendance system suited to everyday classroom use, focusing on the system's design and the implementation of its features. It covers the basic concepts, development stages, and core methods of face recognition, analyzes the shortcomings of traditional attendance methods, and reviews the history and results of related work at home and abroad. It then describes the main methods for liveness detection, face detection, and face recognition, designs the software architecture according to the system's functional and performance requirements, and finally presents the system's features, the tests performed, and the corresponding GUI.

Face recognition attendance implemented in Python, based on MTCNN and FaceNet.

It can recognize faces at large angles.
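Before the code, a minimal sketch of the directory layout the script assumes (the paths and module names come from the imports and hard-coded paths in the code; the script name and the example photo names zhangsan.jpg / lisi.jpg are placeholders):

face_attendance.py               # the script below (the file name is an assumption)
model_data/
    facenet_keras.h5             # pretrained FaceNet weights loaded by the script
face_dataset/                    # one enrollment photo per person; the part of the
    zhangsan.jpg                 # file name before the first "." is used as that
    lisi.jpg                     # person's name
net/                             # mtcnn and InceptionResNetV1 model definitions
utils/                           # rect2square, Alignment_1, calc_128_vec, compare_faces, face_distance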

 

 

 

import cv2
import os
import numpy as np
from net.mtcnn import mtcnn
import utils.utils as utils
from net.inception import InceptionResNetV1
import time
from dateutil.parser import parse


font = cv2.FONT_HERSHEY_COMPLEX

def seekopp(list_2,i):
    # Scan backwards from position i-1 for the nearest nonzero difference;
    # return 1 if it is positive, 0 if it is negative (None if none is found).
    for j in range(i-1,-1,-1):
        if list_2[j]==0:
            continue 
        elif list_2[j]>0:
            return 1
        elif list_2[j]<0:
            return 0

def local_maximum(list_1):
    # Return the local maxima (interior peaks) of a sequence; 'error' for empty input.
    a=len(list_1)
    if a==0:
        return 'error'
    if a==1:
        return list_1
    if a==2:
        if list_1[0]>list_1[1]:
            return list_1[0]
        elif list_1[0]<list_1[1]:
            return list_1[1]
    if a>2:
        list_2=[]
        index_1=[]
        for i in range(0,a-1):
            list_2.append(list_1[i+1]-list_1[i])
        b=len(list_2)
        if list_2[0]<0:
            index_1.append(0)
        for i in range(0,b-1):
            if list_2[i+1]<0:
                if list_2[i]>0:
                    index_1.append(i+1)
                elif list_2[i]==0:
                    if seekopp(list_2,i):
                        index_1.append(i+1)
                else:
                    continue 
            else:
                continue 
        list_3=[]
        for i in index_1:
            list_3.append(list_1[i])
        return list_3
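# A quick sanity check of what local_maximum returns (these two helpers are defined
# here but are not called anywhere else in this script):
#   local_maximum([1, 3, 2, 5, 4])  ->  [3, 5]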

def transform(t1,t2):
    # Number of whole seconds elapsed between two epoch timestamps t1 and t2.
    timeArray1 = time.localtime(t1)
    timeArray2 = time.localtime(t2)
    time_1 = time.strftime("%Y-%m-%d %H:%M:%S", timeArray1)
    time_2 = time.strftime("%Y-%m-%d %H:%M:%S", timeArray2)
    date1 = parse(time_1)
    date2 = parse(time_2)
    result = int((date2 - date1).total_seconds())
    return result
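# For integer timestamps the strftime/parse round-trip above reduces to a plain
# subtraction, e.g. transform(1000, 1065) == int(1065 - 1000) == 65.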


class face_rec():
    time = 0
    def __init__(self):
        # Create the MTCNN detector used to find faces in an image
        self.mtcnn_model = mtcnn()
        # Detection thresholds for the three cascaded MTCNN stages
        self.threshold = [0.5,0.8,0.9]
        # Load FaceNet, which maps a detected face to a 128-dimensional vector
        self.facenet_model = InceptionResNetV1()
        # model.summary()
        model_path = './model_data/facenet_keras.h5'
        self.facenet_model.load_weights(model_path)

        #-----------------------------------------------#
        #   Encode the faces stored in the database
        #   known_face_encodings holds the encoded faces
        #   known_face_names holds the corresponding names
        #-----------------------------------------------#
        face_list = os.listdir("face_dataset")
        self.known_face_encodings=[]
        self.known_face_names=[]
        self.time_list=[]
        self.learn_time = 0
        self.total_time = []

        for face in face_list:
            name = face.split(".")[0]

            img = cv2.imread("./face_dataset/"+face)
            img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)

            # Detect faces in the enrollment image
            rectangles = self.mtcnn_model.detectFace(img, self.threshold)

            # Convert the bounding box to a square
            rectangles = utils.rect2square(np.array(rectangles))
            # FaceNet expects a 160x160 input image
            rectangle = rectangles[0]
            # Record the landmarks, shifted to the crop origin and scaled to the 160x160 crop
            landmark = (np.reshape(rectangle[5:15],(5,2)) - np.array([int(rectangle[0]),int(rectangle[1])]))/(rectangle[3]-rectangle[1])*160

            crop_img = img[int(rectangle[1]):int(rectangle[3]), int(rectangle[0]):int(rectangle[2])]
            crop_img = cv2.resize(crop_img,(160,160))

            new_img,_ = utils.Alignment_1(crop_img,landmark)

            new_img = np.expand_dims(new_img,0)
            # Feed the aligned face into the FaceNet model to extract a 128-dimensional embedding
            face_encoding = utils.calc_128_vec(self.facenet_model,new_img)

            self.known_face_encodings.append(face_encoding)
            self.known_face_names.append(name)

    def recognize(self,draw):
        #-----------------------------------------------#
        #   Face recognition:
        #   locate the faces first, then match them against the database
        #-----------------------------------------------#
        height,width,_ = np.shape(draw)
        draw_rgb = cv2.cvtColor(draw,cv2.COLOR_BGR2RGB)
        # Detect faces in the frame
        rectangles = self.mtcnn_model.detectFace(draw_rgb, self.threshold)
        '''
        # Disabled earlier version of the "no face" timing logic
        if len(rectangles)==0:
            font = cv2.FONT_HERSHEY_SIMPLEX
            if time_flag==0:
                print('First run!')
                first_time = time.time()
                t = transform(int(first_time),int(time.time()))
                cv2.putText(draw, "Focus on time:"+str(t)+" s", (250, 40), font, 0.6, (0, 255, 0), 1, cv2.LINE_AA)
                time_flag = 1
                return
            else:
                print('Second run!')
                t = transform(int(first_time),int(time.time()))
                cv2.putText(draw, "Focus on time:"+str(t)+" s", (250, 40), font, 0.6, (0, 255, 0), 1, cv2.LINE_AA)
                return
        '''
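        # No face detected in this frame: time_list collects the timestamps of the
        # consecutive "no face" frames, t is the length of the current no-face span,
        # and total_time keeps one running duration per such span for the overlay.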
        if len(rectangles)==0:
            self.time_list.append(time.time())
            font = cv2.FONT_HERSHEY_SIMPLEX
            t = transform(self.time_list[0],self.time_list[-1])
            #print(t,type(t))
            cv2.putText(draw, "Noface", (40, 40), font, 0.6, (0, 255, 0), 1, cv2.LINE_AA)
            cv2.putText(draw, "Focus on time:"+str(t)+" s", (350, 40), font, 0.6, (0, 255, 0), 1, cv2.LINE_AA)
            #print(self.total_time,type(self.total_time))
            if len(self.total_time)==0:
                self.total_time.append(t)
            elif self.total_time[-1]<= t:
                 self.total_time.pop()
                 self.total_time.append(t)
            elif self.total_time[-1]>= t:
                self.total_time.append(t)
            else:
                pass
            return draw,self.total_time
        self.time_list.clear()
        # Convert the bounding boxes to squares
        rectangles = utils.rect2square(np.array(rectangles,dtype=np.int32))
        rectangles[:,0] = np.clip(rectangles[:,0],0,width)
        rectangles[:,1] = np.clip(rectangles[:,1],0,height)
        rectangles[:,2] = np.clip(rectangles[:,2],0,width)
        rectangles[:,3] = np.clip(rectangles[:,3],0,height)
        #-----------------------------------------------#
        #   Encode the detected faces
        #-----------------------------------------------#
        face_encodings = []
        for rectangle in rectangles:
            landmark = (np.reshape(rectangle[5:15],(5,2)) - np.array([int(rectangle[0]),int(rectangle[1])]))/(rectangle[3]-rectangle[1])*160

            crop_img = draw_rgb[int(rectangle[1]):int(rectangle[3]), int(rectangle[0]):int(rectangle[2])]
            crop_img = cv2.resize(crop_img,(160,160))

            new_img,_ = utils.Alignment_1(crop_img,landmark)
            new_img = np.expand_dims(new_img,0)

            face_encoding = utils.calc_128_vec(self.facenet_model,new_img)
            face_encodings.append(face_encoding)

        face_names = []
        for face_encoding in face_encodings:
            # Compare this face with every face in the database and compute the match scores
            matches = utils.compare_faces(self.known_face_encodings, face_encoding, tolerance = 0.9)
            name = "Unknown"
            # Find the closest face in the database
            face_distances = utils.face_distance(self.known_face_encodings, face_encoding)
            # Index of the database face with the smallest distance
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = self.known_face_names[best_match_index]
            face_names.append(name)
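        # Assumed semantics of the two utils helpers above (a sketch, not their actual
        # implementation): face_distance returns the Euclidean distance from the query
        # embedding to each known embedding, and compare_faces flags the ones whose
        # distance is within the tolerance (0.9 here), so a name is assigned only if
        # the closest known face is also within that tolerance.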

        rectangles = rectangles[:,0:4]
        #-----------------------------------------------#
        #   Draw bounding boxes and names on the frame
        #-----------------------------------------------#
        for (left, top, right, bottom), name in zip(rectangles, face_names):
            cv2.rectangle(draw, (left, top), (right, bottom), (0, 0, 255), 2)
            
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(draw, name, (left , bottom - 15), font, 0.75, (255, 255, 255), 2) 
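            # Note: the status overlay below uses "name" from the last face processed
            # in the loop above, so with several faces only the last one sets the message.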
  
        if name!='Unknown':
            cv2.putText(draw, 'name:'+name, (10, 20), font, 0.6, (0, 255, 0), 1, cv2.LINE_AA)
            cv2.putText(draw, "state:"+"clock on", (10, 60), font, 0.6, (0, 255, 0), 1, cv2.LINE_AA)
            cv2.putText(draw, "Please concentrate on your study!", (200, 40), font, 0.6, (0, 0, 255), 1, cv2.LINE_AA)
        else:
            cv2.putText(draw, 'name:'+name, (10, 20), font, 0.6, (0, 255, 0), 1, cv2.LINE_AA)
            cv2.putText(draw, "state:"+"clock out failed", (10, 60), font, 0.6, (0, 255, 0), 1, cv2.LINE_AA)
            cv2.putText(draw, "Please add face!", (300, 40), font, 0.6, (0, 0, 255), 1, cv2.LINE_AA)
        
        return draw,self.total_time

if __name__ == "__main__":

    dududu = face_rec()
    video_capture = cv2.VideoCapture(0)

    while True:
        ret, draw = video_capture.read()
        if not ret:
            break
        dududu.recognize(draw)
        cv2.imshow('Video', draw)
        if cv2.waitKey(20) & 0xFF == ord('q'):
            break

    video_capture.release()
    cv2.destroyAllWindows()
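A rough run recipe (a sketch: the package names are inferred from the imports and the Keras .h5 weights, and are not pinned by the original post; the script name follows the layout sketch above):

pip install opencv-python numpy tensorflow python-dateutil
python face_attendance.py        # press q in the video window to quit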

 
