2025-07-29 c8a3efd67d61bcc47af69227ba0b964198e43db3
Merge branch 'master' of ssh://115.28.86.8:29418/~admin/天开景运_0405_250719
9 files changed
420 ■■■■■ lines changed
Client/张德虎/log/日志_张德虎_0728 -.doc
Client/程泽坤/log/日志_程泽坤_0728.doc
Client/舒令文/log/日志_舒令文_20250728.doc
Client/雷鹏涛/log/日报_雷鹏涛_0728.doc
Server/付林涛/log/~WRL0001.tmp
Server/付林涛/log/日志_付林涛_0728.doc
Server/刘创世/code/face_predict_use_keras.py 143 ●●●●●
Server/刘创世/code/keras_train.py 277 ●●●●●
Server/李晨飞/log/日志_李晨飞_0728.doc
Client/张德虎/log/日志_张德虎_0728 -.doc
Binary files differ
Client/程泽坤/log/日志_程泽坤_0728.doc
Binary files differ
Client/舒令文/log/日志_舒令文_20250728.doc
Binary files differ
Client/雷鹏涛/log/日报_雷鹏涛_0728.doc
Binary files differ
Server/付林涛/log/~WRL0001.tmp
Binary files differ
Server/付林涛/log/日志_付林涛_0728.doc
Binary files differ
Server/刘创世/code/face_predict_use_keras.py
New file
@@ -0,0 +1,143 @@
# -*- coding: utf-8 -*-
#'''File modification notes
#-1. Replaced the hand-maintained class list with a mapping loaded from a JSON file
#-2. Recognition results are logged together with a timestamp in a single log file; recognized face images are saved to one folder, capped at 500
#    Strangers are logged as a recognition failure plus timestamp; their images go to a separate folder, also capped at 500, with the oldest replaced when full
#'''
import cv2
import sys
import gc
import time                       # Added: used to get the current time
import os                         # Added: used for file and folder operations
import json                       #-------------------- modified: newly added import
from keras_train import Model
#------------ added -------------
# Folder paths for saved images
SUCCESS_FOLDER = 'recognized_images'
FAILURE_FOLDER = 'unrecognized_images'
# Log file path
LOG_FILE = 'recognition_log.txt'
# Create the image folders if they do not exist
if not os.path.exists(SUCCESS_FOLDER):
    os.makedirs(SUCCESS_FOLDER)
if not os.path.exists(FAILURE_FOLDER):
    os.makedirs(FAILURE_FOLDER)
#############################
if __name__ == '__main__':
    # if len(sys.argv) != 2:
    #     print("Usage:%s camera_id\r\n" % (0))
    #     sys.exit(0)
    # Load the model
    model = Model()
    model.load_model(file_path='./model/me.face.model.h5')
    # Color of the rectangle drawn around detected faces
    color = (0, 255, 0)
    # Capture the live video stream from the specified camera
    cap = cv2.VideoCapture(0)
    # Local storage path of the face detection cascade classifier
    cascade_path = ".\\model\\haarcascade_frontalface_alt2.xml"
    #------- added ---------
    # Load the class mapping from a JSON file, with error handling                                    ------------------- modified section, note 1
    try:
        with open('class_indices.json', 'r', encoding='utf-8') as file:
            class_indices = json.load(file)
        # åè½¬å­—典,以便通过索引查找类别名称
        human = {v: k for k, v in class_indices.items()}
        # æ·»åŠ æœªçŸ¥ç±»åˆ«
        human[-1] = 'others'
    except FileNotFoundError:
        print("错误:未找到 class_indices.json æ–‡ä»¶ï¼Œè¯·å…ˆè¿è¡Œè®­ç»ƒè„šæœ¬ã€‚")
        sys.exit(1)
    # Loop: detect and recognize faces
    while True:
        _, frame = cap.read()  # Read one frame of video
        # Convert the image to grayscale to reduce computational cost
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Load the face detection cascade classifier
        cascade = cv2.CascadeClassifier(cascade_path)
        # Use the classifier to determine which regions are faces
        faceRects = cascade.detectMultiScale(frame_gray, scaleFactor=1.2, minNeighbors=3, minSize=(32, 32))
        if len(faceRects) > 0:
            for faceRect in faceRects:
                x, y, w, h = faceRect
                # æˆªå–脸部图像提交给模型识别这是谁
                image = frame[y - 10: y + h + 10, x - 10: x + w + 10]
                # print("image:",image)
                faceID = model.face_predict(image)
                #------------------------ added --------------------------
                # Get the current time
                current_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                if faceID != -1:
                    # Recognition succeeded
                    person_name = human[faceID]
                    log_message = f"{current_time}: recognized {person_name}"
                    save_folder = SUCCESS_FOLDER
                    image_name = f"{person_name}_{current_time.replace(':', '-')}.jpg"
                else:
                    # Recognition failed
                    person_name = 'others'
                    log_message = f"{current_time}: recognition failed"
                    save_folder = FAILURE_FOLDER
                    image_name = f"recognition_failure_{current_time.replace(':', '-')}.jpg"
                # Append the result to the log
                with open(LOG_FILE, 'a', encoding='utf-8') as log_file:
                    log_file.write(log_message + '\n')
                # Save the image
                image_path = os.path.join(save_folder, image_name)
                if image.size > 0:
                    cv2.imwrite(image_path, image)
                # Check whether the folder now holds more than 500 images
                image_files = [f for f in os.listdir(save_folder) if f.endswith('.jpg')]
                if len(image_files) > 500:
                    # Sort by modification time
                    image_files.sort(key=lambda f: os.path.getmtime(os.path.join(save_folder, f)))
                    # Delete the oldest image
                    os.remove(os.path.join(save_folder, image_files[0]))
                ###########################################
                # If it is "me"                                                ------------- now automatic, i.e. note 1
                #human = {0:'me',1:'huangzong', 2:'xileizhao', -1:'others',3:'wumu',4:'songyunfei',
                #         5:'wuhuiting',6:'yangyang',7:'wm'}
                cv2.rectangle(frame, (x - 10, y - 10), (x + w + 10, y + h + 10), color, thickness=2)
                # Overlay the person's name as text
                cv2.putText(frame, human[faceID],
                            (x + 30, y + 30),  # position
                            cv2.FONT_HERSHEY_SIMPLEX,  # font
                            1,  # font scale
                            (255, 0, 255),  # color
                            2)  # line thickness
        cv2.imshow("shi bie ren lian", frame)
        # Wait 10 ms for a key press
        k = cv2.waitKey(10)
        # Exit the loop if 'q' was pressed
        if k & 0xFF == ord('q'):
            break
    # Release the camera and destroy all windows
    cap.release()
    cv2.destroyAllWindows()
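Both scripts assume a class_indices.json that maps person names to integer labels; the file itself never appears in this commit. A minimal sketch of the assumed format and of the inversion performed above (the names are illustrative, not taken from the repository):

# Sketch of the assumed class_indices.json and its inversion -- illustrative names only
import json

example_indices = {"personA": 0, "personB": 1, "me": 2}   # name -> integer label
with open('class_indices.json', 'w', encoding='utf-8') as f:
    json.dump(example_indices, f, ensure_ascii=False)

# Reload and invert it exactly as face_predict_use_keras.py does
with open('class_indices.json', 'r', encoding='utf-8') as f:
    class_indices = json.load(f)
human = {v: k for k, v in class_indices.items()}   # label -> name
human[-1] = 'others'                               # fallback for unrecognized faces
print(human)   # {0: 'personA', 1: 'personB', 2: 'me', -1: 'others'}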
Server/刘创世/code/keras_train.py
New file
@@ -0,0 +1,277 @@
#-*- coding: utf-8 -*-
#'''File modification notes
#- The class count is no longer hand-coded; it is obtained from the JSON mapping file
import random
import h5py
import numpy as np
import json                                               #------------- added
# from sklearn.cross_validation import train_test_split
from sklearn.model_selection import train_test_split    # Renamed after a version update: now in model_selection
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.models import load_model
from keras import backend as K
from load_data import load_dataset, resize_image, IMAGE_SIZE
class Dataset:
    def __init__(self, path_name, class_map_path='./class_indices.json'):  #----------- added
        # Training set
        self.train_images = None
        self.train_labels = None
        # Validation set
        self.valid_images = None
        self.valid_labels = None
        # Test set
        self.test_images = None
        self.test_labels = None
        # Dataset load path
        self.path_name = path_name
        # Added: path of the class mapping file  --------------------------------- added
        self.class_map_path = class_map_path
        # Dimension ordering used by the current backend
        self.input_shape = None
    # åŠ è½½æ•°æ®é›†å¹¶æŒ‰ç…§äº¤å‰éªŒè¯çš„åŽŸåˆ™åˆ’åˆ†æ•°æ®é›†å¹¶è¿›è¡Œç›¸å…³é¢„å¤„ç†å·¥ä½œ--------------------修改nb_classes通过动态方式获取
    #def load(self, img_rows=IMAGE_SIZE, img_cols=IMAGE_SIZE,
    #         img_channels=3, nb_classes=3):
        # åŠ è½½æ•°æ®é›†åˆ°å†…å­˜
    #    images, labels = load_dataset(self.path_name)
    def load(self, img_rows=IMAGE_SIZE, img_cols=IMAGE_SIZE,img_channels=3 ,nb_classes=3):
        # åŠ è½½æ•°æ®é›†åˆ°å†…å­˜
        images, labels = load_dataset(self.path_name,self.class_map_path)
        train_images, valid_images, train_labels, valid_labels = train_test_split(images, labels, test_size=0.2,
                                                                                  random_state=random.randint(0, 100))
        _, test_images, _, test_labels = train_test_split(images, labels, test_size=0.5,
                                                          random_state=random.randint(0, 100))
        # If the current ordering is 'channels_first', image data is ordered channels,rows,cols; otherwise rows,cols,channels
        # This code reshapes the training data into the dimension order required by the Keras backend
        # if K.image_dim_ordering() == 'th':
        if K.image_data_format() == "channels_first":
            train_images = train_images.reshape(train_images.shape[0], img_channels, img_rows, img_cols)
            valid_images = valid_images.reshape(valid_images.shape[0], img_channels, img_rows, img_cols)
            test_images = test_images.reshape(test_images.shape[0], img_channels, img_rows, img_cols)
            self.input_shape = (img_channels, img_rows, img_cols)
        else:
            train_images = train_images.reshape(train_images.shape[0], img_rows, img_cols, img_channels)
            valid_images = valid_images.reshape(valid_images.shape[0], img_rows, img_cols, img_channels)
            test_images = test_images.reshape(test_images.shape[0], img_rows, img_cols, img_channels)
            self.input_shape = (img_rows, img_cols, img_channels)
        # Print the sizes of the training, validation, and test sets
        print(train_images.shape[0], 'train samples')
        print(valid_images.shape[0], 'valid samples')
        print(test_images.shape[0], 'test samples')
        # The model uses categorical_crossentropy as its loss function, so the class labels
        # must be one-hot encoded into vectors according to nb_classes
        train_labels = np_utils.to_categorical(train_labels, nb_classes)
        valid_labels = np_utils.to_categorical(valid_labels, nb_classes)
        test_labels = np_utils.to_categorical(test_labels, nb_classes)
        # Convert pixel data to float for normalization
        train_images = train_images.astype('float32')
        valid_images = valid_images.astype('float32')
        test_images = test_images.astype('float32')
        # Normalize pixel values to the 0-1 range
        train_images /= 255
        valid_images /= 255
        test_images /= 255
        self.train_images = train_images
        self.valid_images = valid_images
        self.test_images = test_images
        self.train_labels = train_labels
        self.valid_labels = valid_labels
        self.test_labels = test_labels
# CNN network model class
class Model:
    def __init__(self):
        self.model = None
        # 1. Added activation-function selection; default is 'relu'
        self.model_def = ('relu', '')
    # Build the model
    def build_model(self, dataset, nb_classes=3):
        # Start with an empty network model: a linear stack to which layers are added in order, formally called a Sequential model
        self.model = Sequential()
        # The following calls add the layers the CNN needs, in order; each add() is one layer
        # self.model.add(Convolution2D(32, 3, 3, border_mode='same',
        #                              input_shape=dataset.input_shape))  # 1: 2D convolution layer
        self.model.add(Convolution2D(32, 3, 3, padding='same',
                                     input_shape=dataset.input_shape))  # 1: 2D convolution layer
        self.model.add(Activation('relu'))  # 2: activation layer
        self.model.add(Convolution2D(32, 3, 3))  # 3: 2D convolution layer
        self.model.add(Activation('relu'))  # 4: activation layer
        self.model.add(MaxPooling2D(pool_size=(2, 2)))  # 5: max-pooling layer
        self.model.add(Dropout(0.25))  # 6: dropout layer
        self.model.add(Convolution2D(64, 3, 3, padding='same'))  # 7: 2D convolution layer
        self.model.add(Activation('relu'))  # 8: activation layer
        # self.model.add(Convolution2D(64, 3, 3))  # 9: 2D convolution layer
        self.model.add(Convolution2D(64, 3, 3, padding='same'))  # 9: 2D convolution layer
        self.model.add(Activation('relu'))  # 10: activation layer
        # self.model.add(MaxPooling2D(pool_size=(2, 2)))  # 11: max-pooling layer
        self.model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))  # 11: max-pooling layer
        self.model.add(Dropout(0.25))  # 12: dropout layer
        self.model.add(Flatten())  # 13: flatten layer
        self.model.add(Dense(512))  # 14: dense (fully connected) layer
        self.model.add(Activation('relu'))  # 15: activation layer
        self.model.add(Dropout(0.5))  # 16: dropout layer
        self.model.add(Dense(nb_classes))  # 17: dense layer
        self.model.add(Activation('softmax'))  # 18: classification layer producing the final output
        # Print the model summary
        self.model.summary()
    # Train the model
    def train(self, dataset, batch_size=20, nb_epoch=10, data_augmentation=True):
        # å‚æ•°batch_size的作用即在于此,其指定每次迭代训练样本的数量
        # nb_epoch è®­ç»ƒè½®æ¢æ¬¡æ•°
        sgd = SGD(lr=0.01, decay=1e-6,
                  momentum=0.9, nesterov=True)  # é‡‡ç”¨SGD+momentum的优化器进行训练,首先生成一个优化器对象
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=sgd,
                           metrics=['accuracy'])  # å®Œæˆå®žé™…的模型配置工作
        # ä¸ä½¿ç”¨æ•°æ®æå‡ï¼Œæ‰€è°“的提升就是从我们提供的训练数据中利用旋转、翻转、加噪声等方法创造新的
        # è®­ç»ƒæ•°æ®ï¼Œæœ‰æ„è¯†çš„æå‡è®­ç»ƒæ•°æ®è§„模,增加模型训练量
        if not data_augmentation:
            self.model.fit(dataset.train_images,
                           dataset.train_labels,
                           batch_size=batch_size,
                           epochs=nb_epoch,
                           validation_data=(dataset.valid_images, dataset.valid_labels),
                           shuffle=True)
        # With real-time data augmentation
        else:
            # å®šä¹‰æ•°æ®ç”Ÿæˆå™¨ç”¨äºŽæ•°æ®æå‡ï¼Œå…¶è¿”回一个生成器对象datagen,datagen每被调用一
            # æ¬¡å…¶ç”Ÿæˆä¸€ç»„数据(顺序生成),节省内存,其实就是python的数据生成器
            datagen = ImageDataGenerator(
                featurewise_center=False,  # æ˜¯å¦ä½¿è¾“入数据去中心化(均值为0),
                samplewise_center=False,  # æ˜¯å¦ä½¿è¾“入数据的每个样本均值为0
                featurewise_std_normalization=False,  # æ˜¯å¦æ•°æ®æ ‡å‡†åŒ–(输入数据除以数据集的标准差)
                samplewise_std_normalization=False,  # æ˜¯å¦å°†æ¯ä¸ªæ ·æœ¬æ•°æ®é™¤ä»¥è‡ªèº«çš„æ ‡å‡†å·®
                zca_whitening=False,  # æ˜¯å¦å¯¹è¾“入数据施以ZCA白化
                rotation_range=20,  # æ•°æ®æå‡æ—¶å›¾ç‰‡éšæœºè½¬åŠ¨çš„è§’åº¦(范围为0~180)
                width_shift_range=0.2,  # æ•°æ®æå‡æ—¶å›¾ç‰‡æ°´å¹³åç§»çš„幅度(单位为图片宽度的占比,0~1之间的浮点数)
                height_shift_range=0.2,  # åŒä¸Šï¼Œåªä¸è¿‡è¿™é‡Œæ˜¯åž‚ç›´
                horizontal_flip=True,  # æ˜¯å¦è¿›è¡Œéšæœºæ°´å¹³ç¿»è½¬
                vertical_flip=False)  # æ˜¯å¦è¿›è¡Œéšæœºåž‚直翻转
            # Fit the generator on the full training set, required for featurewise normalization, ZCA whitening, etc.
            datagen.fit(dataset.train_images)
            # Train the model using the generator
            # self.model.fit_generator(datagen.flow(dataset.train_images, dataset.train_labels,
            #                                       batch_size=batch_size),
            #                          samples_per_epoch=dataset.train_images.shape[0],
            #                          nb_epoch=nb_epoch,
            #                          validation_data=(dataset.valid_images, dataset.valid_labels))
            # self.model.fit_generator(datagen.flow(dataset.train_images, dataset.train_labels,
            #                                       batch_size=batch_size),
            #                          steps_per_epoch =dataset.train_images.shape[0],
            #                          epochs=nb_epoch,
            #                          validation_data=(dataset.valid_images, dataset.valid_labels))
            self.model.fit_generator(datagen.flow(dataset.train_images, dataset.train_labels,
                                                  batch_size=batch_size),
                                     steps_per_epoch=int(np.ceil(len(dataset.train_images) / batch_size)),
                                     epochs=nb_epoch,
                                     validation_data=(dataset.valid_images, dataset.valid_labels))
    MODEL_PATH = './me.face.model.h5'
    def save_model(self, file_path=MODEL_PATH):
        self.model.save(file_path)
    def load_model(self, file_path=MODEL_PATH):
        self.model = load_model(file_path)
    def evaluate(self, dataset):
        score = self.model.evaluate(dataset.test_images, dataset.test_labels, verbose=1)
        print("%s: %.2f%%" % (self.model.metrics_names[1], score[1] * 100))
    def face_predict(self, image):
        # ä¾ç„¶æ˜¯æ ¹æ®åŽç«¯ç³»ç»Ÿç¡®å®šç»´åº¦é¡ºåº
        # if K.image_dim_ordering() == 'th' and image.shape != (1, 3, IMAGE_SIZE, IMAGE_SIZE):
        if K.image_data_format() == "channels_first" and image.shape != (1, 3, IMAGE_SIZE, IMAGE_SIZE):
            image = resize_image(image)  # The size must match the training set: IMAGE_SIZE x IMAGE_SIZE
            image = image.reshape((1, 3, IMAGE_SIZE, IMAGE_SIZE))  # Unlike training, this predicts on a single image
        # elif K.image_dim_ordering() == 'tf' and image.shape != (1, IMAGE_SIZE, IMAGE_SIZE, 3):
        elif K.image_data_format() == "channels_last" and image.shape != (1, IMAGE_SIZE, IMAGE_SIZE, 3):
            image = resize_image(image)
            image = image.reshape((1, IMAGE_SIZE, IMAGE_SIZE, 3))
        # Newly added handling
        # image = resize_image(image)
        # image = image.reshape((1, IMAGE_SIZE, IMAGE_SIZE, 3))
        # Convert to float and normalize
        image = image.astype('float32')
        image /= 255
        # Output the probability of the input belonging to each class; for a binary problem this gives the probabilities for class 0 and class 1
        # result = self.model.predict_proba(image)
        # print("image===",image)
        predict_x = self.model.predict(image)
        print("predict_x:",predict_x)
        result = np.argmax(predict_x,axis=1)
        print('result:', result)
        # my_result = list(result[0]).index(max(result[0]))
        # max_result = max(result[0])
        my_result = result[0]
        max_result = predict_x[0][result[0]]
        print("result最大值下标:", my_result,max_result)
        if max_result>0.90:
            return my_result
        else:
            return -1
        # Give the class prediction (e.g. 0 or 1)
        # result = self.model.predict_classes(image)
if __name__ == '__main__':
    dataset = Dataset('.\\deep_learning')
    #dataset.load(nb_classes=8)           ---------------------- replaced with automatic detection ->
    ########################## replaced ->
    with open('class_indices.json', 'r', encoding='utf-8') as file:
        data = json.load(file)
    dataset.load(nb_classes=len(data))   #---------------------- modified: the class count is detected automatically
    ########################## replaced <-
    # Train the model
    model = Model()
    model.build_model(dataset, nb_classes=len(data))  #--------------- modified: the class count is detected automatically
    model.train(dataset)
    model.save_model(file_path='./model/me.face.model.h5')
    # Evaluate the model to confirm its accuracy meets the requirements
    model = Model()
    model.load_model(file_path='./model/me.face.model.h5')
    model.evaluate(dataset)
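Note that load_data.py, imported at the top of keras_train.py, is not part of this commit, and nothing shown here writes class_indices.json. A minimal sketch of what it might look like, assuming deep_learning/ holds one subfolder of .jpg face images per person and that load_dataset is also responsible for persisting the class map (the function bodies and the IMAGE_SIZE value are assumptions, not the author's code):

# Hypothetical load_data.py sketch -- not the author's file. Assumes one
# subfolder per person under path_name, each holding .jpg face images.
import os
import json
import cv2
import numpy as np

IMAGE_SIZE = 64   # assumed edge length; must match whatever the real module uses

def resize_image(image, height=IMAGE_SIZE, width=IMAGE_SIZE):
    # Plain resize; the original may pad instead to preserve the aspect ratio.
    return cv2.resize(image, (width, height))

def load_dataset(path_name, class_map_path='./class_indices.json'):
    images, labels, class_indices = [], [], {}
    people = sorted(d for d in os.listdir(path_name)
                    if os.path.isdir(os.path.join(path_name, d)))
    for label, person in enumerate(people):
        class_indices[person] = label
        person_dir = os.path.join(path_name, person)
        for file_name in os.listdir(person_dir):
            if file_name.endswith('.jpg'):
                image = cv2.imread(os.path.join(person_dir, file_name))
                if image is not None:
                    images.append(resize_image(image))
                    labels.append(label)
    # Persist the name -> label map that both scripts above read back
    with open(class_map_path, 'w', encoding='utf-8') as f:
        json.dump(class_indices, f, ensure_ascii=False)
    return np.array(images), np.array(labels)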
Server/À·É/log/ÈÕÖ¾_À·É_0728.doc
Binary files differ