From bbf2ff2b0990349f142fed7fc8834eb40509024d Mon Sep 17 00:00:00 2001
From: zdh <2915287352@qq.com>
Date: 星期二, 29 七月 2025 09:16:41 +0800
Subject: [PATCH] Merge branch 'master' of ssh://115.28.86.8:29418/~admin/天开景运_0405_250719

---
 Server/刘创世/code/face_predict_use_keras.py |  143 ++++++++++++++++++++
 Server/刘创世/code/keras_train.py            |  277 +++++++++++++++++++++++++++++++++++++++
 Server/李晨飞/log/日志_李晨飞_0728.doc            |    0 
 3 files changed, 420 insertions(+), 0 deletions(-)

diff --git "a/Server/\345\210\230\345\210\233\344\270\226/code/face_predict_use_keras.py" "b/Server/\345\210\230\345\210\233\344\270\226/code/face_predict_use_keras.py"
new file mode 100644
index 0000000..da0e9de
--- /dev/null
+++ "b/Server/\345\210\230\345\210\233\344\270\226/code/face_predict_use_keras.py"
@@ -0,0 +1,143 @@
+# -*- coding: utf-8 -*-
+#'''鏂囦欢淇敼璇存槑
+#-1銆佸皢鎵嬪姩澧炲姞绫诲埆鏀规垚閫氳繃json鏄犲皠鑾峰彇
+#-2銆佽瘑鍒粨鏋滃拰鏃堕棿缁撳悎璧锋潵鏀句竴涓棩蹇楅噷闈紝鍙﹀鎶婅瘑鍒殑鍥剧墖鏀惧悓涓�涓枃浠跺す锛屾渶澶�500寮�
+#   闄岀敓浜哄氨鐢ㄦ椂闂村拰璇嗗埆澶辫触缁撳悎璧锋潵锛屽浘鐗囦篃鏀惧彟涓�涓枃浠跺す锛屾渶澶�500寮狅紝澶氱殑灏辨洿鏂�
+#鈥欌�樷��
+
+
+import cv2
+import sys
+import gc
+import time                       # 新增：用于获取当前时间
+import os                         # 新增：用于文件和文件夹操作
+import json                       #--------------------淇敼锛屾柊澧炲鍏�
+from keras_train import Model
+#------------鏂板-------------
+# 定义保存图片的文件夹路径
+SUCCESS_FOLDER = 'recognized_images'
+FAILURE_FOLDER = 'unrecognized_images'
+# 定义日志文件路径
+LOG_FILE = 'recognition_log.txt'
+
+# 创建保存图片的文件夹（如果不存在）
+if not os.path.exists(SUCCESS_FOLDER):
+    os.makedirs(SUCCESS_FOLDER)
+if not os.path.exists(FAILURE_FOLDER):
+    os.makedirs(FAILURE_FOLDER)
+#############################
+
+if __name__ == '__main__':
+    # if len(sys.argv) != 2:
+    #     print("Usage:%s camera_id\r\n" % (0))
+    #     sys.exit(0)
+
+    # 加载模型
+    model = Model()
+    model.load_model(file_path='./model/me.face.model.h5')
+
+    # 妗嗕綇浜鸿劯鐨勭煩褰㈣竟妗嗛鑹�
+    color = (0, 255, 0)
+
+    # 鎹曡幏鎸囧畾鎽勫儚澶寸殑瀹炴椂瑙嗛娴�
+    cap = cv2.VideoCapture(0)
+
+    # 浜鸿劯璇嗗埆鍒嗙被鍣ㄦ湰鍦板瓨鍌ㄨ矾寰�
+    cascade_path = ".\\model\\haarcascade_frontalface_alt2.xml"
+
+    #-------鏂板---------
+    # 浠� JSON 鏂囦欢涓姞杞界被鍒槧灏勶紝澧炲姞寮傚父妫�娴�                                    -------------------淇敼閮ㄥ垎锛屾爣娉ㄤ竴
+    try:
+        with open('class_indices.json', 'r', encoding='utf-8') as file:
+            class_indices = json.load(file)
+        # 鍙嶈浆瀛楀吀锛屼互渚块�氳繃绱㈠紩鏌ユ壘绫诲埆鍚嶇О
+        human = {v: k for k, v in class_indices.items()}
+        # 娣诲姞鏈煡绫诲埆
+        human[-1] = 'others'
+    except FileNotFoundError:
+        print("閿欒锛氭湭鎵惧埌 class_indices.json 鏂囦欢锛岃鍏堣繍琛岃缁冭剼鏈��")
+        sys.exit(1)
+
+    # 寰幆妫�娴嬭瘑鍒汉鑴�
+    while True:
+        _, frame = cap.read()  # 璇诲彇涓�甯ц棰�
+
+        # 鍥惧儚鐏板寲锛岄檷浣庤绠楀鏉傚害
+        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+
+        # 浣跨敤浜鸿劯璇嗗埆鍒嗙被鍣紝璇诲叆鍒嗙被鍣�
+        cascade = cv2.CascadeClassifier(cascade_path)
+
+        # 鍒╃敤鍒嗙被鍣ㄨ瘑鍒嚭鍝釜鍖哄煙涓轰汉鑴�
+        faceRects = cascade.detectMultiScale(frame_gray, scaleFactor=1.2, minNeighbors=3, minSize=(32, 32))
+        if len(faceRects) > 0:
+            for faceRect in faceRects:
+                x, y, w, h = faceRect
+
+                # 鎴彇鑴搁儴鍥惧儚鎻愪氦缁欐ā鍨嬭瘑鍒繖鏄皝
+                image = frame[y - 10: y + h + 10, x - 10: x + w + 10]
+                # print("image:",image)
+                faceID = model.face_predict(image)
+
+                #------------------------鏂板--------------------------
+                # 鑾峰彇褰撳墠鏃堕棿
+                current_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+
+                if faceID != -1:
+                    # 璇嗗埆鎴愬姛
+                    person_name = human[faceID]
+                    log_message = f"{current_time}: 璇嗗埆鍒� {person_name}"
+                    save_folder = SUCCESS_FOLDER
+                    image_name = f"{person_name}_{current_time.replace(':', '-')}.jpg"
+                else:
+                    # 璇嗗埆澶辫触
+                    person_name = 'others'
+                    log_message = f"{current_time}: 璇嗗埆澶辫触"
+                    save_folder = FAILURE_FOLDER
+                    image_name = f"recognition_failure_{current_time.replace(':', '-')}.jpg"
+
+                # 璁板綍鏃ュ織
+                with open(LOG_FILE, 'a', encoding='utf-8') as log_file:
+                    log_file.write(log_message + '\n')
+
+                # 淇濆瓨鍥剧墖
+                image_path = os.path.join(save_folder, image_name)
+                if image.size > 0:
+                    cv2.imwrite(image_path, image)
+
+                # 妫�鏌ユ枃浠跺す涓浘鐗囨暟閲忔槸鍚﹁秴杩� 500 寮�
+                image_files = [f for f in os.listdir(save_folder) if f.endswith('.jpg')]
+                if len(image_files) > 500:
+                    # 鎸変慨鏀规椂闂存帓搴�
+                    image_files.sort(key=lambda f: os.path.getmtime(os.path.join(save_folder, f)))
+                    # 鍒犻櫎鏈�鏃х殑鍥剧墖
+                    os.remove(os.path.join(save_folder, image_files[0]))
+                ###########################################
+
+                # 濡傛灉鏄�滄垜鈥�                                                -------------淇敼鎴愯嚜鍔ㄧ殑锛屼篃灏辨槸鏍囨敞涓�
+                #human = {0:'me',1:'huangzong', 2:'xileizhao', -1:'others',3:'wumu',4:'songyunfei',
+                #         5:'wuhuiting',6:'yangyang',7:'wm'}
+
+
+                cv2.rectangle(frame, (x - 10, y - 10), (x + w + 10, y + h + 10), color, thickness=2)
+
+                # 鏂囧瓧鎻愮ず鏄皝
+                cv2.putText(frame, human[faceID],
+                            (x + 30, y + 30),  # 鍧愭爣
+                            cv2.FONT_HERSHEY_SIMPLEX,  # 瀛椾綋
+                            1,  # 瀛楀彿
+                            (255, 0, 255),  # 棰滆壊
+                            2)  # 瀛楃殑绾垮
+
+
+        cv2.imshow("shi bie ren lian", frame)
+
+        # 绛夊緟10姣鐪嬫槸鍚︽湁鎸夐敭杈撳叆
+        k = cv2.waitKey(10)
+        # 濡傛灉杈撳叆q鍒欓��鍑哄惊鐜�
+        if k & 0xFF == ord('q'):
+            break
+
+    # 閲婃斁鎽勫儚澶村苟閿�姣佹墍鏈夌獥鍙�
+    cap.release()
+    cv2.destroyAllWindows()
\ No newline at end of file
diff --git "a/Server/\345\210\230\345\210\233\344\270\226/code/keras_train.py" "b/Server/\345\210\230\345\210\233\344\270\226/code/keras_train.py"
new file mode 100644
index 0000000..9d54f2f
--- /dev/null
+++ "b/Server/\345\210\230\345\210\233\344\270\226/code/keras_train.py"
@@ -0,0 +1,277 @@
+#-*- coding: utf-8 -*-
+#'''文件修改说明
+#-将类型个数从手动改成从json文档映射
+import random
+import h5py
+import numpy as np
+import json                                               #-------------鏂板
+# from sklearn.cross_validation import train_test_split
+from sklearn.model_selection import train_test_split    # 鐗堟湰鏇存柊鎹㈠悕瀛椾簡锛氭崲鎴� model_selection
+from keras.preprocessing.image import ImageDataGenerator
+from keras.models import Sequential
+from keras.layers import Dense, Dropout, Activation, Flatten
+from keras.layers import Convolution2D, MaxPooling2D
+from keras.optimizers import SGD
+from keras.utils import np_utils
+from keras.models import load_model
+from keras import backend as K
+from load_data import load_dataset, resize_image, IMAGE_SIZE
+
+
+class Dataset:
+    def __init__(self, path_name, class_map_path='./class_indices.json'):  #-----------鏂板
+        # 璁粌闆�
+        self.train_images = None
+        self.train_labels = None
+
+        # 楠岃瘉闆�
+        self.valid_images = None
+        self.valid_labels = None
+
+        # 娴嬭瘯闆�
+        self.test_images = None
+        self.test_labels = None
+
+        # 鏁版嵁闆嗗姞杞借矾寰�
+        self.path_name = path_name
+
+        # 鏂板锛氱被鍒槧灏勬枃浠惰矾寰�  ---------------------------------------鏂板
+        self.class_map_path = class_map_path
+
+        # 褰撳墠搴撻噰鐢ㄧ殑缁村害椤哄簭
+        self.input_shape = None
+
+    # 鍔犺浇鏁版嵁闆嗗苟鎸夌収浜ゅ弶楠岃瘉鐨勫師鍒欏垝鍒嗘暟鎹泦骞惰繘琛岀浉鍏抽澶勭悊宸ヤ綔--------------------淇敼nb_classes閫氳繃鍔ㄦ�佹柟寮忚幏鍙�
+    #def load(self, img_rows=IMAGE_SIZE, img_cols=IMAGE_SIZE,
+    #         img_channels=3, nb_classes=3):
+        # 鍔犺浇鏁版嵁闆嗗埌鍐呭瓨
+    #    images, labels = load_dataset(self.path_name)
+
+    def load(self, img_rows=IMAGE_SIZE, img_cols=IMAGE_SIZE,img_channels=3 ,nb_classes=3):
+        # 鍔犺浇鏁版嵁闆嗗埌鍐呭瓨
+        images, labels = load_dataset(self.path_name,self.class_map_path)
+
+
+
+        train_images, valid_images, train_labels, valid_labels = train_test_split(images, labels, test_size=0.2,
+                                                                                  random_state=random.randint(0, 100))
+        _, test_images, _, test_labels = train_test_split(images, labels, test_size=0.5,
+                                                          random_state=random.randint(0, 100))
+
+        # 褰撳墠鐨勭淮搴﹂『搴忓鏋滀负'th'锛屽垯杈撳叆鍥剧墖鏁版嵁鏃剁殑椤哄簭涓猴細channels,rows,cols锛屽惁鍒�:rows,cols,channels
+        # 杩欓儴鍒嗕唬鐮佸氨鏄牴鎹甼eras搴撹姹傜殑缁村害椤哄簭閲嶇粍璁粌鏁版嵁闆�
+        # if K.image_dim_ordering() == 'th':
+        if K.image_data_format() == "channels_first":
+            train_images = train_images.reshape(train_images.shape[0], img_channels, img_rows, img_cols)
+            valid_images = valid_images.reshape(valid_images.shape[0], img_channels, img_rows, img_cols)
+            test_images = test_images.reshape(test_images.shape[0], img_channels, img_rows, img_cols)
+            self.input_shape = (img_channels, img_rows, img_cols)
+        else:
+            train_images = train_images.reshape(train_images.shape[0], img_rows, img_cols, img_channels)
+            valid_images = valid_images.reshape(valid_images.shape[0], img_rows, img_cols, img_channels)
+            test_images = test_images.reshape(test_images.shape[0], img_rows, img_cols, img_channels)
+            self.input_shape = (img_rows, img_cols, img_channels)
+
+            # 杈撳嚭璁粌闆嗐�侀獙璇侀泦銆佹祴璇曢泦鐨勬暟閲�
+            print(train_images.shape[0], 'train samples')
+            print(valid_images.shape[0], 'valid samples')
+            print(test_images.shape[0], 'test samples')
+
+            # 鎴戜滑鐨勬ā鍨嬩娇鐢╟ategorical_crossentropy浣滀负鎹熷け鍑芥暟锛屽洜姝ら渶瑕佹牴鎹被鍒暟閲弉b_classes灏�
+            # 绫诲埆鏍囩杩涜one-hot缂栫爜浣垮叾鍚戦噺鍖栵紝鍦ㄨ繖閲屾垜浠殑绫诲埆鍙湁涓ょ锛岀粡杩囪浆鍖栧悗鏍囩鏁版嵁鍙樹负浜岀淮
+            train_labels = np_utils.to_categorical(train_labels, nb_classes)
+            valid_labels = np_utils.to_categorical(valid_labels, nb_classes)
+            test_labels = np_utils.to_categorical(test_labels, nb_classes)
+
+            # 鍍忕礌鏁版嵁娴偣鍖栦互渚垮綊涓�鍖�
+            train_images = train_images.astype('float32')
+            valid_images = valid_images.astype('float32')
+            test_images = test_images.astype('float32')
+
+            # 灏嗗叾褰掍竴鍖�,鍥惧儚鐨勫悇鍍忕礌鍊煎綊涓�鍖栧埌0~1鍖洪棿
+            train_images /= 255
+            valid_images /= 255
+            test_images /= 255
+
+            self.train_images = train_images
+            self.valid_images = valid_images
+            self.test_images = test_images
+            self.train_labels = train_labels
+            self.valid_labels = valid_labels
+            self.test_labels = test_labels
+
+
+# CNN缃戠粶妯″瀷绫�
+class Model:
+    def __init__(self):
+        self.model = None
+#1銆佸鍔犲嚱鏁板眰閫夋嫨,榛樿鏄�'relu'
+        self.model_def=('relu','')
+        # 寤虹珛妯″瀷
+
+    def build_model(self, dataset, nb_classes=3):
+        # 鏋勫缓涓�涓┖鐨勭綉缁滄ā鍨嬶紝瀹冩槸涓�涓嚎鎬у爢鍙犳ā鍨嬶紝鍚勭缁忕綉缁滃眰浼氳椤哄簭娣诲姞锛屼笓涓氬悕绉颁负搴忚疮妯″瀷鎴栫嚎鎬у爢鍙犳ā鍨�
+        self.model = Sequential()
+
+        # 浠ヤ笅浠g爜灏嗛『搴忔坊鍔燙NN缃戠粶闇�瑕佺殑鍚勫眰锛屼竴涓猘dd灏辨槸涓�涓綉缁滃眰
+        # self.model.add(Convolution2D(32, 3, 3, border_mode='same',
+        #                              input_shape=dataset.input_shape))  # 1 2缁村嵎绉眰
+        self.model.add(Convolution2D(32, 3, 3, padding='same',
+                                     input_shape=dataset.input_shape))  # 1 2缁村嵎绉眰
+        self.model.add(Activation('relu'))  # 2 婵�娲诲嚱鏁板眰
+
+        self.model.add(Convolution2D(32, 3, 3))  # 3 2缁村嵎绉眰
+        self.model.add(Activation('relu'))  # 4 婵�娲诲嚱鏁板眰
+
+        self.model.add(MaxPooling2D(pool_size=(2, 2)))  # 5 姹犲寲灞�
+        self.model.add(Dropout(0.25))  # 6 Dropout灞�
+
+        self.model.add(Convolution2D(64, 3, 3, padding='same'))  # 7  2缁村嵎绉眰
+        self.model.add(Activation('relu'))  # 8  婵�娲诲嚱鏁板眰
+
+        # self.model.add(Convolution2D(64, 3, 3))  # 9  2缁村嵎绉眰
+        self.model.add(Convolution2D(64, 3, 3, padding='same'))  # 7  2缁村嵎绉眰
+        self.model.add(Activation('relu'))  # 10 婵�娲诲嚱鏁板眰
+
+        # self.model.add(MaxPooling2D(pool_size=(2, 2)))  # 11 姹犲寲灞�
+        self.model.add(MaxPooling2D(pool_size=(2,2),padding='same'))
+        self.model.add(Dropout(0.25))  # 12 Dropout灞�
+
+        self.model.add(Flatten())  # 13 Flatten灞�
+        self.model.add(Dense(512))  # 14 Dense灞�,鍙堣绉颁綔鍏ㄨ繛鎺ュ眰
+        self.model.add(Activation('relu'))  # 15 婵�娲诲嚱鏁板眰
+        self.model.add(Dropout(0.5))  # 16 Dropout灞�
+        self.model.add(Dense(nb_classes))  # 17 Dense灞�
+        self.model.add(Activation('softmax'))  # 18 鍒嗙被灞傦紝杈撳嚭鏈�缁堢粨鏋�
+
+        # 杈撳嚭妯″瀷姒傚喌
+        self.model.summary()
+
+        # 璁粌妯″瀷
+    def train(self, dataset, batch_size=20, nb_epoch=10, data_augmentation=True):
+        # 鍙傛暟batch_size鐨勪綔鐢ㄥ嵆鍦ㄤ簬姝わ紝鍏舵寚瀹氭瘡娆¤凯浠h缁冩牱鏈殑鏁伴噺
+        # nb_epoch 璁粌杞崲娆℃暟
+        sgd = SGD(lr=0.01, decay=1e-6,
+                  momentum=0.9, nesterov=True)  # 閲囩敤SGD+momentum鐨勪紭鍖栧櫒杩涜璁粌锛岄鍏堢敓鎴愪竴涓紭鍖栧櫒瀵硅薄
+        self.model.compile(loss='categorical_crossentropy',
+                           optimizer=sgd,
+                           metrics=['accuracy'])  # 瀹屾垚瀹為檯鐨勬ā鍨嬮厤缃伐浣�
+
+        # 涓嶄娇鐢ㄦ暟鎹彁鍗囷紝鎵�璋撶殑鎻愬崌灏辨槸浠庢垜浠彁渚涚殑璁粌鏁版嵁涓埄鐢ㄦ棆杞�佺炕杞�佸姞鍣0绛夋柟娉曞垱閫犳柊鐨�
+        # 璁粌鏁版嵁锛屾湁鎰忚瘑鐨勬彁鍗囪缁冩暟鎹妯★紝澧炲姞妯″瀷璁粌閲�
+        if not data_augmentation:
+            self.model.fit(dataset.train_images,
+                           dataset.train_labels,
+                           batch_size=batch_size,
+                           nb_epoch=nb_epoch,
+                           validation_data=(dataset.valid_images, dataset.valid_labels),
+                           shuffle=True)
+        # 浣跨敤瀹炴椂鏁版嵁鎻愬崌
+        else:
+            # 瀹氫箟鏁版嵁鐢熸垚鍣ㄧ敤浜庢暟鎹彁鍗囷紝鍏惰繑鍥炰竴涓敓鎴愬櫒瀵硅薄datagen锛宒atagen姣忚璋冪敤涓�
+            # 娆″叾鐢熸垚涓�缁勬暟鎹紙椤哄簭鐢熸垚锛夛紝鑺傜渷鍐呭瓨锛屽叾瀹炲氨鏄痯ython鐨勬暟鎹敓鎴愬櫒
+            datagen = ImageDataGenerator(
+                featurewise_center=False,  # 鏄惁浣胯緭鍏ユ暟鎹幓涓績鍖栵紙鍧囧�间负0锛夛紝
+                samplewise_center=False,  # 鏄惁浣胯緭鍏ユ暟鎹殑姣忎釜鏍锋湰鍧囧�间负0
+                featurewise_std_normalization=False,  # 鏄惁鏁版嵁鏍囧噯鍖栵紙杈撳叆鏁版嵁闄や互鏁版嵁闆嗙殑鏍囧噯宸級
+                samplewise_std_normalization=False,  # 鏄惁灏嗘瘡涓牱鏈暟鎹櫎浠ヨ嚜韬殑鏍囧噯宸�
+                zca_whitening=False,  # 鏄惁瀵硅緭鍏ユ暟鎹柦浠CA鐧藉寲
+                rotation_range=20,  # 鏁版嵁鎻愬崌鏃跺浘鐗囬殢鏈鸿浆鍔ㄧ殑瑙掑害(鑼冨洿涓�0锝�180)
+                width_shift_range=0.2,  # 鏁版嵁鎻愬崌鏃跺浘鐗囨按骞冲亸绉荤殑骞呭害锛堝崟浣嶄负鍥剧墖瀹藉害鐨勫崰姣旓紝0~1涔嬮棿鐨勬诞鐐规暟锛�
+                height_shift_range=0.2,  # 鍚屼笂锛屽彧涓嶈繃杩欓噷鏄瀭鐩�
+                horizontal_flip=True,  # 鏄惁杩涜闅忔満姘村钩缈昏浆
+                vertical_flip=False)  # 鏄惁杩涜闅忔満鍨傜洿缈昏浆
+
+            # 璁$畻鏁翠釜璁粌鏍锋湰闆嗙殑鏁伴噺浠ョ敤浜庣壒寰佸�煎綊涓�鍖栥�乑CA鐧藉寲绛夊鐞�
+            datagen.fit(dataset.train_images)
+
+            # 鍒╃敤鐢熸垚鍣ㄥ紑濮嬭缁冩ā鍨�
+            # self.model.fit_generator(datagen.flow(dataset.train_images, dataset.train_labels,
+            #                                       batch_size=batch_size),
+            #                          samples_per_epoch=dataset.train_images.shape[0],
+            #                          nb_epoch=nb_epoch,
+            #                          validation_data=(dataset.valid_images, dataset.valid_labels))
+
+            # self.model.fit_generator(datagen.flow(dataset.train_images, dataset.train_labels,
+            #                                       batch_size=batch_size),
+            #                          steps_per_epoch =dataset.train_images.shape[0],
+            #                          epochs=nb_epoch,
+            #                          validation_data=(dataset.valid_images, dataset.valid_labels))
+
+            self.model.fit_generator(datagen.flow(dataset.train_images, dataset.train_labels,
+                                                  batch_size=batch_size),
+                                     steps_per_epoch=float(len(dataset.train_images)/batch_size),
+                                     epochs=nb_epoch,
+                                     validation_data=(dataset.valid_images, dataset.valid_labels))
+
+    MODEL_PATH = './me.face.model.h5'
+
+    def save_model(self, file_path=MODEL_PATH):
+        self.model.save(file_path)
+
+    def load_model(self, file_path=MODEL_PATH):
+        self.model = load_model(file_path)
+
+    def evaluate(self, dataset):
+        score = self.model.evaluate(dataset.test_images, dataset.test_labels, verbose=1)
+        print("%s: %.2f%%" % (self.model.metrics_names[1], score[1] * 100))
+
+    def face_predict(self, image):
+        # 渚濈劧鏄牴鎹悗绔郴缁熺‘瀹氱淮搴﹂『搴�
+        # if K.image_dim_ordering() == 'th' and image.shape != (1, 3, IMAGE_SIZE, IMAGE_SIZE):
+        if K.image_data_format() == "channels_first" and image.shape != (1, 3, IMAGE_SIZE, IMAGE_SIZE):
+            image = resize_image(image)  # 灏哄蹇呴』涓庤缁冮泦涓�鑷撮兘搴旇鏄疘MAGE_SIZE x IMAGE_SIZE
+            image = image.reshape((1, 3, IMAGE_SIZE, IMAGE_SIZE))  # 涓庢ā鍨嬭缁冧笉鍚岋紝杩欐鍙槸閽堝1寮犲浘鐗囪繘琛岄娴�
+        # elif K.image_dim_ordering() == 'tf' and image.shape != (1, IMAGE_SIZE, IMAGE_SIZE, 3):
+        elif K.image_data_format() == "channels_last" and image.shape != (1, IMAGE_SIZE, IMAGE_SIZE, 3):
+            image = resize_image(image)
+            image = image.reshape((1, IMAGE_SIZE, IMAGE_SIZE, 3))
+
+        # 鏂板姞鐨勫鐞�
+        # image = resize_image(image)
+        # image = image.reshape((1, IMAGE_SIZE, IMAGE_SIZE, 3))
+            # 娴偣骞跺綊涓�鍖�
+        image = image.astype('float32')
+        image /= 255
+
+        # 缁欏嚭杈撳叆灞炰簬鍚勪釜绫诲埆鐨勬鐜囷紝鎴戜滑鏄簩鍊肩被鍒紝鍒欒鍑芥暟浼氱粰鍑鸿緭鍏ュ浘鍍忓睘浜�0鍜�1鐨勬鐜囧悇涓哄灏�
+        # result = self.model.predict_proba(image)
+        # print("image===",image)
+        predict_x = self.model.predict(image)
+        print("predict_x:",predict_x)
+        result = np.argmax(predict_x,axis=1)
+        print('result:', result)
+        # my_result = list(result[0]).index(max(result[0]))
+        # max_result = max(result[0])
+        my_result = result[0]
+        max_result = predict_x[0][result[0]]
+        print("result鏈�澶у�间笅鏍�:", my_result,max_result)
+        if max_result>0.90:
+            return my_result
+        else:
+            return -1
+
+        # 缁欏嚭绫诲埆棰勬祴锛�0鎴栬��1
+        # result = self.model.predict_classes(image)
+
+
+
+if __name__ == '__main__':
+    dataset = Dataset('.\\deep_learning')
+
+    #dataset.load(nb_classes=8)           ----------------------鏇存崲鑷姩鐨�->
+    ##########################鏇存崲->
+    with open('class_indices.json', 'r', encoding='utf-8') as file:
+        data = json.load(file)
+    dataset.load(nb_classes=len(data))   #----------------------淇敼鎴愯嚜鍔ㄨ瘑鍒被鐨勪釜鏁�
+    ##########################鏇存崲<-
+
+    # 璁粌妯″瀷
+    model = Model()
+    model.build_model(dataset,nb_classes=len(data))  #---------------淇敼鎴愯嚜鍔ㄨ瘑鍒被鐨勪釜鏁�
+    model.train(dataset)
+    model.save_model(file_path='./model/me.face.model.h5')
+    # 璇勪及妯″瀷锛岀‘璁ゆā鍨嬬殑绮惧害鏄惁鑳借揪鍒拌姹�
+    model = Model()
+    model.load_model(file_path='./model/me.face.model.h5')
+    model.evaluate(dataset)
\ No newline at end of file
diff --git "a/Server/\346\235\216\346\231\250\351\243\236/log/\346\227\245\345\277\227_\346\235\216\346\231\250\351\243\236_0728.doc" "b/Server/\346\235\216\346\231\250\351\243\236/log/\346\227\245\345\277\227_\346\235\216\346\231\250\351\243\236_0728.doc"
new file mode 100644
index 0000000..8fe163a
--- /dev/null
+++ "b/Server/\346\235\216\346\231\250\351\243\236/log/\346\227\245\345\277\227_\346\235\216\346\231\250\351\243\236_0728.doc"
Binary files differ

--
Gitblit v1.8.0