# -*- coding: utf-8 -*-
# --- File modification notes ---
# - The number of classes is no longer set by hand; it is mapped from a JSON file.
import random
import h5py
import numpy as np
import json  # ------------- new
# from sklearn.cross_validation import train_test_split
from sklearn.model_selection import train_test_split  # renamed in a newer sklearn version: use model_selection
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.models import load_model
from keras import backend as K
from load_data import load_dataset, resize_image, IMAGE_SIZE
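
# Hedged example of what class_indices.json is assumed to contain: a mapping from
# class name to integer label. The actual format is defined by load_data.load_dataset,
# so adjust to match; e.g.:
#   {"person_a": 0, "person_b": 1, "person_c": 2}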


class Dataset:
    def __init__(self, path_name, class_map_path='./class_indices.json'):  # ----------- new parameter
        # Training set
        self.train_images = None
        self.train_labels = None

        # Validation set
        self.valid_images = None
        self.valid_labels = None

        # Test set
        self.test_images = None
        self.test_labels = None

        # Path the dataset is loaded from
        self.path_name = path_name

        # New: path of the class-index mapping file ------------------------------------- new
        self.class_map_path = class_map_path

        # Dimension ordering used by the current backend
        self.input_shape = None

    # Load the dataset, split it following cross-validation practice, and run the
    # related preprocessing -------------------- changed: nb_classes is now obtained dynamically
    # def load(self, img_rows=IMAGE_SIZE, img_cols=IMAGE_SIZE,
    #          img_channels=3, nb_classes=3):
    #     # Load the dataset into memory
    #     images, labels = load_dataset(self.path_name)

    def load(self, img_rows=IMAGE_SIZE, img_cols=IMAGE_SIZE, img_channels=3, nb_classes=3):
        # Load the dataset into memory
        images, labels = load_dataset(self.path_name, self.class_map_path)

        train_images, valid_images, train_labels, valid_labels = train_test_split(
            images, labels, test_size=0.2, random_state=random.randint(0, 100))
        _, test_images, _, test_labels = train_test_split(
            images, labels, test_size=0.5, random_state=random.randint(0, 100))
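        # Note: this second split draws from the full dataset again, so test samples
        # can overlap the training set; a truly independent hold-out would split the
        # remainder of the first split instead.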

        # If the current dimension ordering is 'channels_first' (the old 'th'), image
        # data is fed as (channels, rows, cols); otherwise as (rows, cols, channels).
        # This block reshapes the datasets into the ordering the Keras backend expects.
        # if K.image_dim_ordering() == 'th':
        if K.image_data_format() == "channels_first":
            train_images = train_images.reshape(train_images.shape[0], img_channels, img_rows, img_cols)
            valid_images = valid_images.reshape(valid_images.shape[0], img_channels, img_rows, img_cols)
            test_images = test_images.reshape(test_images.shape[0], img_channels, img_rows, img_cols)
            self.input_shape = (img_channels, img_rows, img_cols)
        else:
            train_images = train_images.reshape(train_images.shape[0], img_rows, img_cols, img_channels)
            valid_images = valid_images.reshape(valid_images.shape[0], img_rows, img_cols, img_channels)
            test_images = test_images.reshape(test_images.shape[0], img_rows, img_cols, img_channels)
            self.input_shape = (img_rows, img_cols, img_channels)

        # Print the sizes of the training, validation, and test sets
        print(train_images.shape[0], 'train samples')
        print(valid_images.shape[0], 'valid samples')
        print(test_images.shape[0], 'test samples')

        # The model uses categorical_crossentropy as its loss function, so the class
        # labels must be one-hot encoded according to nb_classes; after conversion the
        # labels become an (n_samples, nb_classes) array.
        train_labels = np_utils.to_categorical(train_labels, nb_classes)
        valid_labels = np_utils.to_categorical(valid_labels, nb_classes)
        test_labels = np_utils.to_categorical(test_labels, nb_classes)
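        # e.g., with nb_classes=3, a label of 2 becomes the one-hot vector [0., 0., 1.]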

        # Convert pixel data to float so it can be normalized
        train_images = train_images.astype('float32')
        valid_images = valid_images.astype('float32')
        test_images = test_images.astype('float32')

        # Normalize: scale each pixel value into the 0~1 range
        train_images /= 255
        valid_images /= 255
        test_images /= 255

        self.train_images = train_images
        self.valid_images = valid_images
        self.test_images = test_images
        self.train_labels = train_labels
        self.valid_labels = valid_labels
        self.test_labels = test_labels


# CNN network model class
class Model:
    def __init__(self):
        self.model = None
        # 1. Added selection of the activation function; the default is 'relu'
        self.model_def = ('relu', '')

    # Build the model
    def build_model(self, dataset, nb_classes=3):
        # Start with an empty network model: a linear stack to which the layers are
        # added in order, formally called a sequential (linearly stacked) model
        self.model = Sequential()

        # The following code adds each layer the CNN needs; one add() per layer
        # self.model.add(Convolution2D(32, 3, 3, border_mode='same',
        #                              input_shape=dataset.input_shape))  # 1: 2D convolution layer
        self.model.add(Convolution2D(32, (3, 3), padding='same',
                                     input_shape=dataset.input_shape))  # 1: 2D convolution layer
        self.model.add(Activation('relu'))  # 2: activation layer

        self.model.add(Convolution2D(32, (3, 3)))  # 3: 2D convolution layer
        self.model.add(Activation('relu'))  # 4: activation layer

        self.model.add(MaxPooling2D(pool_size=(2, 2)))  # 5: pooling layer
        self.model.add(Dropout(0.25))  # 6: Dropout layer

        self.model.add(Convolution2D(64, (3, 3), padding='same'))  # 7: 2D convolution layer
        self.model.add(Activation('relu'))  # 8: activation layer

        # self.model.add(Convolution2D(64, 3, 3))  # 9: 2D convolution layer
        self.model.add(Convolution2D(64, (3, 3), padding='same'))  # 9: 2D convolution layer
        self.model.add(Activation('relu'))  # 10: activation layer

        # self.model.add(MaxPooling2D(pool_size=(2, 2)))  # 11: pooling layer
        self.model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))  # 11: pooling layer
        self.model.add(Dropout(0.25))  # 12: Dropout layer

        self.model.add(Flatten())  # 13: Flatten layer
        self.model.add(Dense(512))  # 14: Dense layer, also known as a fully connected layer
        self.model.add(Activation('relu'))  # 15: activation layer
        self.model.add(Dropout(0.5))  # 16: Dropout layer
        self.model.add(Dense(nb_classes))  # 17: Dense layer
        self.model.add(Activation('softmax'))  # 18: classification layer, outputs the final result

        # Print a summary of the model
        self.model.summary()
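
        # Worked shape example (assumes IMAGE_SIZE=64; the real value comes from load_data):
        # (64,64,3) -> conv 'same' (64,64,32) -> conv 'valid' (62,62,32) -> pool (31,31,32)
        # -> conv 'same' (31,31,64) -> conv 'same' (31,31,64) -> pool 'same' (16,16,64)
        # -> flatten 16384 -> dense 512 -> dense nb_classes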

    # Train the model
    def train(self, dataset, batch_size=20, nb_epoch=10, data_augmentation=True):
        # batch_size sets how many samples are used per training iteration;
        # nb_epoch is the number of training epochs
        sgd = SGD(lr=0.01, decay=1e-6,
                  momentum=0.9, nesterov=True)  # train with an SGD+momentum optimizer; first create the optimizer object
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=sgd,
                           metrics=['accuracy'])  # complete the actual model configuration

        # Without data augmentation. "Augmentation" creates new training data from the
        # data we provide, via rotation, flips, added noise, and so on, deliberately
        # growing the training set and giving the model more to learn from.
        if not data_augmentation:
            self.model.fit(dataset.train_images,
                           dataset.train_labels,
                           batch_size=batch_size,
                           epochs=nb_epoch,  # Keras 2 renamed nb_epoch to epochs
                           validation_data=(dataset.valid_images, dataset.valid_labels),
                           shuffle=True)
        # Use real-time data augmentation
        else:
            # Define a data generator for augmentation. It returns a generator object,
            # datagen; each time datagen is called it yields one batch of data
            # (generated in order), which saves memory. It is essentially a Python
            # data generator.
            datagen = ImageDataGenerator(
                featurewise_center=False,  # whether to center the input data (dataset mean = 0)
                samplewise_center=False,  # whether to set each sample's mean to 0
                featurewise_std_normalization=False,  # whether to standardize (divide inputs by the dataset std)
                samplewise_std_normalization=False,  # whether to divide each sample by its own std
                zca_whitening=False,  # whether to apply ZCA whitening to the input data
                rotation_range=20,  # random rotation angle for augmented images (range 0~180)
                width_shift_range=0.2,  # random horizontal shift, as a fraction of image width (float in 0~1)
                height_shift_range=0.2,  # same as above, but vertical
                horizontal_flip=True,  # whether to apply random horizontal flips
                vertical_flip=False)  # whether to apply random vertical flips

            # Compute statistics over the whole training set, needed for featurewise
            # normalization, ZCA whitening, and similar processing
            datagen.fit(dataset.train_images)

            # Start training the model using the generator
            # self.model.fit_generator(datagen.flow(dataset.train_images, dataset.train_labels,
            #                                       batch_size=batch_size),
            #                          samples_per_epoch=dataset.train_images.shape[0],
            #                          nb_epoch=nb_epoch,
            #                          validation_data=(dataset.valid_images, dataset.valid_labels))
            # (samples_per_epoch/nb_epoch were renamed steps_per_epoch/epochs in Keras 2)

            self.model.fit_generator(datagen.flow(dataset.train_images, dataset.train_labels,
                                                  batch_size=batch_size),
                                     steps_per_epoch=int(np.ceil(len(dataset.train_images) / batch_size)),
                                     epochs=nb_epoch,
                                     validation_data=(dataset.valid_images, dataset.valid_labels))
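            # e.g., 1010 training images with batch_size=20 -> ceil(1010 / 20) = 51 steps per epoch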

    MODEL_PATH = './me.face.model.h5'

    def save_model(self, file_path=MODEL_PATH):
        self.model.save(file_path)

    def load_model(self, file_path=MODEL_PATH):
        self.model = load_model(file_path)

    def evaluate(self, dataset):
        score = self.model.evaluate(dataset.test_images, dataset.test_labels, verbose=1)
        print("%s: %.2f%%" % (self.model.metrics_names[1], score[1] * 100))

    def face_predict(self, image):
        # As before, the dimension ordering depends on the backend
        # if K.image_dim_ordering() == 'th' and image.shape != (1, 3, IMAGE_SIZE, IMAGE_SIZE):
        if K.image_data_format() == "channels_first" and image.shape != (1, 3, IMAGE_SIZE, IMAGE_SIZE):
            image = resize_image(image)  # the size must match the training set: IMAGE_SIZE x IMAGE_SIZE
            image = image.reshape((1, 3, IMAGE_SIZE, IMAGE_SIZE))  # unlike training, this predicts on a single image
        # elif K.image_dim_ordering() == 'tf' and image.shape != (1, IMAGE_SIZE, IMAGE_SIZE, 3):
        elif K.image_data_format() == "channels_last" and image.shape != (1, IMAGE_SIZE, IMAGE_SIZE, 3):
            image = resize_image(image)
            image = image.reshape((1, IMAGE_SIZE, IMAGE_SIZE, 3))

        # Newly added processing
        # image = resize_image(image)
        # image = image.reshape((1, IMAGE_SIZE, IMAGE_SIZE, 3))
        # Convert to float and normalize
        image = image.astype('float32')
        image /= 255

        # Output the probability that the input belongs to each class (with two
        # classes the model would give the probabilities for 0 and 1)
        # result = self.model.predict_proba(image)  # predict_proba is gone in newer Keras
        # print("image===", image)
        predict_x = self.model.predict(image)
        print("predict_x:", predict_x)
        result = np.argmax(predict_x, axis=1)
        print('result:', result)
        # my_result = list(result[0]).index(max(result[0]))
        # max_result = max(result[0])
        my_result = result[0]
        max_result = predict_x[0][result[0]]
        print("result index and max probability:", my_result, max_result)
        if max_result > 0.90:
            return my_result
        else:
            return -1

        # Class prediction: an index such as 0 or 1
        # result = self.model.predict_classes(image)


if __name__ == '__main__':
    dataset = Dataset('.\\deep_learning')

    # dataset.load(nb_classes=8)  # ---------------------- replaced by the automatic version ->
    ########################## replaced ->
    with open('class_indices.json', 'r', encoding='utf-8') as file:
        data = json.load(file)
    dataset.load(nb_classes=len(data))  # ---------------- changed: class count detected automatically
    ########################## replaced <-

    # Train the model
    model = Model()
    model.build_model(dataset, nb_classes=len(data))  # --------------- changed: class count detected automatically
    model.train(dataset)
    model.save_model(file_path='./model/me.face.model.h5')
    # Evaluate the model to confirm whether its accuracy meets the requirement
    model = Model()
    model.load_model(file_path='./model/me.face.model.h5')
    model.evaluate(dataset)
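
    # Hedged extra step: map a predicted index back to its class name via the same
    # JSON mapping. Assumes class_indices.json stores {"class_name": index} pairs;
    # invert the dict the other way if your load_data writes {index: name} instead.
    index_to_name = {index: name for name, index in data.items()}
    print('class 0 ->', index_to_name.get(0, 'unknown'))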