Environment: Python 3.6.10 / Keras 2.2.4 / TensorFlow 1.14.0 / NumPy 1.16.4 / scikit-learn 0.22.2
create_datasets.py
from sklearn import model_selection
from PIL import Image
import os, glob
import numpy as np
import random

# Classification categories: each sub-directory of root_dir is one class.
name = "name"
root_dir = "./datasets/" + name + "/"
savenpy = "./npy/" + name + ".npy"
# Sort the directory listing so label indices are deterministic.
# os.listdir order is filesystem-dependent, and the prediction script
# rebuilds this list with its own listdir call — unsorted, the two
# could disagree and every prediction would be mislabelled.
categories = sorted(os.listdir(root_dir))
nb_classes = len(categories)
image_size = 224

# Read the image data from each class folder.
X = []  # image data
Y = []  # label data (integer class index)
for idx, category in enumerate(categories):
    dir_path = root_dir + category
    for file in os.listdir(dir_path):
        filepath = dir_path + "/" + file
        img = Image.open(filepath)
        # Force 3-channel RGB so every sample has the same shape.
        img = img.convert("RGB")
        img = img.resize((image_size, image_size))
        X.append(np.asarray(img))
        Y.append(idx)

X = np.array(X)
Y = np.array(Y)
print(len(X), len(Y))

# Split into training data and test data (80/20).
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, Y, test_size=0.2)
xy = (X_train, X_test, y_train, y_test)
# Save the tuple of Numpy arrays to one file. Saving a tuple produces an
# object array, so the loader must pass allow_pickle=True.
np.save(savenpy, xy)
print("end")
create_model.py
import os  # required: os.listdir is used below but was never imported (NameError)

import keras
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D, Conv2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.utils import np_utils
import numpy as np
from keras.callbacks import EarlyStopping

# Category to classify: must match `name` used in create_datasets.py.
name = "name"
root_dir = "./datasets/" + name + "/"
loadnpy = "./npy/" + name + ".npy"
# Sorted so the label index order matches create_datasets.py.
categories = sorted(os.listdir(root_dir))
nb_classes = len(categories)
# Define the main function: load the prepared dataset, train, evaluate.
def main():
    """Load the saved train/test split, train the CNN, and evaluate it."""
    # The .npy file holds a tuple of arrays (an object array), so
    # allow_pickle=True is mandatory with numpy >= 1.16.3.
    X_train, X_test, y_train, y_test = np.load(loadnpy, allow_pickle=True)
    # Normalise pixel values into [0, 1]. Divide by 255 (the maximum pixel
    # value) — the prediction script scales by 255 too; the original /256
    # created a small train/inference mismatch.
    X_train = X_train.astype('float') / 255
    X_test = X_test.astype('float') / 255
    # One-hot encode the integer class labels.
    y_train = np_utils.to_categorical(y_train, nb_classes)
    y_test = np_utils.to_categorical(y_test, nb_classes)
    model = model_train(X_train, y_train)
    model_eval(model, X_test, y_test)
# Train the model
def model_train(X, y):
    """Build, compile, and train the CNN; persist it to ./save_model.h5.

    X: 4-D image tensor (samples, height, width, channels), already normalised.
    y: one-hot encoded labels with nb_classes columns.
    Returns the fitted Sequential model.
    """
    # Two conv blocks (32 then 64 filters), each followed by pooling and
    # dropout, then a dense classifier head ending in softmax.
    architecture = [
        Conv2D(32, (3, 3), padding='same', input_shape=X.shape[1:]),
        Activation('relu'),
        Conv2D(32, (3, 3)),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Conv2D(64, (3, 3), padding='same'),
        Activation('relu'),
        Conv2D(64, (3, 3)),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Flatten(),
        Dense(512),
        Activation('relu'),
        Dropout(0.5),
        Dense(nb_classes),
        Activation('softmax'),
    ]
    model = Sequential()
    for layer in architecture:
        model.add(layer)
    model.summary()

    # Optimisation method (Keras 2.2.x lowercase factory, lr/decay args).
    opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
    # Compile the model
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt, metrics=['accuracy'])
    # Fit the model
    model.fit(X, y, batch_size=32, epochs=100)
    # Save the trained model
    model.save('./save_model.h5')
    return model
# Evaluate the model
def model_eval(model, X, y):
    """Evaluate *model* on (X, y) and print the resulting loss and accuracy."""
    results = model.evaluate(X, y)
    print('loss=', results[0])
    print('accuracy=', results[1])
# Run training and evaluation only when executed as a script.
if __name__ == "__main__":
    main()
import os  # required: os.listdir is used below but was never imported (NameError)

import numpy as np
from keras.models import load_model
from keras.preprocessing.image import img_to_array, load_img

# Category name — was referenced below but never defined in this script;
# it must match `name` in create_datasets.py so root_dir points at the
# same class folders the model was trained on.
name = "name"
img_path = 'Chemin du fichier image que vous souhaitez identifier (jpg/png)'
model_file_path = 'Chemin du fichier modèle (fichier h5)'
root_dir = "./datasets/" + name + "/"  # path whose folder names are the class labels
# Sorted so indices line up with the label order used during training.
categories = sorted(os.listdir(root_dir))

model = load_model(model_file_path)
# load_img resizes to the network's 224x224 input; img_to_array converts
# the PIL image once (the original converted it a redundant second time).
img = img_to_array(load_img(img_path, target_size=(224, 224)))
img_nad = img / 255  # same [0, 1] normalisation as training
img_nad = img_nad[None, ...]  # add a batch dimension -> (1, 224, 224, 3)
pred = model.predict(img_nad, batch_size=1, verbose=0)
score = np.max(pred)  # confidence of the top class
pred_label = categories[np.argmax(pred[0])]
print('name:', pred_label)
print('score:', score)
Recommended Posts