Python: 3.6.10
Keras: 2.2.4
TensorFlow: 1.14.0
numpy: 1.16.4
sklearn: 0.22.2
create_datasets.py
from sklearn import model_selection
from PIL import Image
import os, glob
import numpy as np
import random

# Classification categories (one subfolder per class)
name = "name"
root_dir = "./datasets/" + name + "/"
savenpy = "./npy/" + name + ".npy"
categories = os.listdir(root_dir)
nb_classes = len(categories)
image_size = 224

# Read the image data in each class folder
X = []  # Image data
Y = []  # Label data
for idx, category in enumerate(categories):
    dir_path = root_dir + category
    search_files = os.listdir(dir_path)
    for file in search_files:
        filepath = dir_path + "/" + file
        img = Image.open(filepath)
        img = img.convert("RGB")
        #img = img.convert("L")
        img = img.resize((image_size, image_size))
        data = np.asarray(img)
        X.append(data)
        Y.append(idx)
X = np.array(X)
Y = np.array(Y)
print(len(X), len(Y))

# Split into training data and test data
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, Y, test_size=0.2)
xy = (X_train, X_test, y_train, y_test)

# Save the NumPy arrays to a file
np.save(savenpy, xy)
print("end")
create_model.py
import os
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.utils import np_utils
from keras.callbacks import EarlyStopping

# Categories to be classified
name = "name"
root_dir = "./datasets/" + name + "/"
loadnpy = "./npy/" + name + ".npy"
categories = os.listdir(root_dir)
nb_classes = len(categories)

# Main function
def main():
    X_train, X_test, y_train, y_test = np.load(loadnpy, allow_pickle=True)
    # Normalize the image data
    X_train = X_train.astype('float') / 256
    X_test = X_test.astype('float') / 256
    # One-hot encode the labels
    y_train = np_utils.to_categorical(y_train, nb_classes)
    y_test = np_utils.to_categorical(y_test, nb_classes)
    model = model_train(X_train, y_train)
    model_eval(model, X_test, y_test)

# Train the model
def model_train(X, y):
    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding='same', input_shape=X.shape[1:]))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    model.summary()

    # Optimizer
    opt = keras.optimizers.RMSprop(lr=0.0001, decay=1e-6)
    # Compile the model
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt, metrics=['accuracy'])
    # Train the model
    model.fit(X, y, batch_size=32, epochs=100)
    # Save the model
    model.save('./save_model.h5')
    return model

# Evaluate the model
def model_eval(model, X, y):
    score = model.evaluate(X, y)
    print('loss=', score[0])
    print('accuracy=', score[1])

if __name__ == "__main__":
    main()
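EarlyStopping is imported above but never passed to fit, so training always runs the full 100 epochs. A minimal sketch of how the fit call inside model_train could use it; the validation_split and patience values are assumptions, not part of the original script:

# Stop once the validation loss has not improved for 10 consecutive epochs (assumed value)
early_stopping = EarlyStopping(monitor='val_loss', patience=10)
model.fit(X, y, batch_size=32, epochs=100,
          validation_split=0.1,  # hold out 10% of the training data for monitoring (assumed value)
          callbacks=[early_stopping])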
Classify an image with the saved model:
import os
import numpy as np
from keras.models import load_model
from keras.preprocessing.image import img_to_array, load_img

img_path = 'Image file path you want to identify (jpg/png file)'
model_file_path = 'Model file path (h5 file)'

name = "name"
root_dir = "./datasets/" + name + "/"  # Folder whose subfolder names are the class labels
categories = os.listdir(root_dir)      # Get the labels from the folder names

# Load the trained model
model = load_model(model_file_path)

# Load the image, resize it, and scale it to [0, 1]
img = load_img(img_path, target_size=(224, 224))
img_nad = img_to_array(img) / 255
img_nad = img_nad[None, ...]  # Add a batch dimension

pred = model.predict(img_nad, batch_size=1, verbose=0)
score = np.max(pred)
pred_label = categories[np.argmax(pred[0])]
print('name:', pred_label)
print('score:', score)
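To label a whole folder of images instead of a single file, the same preprocessing can be applied in a loop; a rough sketch, assuming a hypothetical ./test_images/ directory and the model and categories loaded above:

import glob

for path in glob.glob('./test_images/*'):  # hypothetical folder of jpg/png files
    arr = img_to_array(load_img(path, target_size=(224, 224))) / 255
    pred = model.predict(arr[None, ...], batch_size=1, verbose=0)
    print(path, categories[np.argmax(pred[0])], np.max(pred))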